keyword
stringclasses
7 values
repo_name
stringlengths
8
98
file_path
stringlengths
4
244
file_extension
stringclasses
29 values
file_size
int64
0
84.1M
line_count
int64
0
1.6M
content
stringlengths
1
84.1M
language
stringclasses
14 values
3D
mcellteam/mcell
libs/gperftools/src/windows/auto_testing_hook.h
.h
6,220
157
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2010 The Chromium Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Utility for using SideStep with unit tests. 
#ifndef CEEE_TESTING_SIDESTEP_AUTO_TESTING_HOOK_H_ #define CEEE_TESTING_SIDESTEP_AUTO_TESTING_HOOK_H_ #include "base/basictypes.h" #include "base/logging.h" #include "preamble_patcher.h" #define SIDESTEP_CHK(x) CHECK(x) #define SIDESTEP_EXPECT_TRUE(x) SIDESTEP_CHK(x) namespace sidestep { // Same trick as common/scope_cleanup.h ScopeGuardImplBase class AutoTestingHookBase { public: virtual ~AutoTestingHookBase() {} }; // This is the typedef you normally use for the class, e.g. // // AutoTestingHook hook = MakeTestingHook(TargetFunc, HookTargetFunc); // // The 'hook' variable will then be destroyed when it goes out of scope. // // NOTE: You must not hold this type as a member of another class. Its // destructor will not get called. typedef const AutoTestingHookBase& AutoTestingHook; // This is the class you must use when holding a hook as a member of another // class, e.g. // // public: // AutoTestingHookHolder holder_; // MyClass() : my_hook_holder(MakeTestingHookHolder(Target, Hook)) {} class AutoTestingHookHolder { public: explicit AutoTestingHookHolder(AutoTestingHookBase* hook) : hook_(hook) {} ~AutoTestingHookHolder() { delete hook_; } private: AutoTestingHookHolder() {} // disallow AutoTestingHookBase* hook_; }; // This class helps patch a function, then unpatch it when the object exits // scope, and also maintains the pointer to the original function stub. // // To enable use of the class without having to explicitly provide the // type of the function pointers (and instead only providing it // implicitly) we use the same trick as ScopeGuard (see // common/scope_cleanup.h) uses, so to create a hook you use the MakeHook // function rather than a constructor. // // NOTE: This function is only safe for e.g. unit tests and _not_ for // production code. See PreamblePatcher class for details. 
// RAII wrapper around PreamblePatcher: patches target_function to jump to
// replacement_function on construction (if do_it is true) and unpatches it
// again in the destructor. T is the function-pointer type being hooked.
template <typename T>
class AutoTestingHookImpl : public AutoTestingHookBase {
 public:
  // Factory returning the hook by value; normally bound to the
  // AutoTestingHook typedef (a const reference) at the call site.
  static AutoTestingHookImpl<T> MakeTestingHook(T target_function,
                                                T replacement_function,
                                                bool do_it) {
    return AutoTestingHookImpl<T>(target_function, replacement_function, do_it);
  }

  // Factory returning a heap-allocated hook, for use with
  // AutoTestingHookHolder (which takes ownership and deletes it).
  static AutoTestingHookImpl<T>* MakeTestingHookHolder(T target_function,
                                                       T replacement_function,
                                                       bool do_it) {
    return new AutoTestingHookImpl<T>(target_function,
                                      replacement_function, do_it);
  }

  // Unpatches the target function, but only if the constructor actually
  // patched it (did_it_); a failed unpatch aborts via SIDESTEP_CHK.
  ~AutoTestingHookImpl() {
    if (did_it_) {
      SIDESTEP_CHK(SIDESTEP_SUCCESS == PreamblePatcher::Unpatch(
          (void*)target_function_, (void*)replacement_function_,
          (void*)original_function_));
    }
  }

  // Returns a pointer to the original function.  To use this method you will
  // have to explicitly create an AutoTestingHookImpl of the specific
  // function pointer type (i.e. not use the AutoTestingHook typedef).
  T original_function() {
    return original_function_;
  }

 private:
  // Private: construction goes through the Make* factories so the caller
  // never has to spell out T.  Patches immediately when do_it is true and
  // stores the returned preamble stub in original_function_.
  AutoTestingHookImpl(T target_function, T replacement_function, bool do_it)
      : target_function_(target_function),
        original_function_(NULL),
        replacement_function_(replacement_function),
        did_it_(do_it) {
    if (do_it) {
      SIDESTEP_CHK(SIDESTEP_SUCCESS == PreamblePatcher::Patch(target_function,
                                                              replacement_function,
                                                              &original_function_));
    }
  }

  T target_function_;       // always valid
  T original_function_;     // always valid
  T replacement_function_;  // always valid
  bool did_it_;             // Remember if we did it or not...
};

// Creates a scoped hook; typically bound as:
//   AutoTestingHook hook = MakeTestingHook(Target, Hook);
template <typename T>
inline AutoTestingHookImpl<T> MakeTestingHook(T target,
                                              T replacement,
                                              bool do_it) {
  return AutoTestingHookImpl<T>::MakeTestingHook(target, replacement, do_it);
}

// Overload that always patches (do_it == true).
template <typename T>
inline AutoTestingHookImpl<T> MakeTestingHook(T target, T replacement) {
  return AutoTestingHookImpl<T>::MakeTestingHook(target, replacement, true);
}

// Heap-allocating variant for AutoTestingHookHolder members; the holder
// takes ownership of the returned pointer.
template <typename T>
inline AutoTestingHookImpl<T>* MakeTestingHookHolder(T target, T replacement) {
  return AutoTestingHookImpl<T>::MakeTestingHookHolder(target, replacement,
                                                       true);
}

};  // namespace sidestep

#endif  // CEEE_TESTING_SIDESTEP_AUTO_TESTING_HOOK_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/preamble_patcher.h
.h
26,453
621
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Joi Sigurdsson * Author: Scott Francis * * Definition of PreamblePatcher */ #ifndef GOOGLE_PERFTOOLS_PREAMBLE_PATCHER_H_ #define GOOGLE_PERFTOOLS_PREAMBLE_PATCHER_H_ #include "config.h" #include <windows.h> // compatibility shim #include "base/logging.h" #define SIDESTEP_ASSERT(cond) RAW_DCHECK(cond, #cond) #define SIDESTEP_LOG(msg) RAW_VLOG(1, msg) // Maximum size of the preamble stub. 
// ...we overwrite at least the first 5 bytes of the function.  Considering
// the worst case scenario, we need 4 bytes + the max instruction size + 5
// more bytes for our jump back to the original code.  With that in mind,
// 32 is a good number :)
#ifdef _M_X64
// In 64-bit mode we may need more room.  In 64-bit mode all jumps must be
// within +/-2GB of RIP.  Because of this limitation we may need to use a
// trampoline to jump to the replacement function if it is further than 2GB
// away from the target. The trampoline is 14 bytes.
//
// So 4 bytes + max instruction size (17 bytes) + 5 bytes to jump back to the
// original code + trampoline size.  64 bytes is a nice number :-)
#define MAX_PREAMBLE_STUB_SIZE    (64)
#else
#define MAX_PREAMBLE_STUB_SIZE    (32)
#endif

// Determines if this is a 64-bit binary.
#ifdef _M_X64
static const bool kIs64BitBinary = true;
#else
static const bool kIs64BitBinary = false;
#endif

namespace sidestep {

// Possible results of patching/unpatching
enum SideStepError {
  SIDESTEP_SUCCESS = 0,
  SIDESTEP_INVALID_PARAMETER,
  SIDESTEP_INSUFFICIENT_BUFFER,
  SIDESTEP_JUMP_INSTRUCTION,        // jxx in the bytes we need to overwrite
  SIDESTEP_FUNCTION_TOO_SMALL,      // e.g. ret within the first 5 bytes
  SIDESTEP_UNSUPPORTED_INSTRUCTION, // disassembler could not relocate it
  SIDESTEP_NO_SUCH_MODULE,
  SIDESTEP_NO_SUCH_FUNCTION,
  SIDESTEP_ACCESS_DENIED,
  SIDESTEP_UNEXPECTED,
};

// Maps a SideStepError to a COM-style HRESULT for callers that need one.
#define SIDESTEP_TO_HRESULT(error)                      \
  MAKE_HRESULT(SEVERITY_ERROR, FACILITY_NULL, error)

class DeleteUnsignedCharArray;

// Implements a patching mechanism that overwrites the first few bytes of
// a function preamble with a jump to our hook function, which is then
// able to call the original function via a specially-made preamble-stub
// that imitates the action of the original preamble.
//
// NOTE: This patching mechanism should currently only be used for
// non-production code, e.g. unit tests, because it is not threadsafe.
// See the TODO in preamble_patcher_with_stub.cc for instructions on what // we need to do before using it in production code; it's fairly simple // but unnecessary for now since we only intend to use it in unit tests. // // To patch a function, use either of the typesafe Patch() methods. You // can unpatch a function using Unpatch(). // // Typical usage goes something like this: // @code // typedef int (*MyTypesafeFuncPtr)(int x); // MyTypesafeFuncPtr original_func_stub; // int MyTypesafeFunc(int x) { return x + 1; } // int HookMyTypesafeFunc(int x) { return 1 + original_func_stub(x); } // // void MyPatchInitializingFunction() { // original_func_stub = PreamblePatcher::Patch( // MyTypesafeFunc, HookMyTypesafeFunc); // if (!original_func_stub) { // // ... error handling ... // } // // // ... continue - you have patched the function successfully ... // } // @endcode // // Note that there are a number of ways that this method of patching can // fail. The most common are: // - If there is a jump (jxx) instruction in the first 5 bytes of // the function being patched, we cannot patch it because in the // current implementation we do not know how to rewrite relative // jumps after relocating them to the preamble-stub. Note that // if you really really need to patch a function like this, it // would be possible to add this functionality (but at some cost). // - If there is a return (ret) instruction in the first 5 bytes // we cannot patch the function because it may not be long enough // for the jmp instruction we use to inject our patch. // - If there is another thread currently executing within the bytes // that are copied to the preamble stub, it will crash in an undefined // way. // // If you get any other error than the above, you're either pointing the // patcher at an invalid instruction (e.g. 
// ...into the middle of a multi-byte instruction, or not at memory
// containing executable instructions) or, there may be a bug in the
// disassembler we use to find instruction boundaries.
//
// NOTE: In optimized builds, when you have very trivial functions that
// the compiler can reason do not have side effects, the compiler may
// reuse the result of calling the function with a given parameter, which
// may mean if you patch the function in between your patch will never get
// invoked.  See preamble_patcher_test.cc for an example.
class PERFTOOLS_DLL_DECL PreamblePatcher {
 public:

  // This is a typesafe version of RawPatch(), identical in all other
  // ways than it takes a template parameter indicating the type of the
  // function being patched.
  //
  // @param T The type of the function you are patching. Usually
  // you will establish this type using a typedef, as in the following
  // example:
  // @code
  // typedef BOOL (WINAPI *MessageBoxPtr)(HWND, LPCTSTR, LPCTSTR, UINT);
  // MessageBoxPtr original = NULL;
  // PreamblePatcher::Patch(MessageBox, Hook_MessageBox, &original);
  // @endcode
  template <class T>
  static SideStepError Patch(T target_function,
                             T replacement_function,
                             T* original_function_stub) {
    // NOTE: casting from a function to a pointer is contra the C++
    //       spec.  It's not safe on IA64, but is on i386.  We use
    //       a C-style cast here to emphasize this is not legal C++.
    return RawPatch((void*)(target_function),
                    (void*)(replacement_function),
                    (void**)(original_function_stub));
  }

  // Patches a named function imported from the named module using
  // preamble patching.  Uses RawPatch() to do the actual patching work.
  //
  // @param T The type of the function you are patching.  Must
  // exactly match the function you specify using module_name and
  // function_name.
  //
  // @param module_name The name of the module from which the function
  // is being imported.  Note that the patch will fail if this module
  // has not already been loaded into the current process.
  //
  // @param function_name The name of the function you wish to patch.
  //
  // @param replacement_function Your replacement function which
  // will be called whenever code tries to call the original function.
  //
  // @param original_function_stub Pointer to memory that should receive a
  // pointer that can be used (e.g. in the replacement function) to call the
  // original function, or NULL to indicate failure.
  //
  // @return One of the EnSideStepError error codes; only SIDESTEP_SUCCESS
  // indicates success.
  template <class T>
  static SideStepError Patch(LPCTSTR module_name,
                             LPCSTR function_name,
                             T replacement_function,
                             T* original_function_stub) {
    SIDESTEP_ASSERT(module_name && function_name);
    if (!module_name || !function_name) {
      SIDESTEP_ASSERT(false &&
                      "You must specify a module name and function name.");
      return SIDESTEP_INVALID_PARAMETER;
    }
    HMODULE module = ::GetModuleHandle(module_name);
    SIDESTEP_ASSERT(module != NULL);
    if (!module) {
      SIDESTEP_ASSERT(false && "Invalid module name.");
      return SIDESTEP_NO_SUCH_MODULE;
    }
    FARPROC existing_function = ::GetProcAddress(module, function_name);
    if (!existing_function) {
      SIDESTEP_ASSERT(
          false && "Did not find any function with that name in the module.");
      return SIDESTEP_NO_SUCH_FUNCTION;
    }
    // NOTE: casting from a function to a pointer is contra the C++
    //       spec.  It's not safe on IA64, but is on i386.  We use
    //       a C-style cast here to emphasize this is not legal C++.
    return RawPatch((void*)existing_function,
                    (void*)replacement_function,
                    (void**)(original_function_stub));
  }

  // Patches a function by overwriting its first few bytes with
  // a jump to a different function.  This is the "worker" function
  // for each of the typesafe Patch() functions.  In most cases,
  // it is preferable to use the Patch() functions rather than
  // this one as they do more checking at compile time.
  //
  // @param target_function A pointer to the function that should be
  // patched.
  //
  // @param replacement_function A pointer to the function that should
  // replace the target function.  The replacement function must have
  // exactly the same calling convention and parameters as the original
  // function.
  //
  // @param original_function_stub Pointer to memory that should receive a
  // pointer that can be used (e.g. in the replacement function) to call the
  // original function, or NULL to indicate failure.
  //
  // @return One of the EnSideStepError error codes; only SIDESTEP_SUCCESS
  // indicates success.
  //
  // @note The preamble-stub (the memory pointed to by
  // *original_function_stub) is allocated on the heap, and (in
  // production binaries) never destroyed, resulting in a memory leak.  This
  // will be the case until we implement safe unpatching of a method.
  // However, it is quite difficult to unpatch a method (because other
  // threads in the process may be using it) so we are leaving it for now.
  // See however UnsafeUnpatch, which can be used for binaries where you
  // know only one thread is running, e.g. unit tests.
  static SideStepError RawPatch(void* target_function,
                                void* replacement_function,
                                void** original_function_stub);

  // Unpatches target_function and deletes the stub that previously could be
  // used to call the original version of the function.
  //
  // DELETES the stub that is passed to the function.
  //
  // @param target_function Pointer to the target function which was
  // previously patched, i.e. a pointer which value should match the value
  // of the symbol prior to patching it.
  //
  // @param replacement_function Pointer to the function target_function
  // was patched to.
  //
  // @param original_function_stub Pointer to the stub returned when
  // patching, that could be used to call the original version of the
  // patched function.  This function will also delete the stub, which after
  // unpatching is useless.
  //
  // If your original call was
  //   Patch(VirtualAlloc, MyVirtualAlloc, &origptr)
  // then to undo it you would call
  //   Unpatch(VirtualAlloc, MyVirtualAlloc, origptr);
  //
  // @return One of the EnSideStepError error codes; only SIDESTEP_SUCCESS
  // indicates success.
  static SideStepError Unpatch(void* target_function,
                               void* replacement_function,
                               void* original_function_stub);

  // A helper routine when patching, which follows jmp instructions at
  // function addresses, to get to the "actual" function contents.
  // This allows us to identify two functions that are at different
  // addresses but actually resolve to the same code.
  //
  // @param target_function Pointer to a function.
  //
  // @return Either target_function (the input parameter), or if
  // target_function's body consists entirely of a JMP instruction,
  // the address it JMPs to (or more precisely, the address at the end
  // of a chain of JMPs).
  template <class T>
  static T ResolveTarget(T target_function) {
    return (T)ResolveTargetImpl((unsigned char*)target_function, NULL);
  }

  // Allocates a block of memory of size MAX_PREAMBLE_STUB_SIZE that is as
  // close (within 2GB) as possible to target.  This is done to ensure that
  // we can perform a relative jump from target to a trampoline if the
  // replacement function is > +-2GB from target.  This means that we only
  // need to patch 5 bytes in the target function.
  //
  // @param target Pointer to target function.
  //
  // @return Returns a block of memory of size MAX_PREAMBLE_STUB_SIZE that can
  // be used to store a function preamble block.
  static unsigned char* AllocPreambleBlockNear(void* target);

  // Frees a block allocated by AllocPreambleBlockNear.
  //
  // @param block Block that was returned by AllocPreambleBlockNear.
  static void FreePreambleBlock(unsigned char* block);

 private:
  friend class DeleteUnsignedCharArray;

  // Used to store data allocated for preamble stubs
  struct PreamblePage {
    unsigned int magic_;    // sanity marker for pages we own
    PreamblePage* next_;    // next page in the linked list
    // This member points to a linked list of free blocks within the page
    // or NULL if at the end
    void* free_;
  };

  // In 64-bit mode, the replacement function must be within 2GB of the
  // original target in order to only require 5 bytes for the function patch.
  // To meet this requirement we're creating an allocator within this class to
  // allocate blocks that are within 2GB of a given target.  This member is
  // the head of a linked list of pages used to allocate blocks that are
  // within 2GB of the target.
  static PreamblePage* preamble_pages_;

  // Page granularity
  static long granularity_;

  // Page size
  static long pagesize_;

  // Determines if the patcher has been initialized.
  static bool initialized_;

  // Used to initialize static members.
  static void Initialize();

  // Patches a function by overwriting its first few bytes with
  // a jump to a different function.  This is similar to the RawPatch
  // function except that it uses the stub allocated by the caller
  // instead of allocating it.
  //
  // We call VirtualProtect to make the
  // target function writable at least for the duration of the call.
  //
  // @param target_function A pointer to the function that should be
  // patched.
  //
  // @param replacement_function A pointer to the function that should
  // replace the target function.  The replacement function must have
  // exactly the same calling convention and parameters as the original
  // function.
  //
  // @param preamble_stub A pointer to a buffer where the preamble stub
  // should be copied. The size of the buffer should be sufficient to
  // hold the preamble bytes.
  //
  // @param stub_size Size in bytes of the buffer allocated for the
  // preamble_stub
  //
  // @param bytes_needed Pointer to a variable that receives the minimum
  // number of bytes required for the stub.  Can be set to NULL if you're
  // not interested.
  //
  // @return An error code indicating the result of patching.
  static SideStepError RawPatchWithStubAndProtections(
      void* target_function,
      void* replacement_function,
      unsigned char* preamble_stub,
      unsigned long stub_size,
      unsigned long* bytes_needed);

  // A helper function used by RawPatchWithStubAndProtections -- it
  // does everything but the VirtualProtect work.  Defined in
  // preamble_patcher_with_stub.cc.  Parameters are identical to
  // RawPatchWithStubAndProtections above.
  //
  // @return An error code indicating the result of patching.
  static SideStepError RawPatchWithStub(void* target_function,
                                        void* replacement_function,
                                        unsigned char* preamble_stub,
                                        unsigned long stub_size,
                                        unsigned long* bytes_needed);

  // A helper routine when patching, which follows jmp instructions at
  // function addresses, to get to the "actual" function contents.
  // This allows us to identify two functions that are at different
  // addresses but actually resolve to the same code.
  //
  // @param target_function Pointer to a function.
  //
  // @param stop_before If, while following JMP instructions from
  // target_function, we reach the address stop_before, we return
  // immediately with the address that jumps to stop_before rather than
  // continuing to follow the chain.
  //
  // @param stop_before_trampoline When following JMP instructions from
  // target_function, stop before a trampoline is detected.  See comment in
  // PreamblePatcher::RawPatchWithStub for more information.  This parameter
  // has no effect in 32-bit mode.
  //
  // @return Either target_function (the input parameter), or if
  // target_function's body consists entirely of a JMP instruction,
  // the address it JMPs to (or more precisely, the address at the end
  // of a chain of JMPs).
  static void* ResolveTargetImpl(unsigned char* target_function,
                                 unsigned char* stop_before,
                                 bool stop_before_trampoline = false);

  // Helper routine that attempts to allocate a page as close (within 2GB)
  // as possible to target.
  //
  // @param target Pointer to target function.
  //
  // @return Returns an address that is within 2GB of target.
  static void* AllocPageNear(void* target);

  // Helper routine that determines if a target instruction is a short
  // conditional jump (7x xx = Jcc rel8off).
  //
  // @param target Pointer to instruction.
  //
  // @param instruction_size Size of the instruction in bytes.
  //
  // @return Returns true if the instruction is a short conditional jump.
  static bool IsShortConditionalJump(unsigned char* target,
                                     unsigned int instruction_size);

  // As above, but for an unconditional short jump (EB xx = JMP rel8off).
  static bool IsShortJump(unsigned char *target,
                          unsigned int instruction_size);

  // Helper routine that determines if a target instruction is a near
  // conditional jump.
  //
  // @param target Pointer to instruction.
  //
  // @param instruction_size Size of the instruction in bytes.
  //
  // @return Returns true if the instruction is a near conditional jump.
  static bool IsNearConditionalJump(unsigned char* target,
                                    unsigned int instruction_size);

  // Helper routine that determines if a target instruction is a near
  // relative jump.
  //
  // @param target Pointer to instruction.
  //
  // @param instruction_size Size of the instruction in bytes.
  //
  // @return Returns true if the instruction is a near relative jump.
  static bool IsNearRelativeJump(unsigned char* target,
                                 unsigned int instruction_size);

  // Helper routine that determines if a target instruction is a near
  // absolute call.
  //
  // @param target Pointer to instruction.
  //
  // @param instruction_size Size of the instruction in bytes.
  //
  // @return Returns true if the instruction is a near absolute call.
  static bool IsNearAbsoluteCall(unsigned char* target,
                                 unsigned int instruction_size);

  // Helper routine that determines if a target instruction is a near
  // relative call.
  //
  // @param target Pointer to instruction.
  //
  // @param instruction_size Size of the instruction in bytes.
  //
  // @return Returns true if the instruction is a near relative call.
  static bool IsNearRelativeCall(unsigned char* target,
                                 unsigned int instruction_size);

  // Helper routine that determines if a target instruction is a 64-bit MOV
  // that uses a RIP-relative displacement.
  //
  // @param target Pointer to instruction.
  //
  // @param instruction_size Size of the instruction in bytes.
  //
  // @return Returns true if the instruction is a MOV with displacement.
  static bool IsMovWithDisplacement(unsigned char* target,
                                    unsigned int instruction_size);

  // Helper routine that converts a short conditional jump instruction
  // to a near conditional jump in a target buffer.  Note that the target
  // buffer must be within 2GB of the source for the near jump to work.
  //
  // A short conditional jump instruction is in the format:
  // 7x xx = Jcc rel8off
  //
  // @param source Pointer to instruction.
  //
  // @param instruction_size Size of the instruction.
  //
  // @param target Target buffer to write the new instruction.
  //
  // @param target_bytes Pointer to a buffer that contains the size
  // of the target instruction, in bytes.
  //
  // @param target_size Size of the target buffer.
  //
  // @return Returns SIDESTEP_SUCCESS if successful, otherwise an error.
  static SideStepError PatchShortConditionalJump(unsigned char* source,
                                                 unsigned int instruction_size,
                                                 unsigned char* target,
                                                 unsigned int* target_bytes,
                                                 unsigned int target_size);

  // As PatchShortConditionalJump, but for an unconditional short jump.
  static SideStepError PatchShortJump(unsigned char* source,
                                      unsigned int instruction_size,
                                      unsigned char* target,
                                      unsigned int* target_bytes,
                                      unsigned int target_size);

  // Helper routine that converts various jump-like instructions to
  // corresponding instructions in the target buffer.  What this routine does
  // is fix up the relative offsets contained in jump instructions to point
  // back to the original target routine.  Like with
  // PatchShortConditionalJump, the target buffer must be within 2GB of the
  // source.
  //
  // We currently handle the following instructions:
  //
  // E9 xx xx xx xx     = JMP rel32off
  // 0F 8x xx xx xx xx  = Jcc rel32off
  // FF /2 xx xx xx xx  = CALL reg/mem32/mem64
  // E8 xx xx xx xx     = CALL rel32off
  //
  // It should not be hard to update this function to support other
  // instructions that jump to relative targets.
  //
  // @param source Pointer to instruction.
  //
  // @param instruction_size Size of the instruction.
  //
  // @param target Target buffer to write the new instruction.
  //
  // @param target_bytes Pointer to a buffer that contains the size
  // of the target instruction, in bytes.
  //
  // @param target_size Size of the target buffer.
  //
  // @return Returns SIDESTEP_SUCCESS if successful, otherwise an error.
  static SideStepError PatchNearJumpOrCall(unsigned char* source,
                                           unsigned int instruction_size,
                                           unsigned char* target,
                                           unsigned int* target_bytes,
                                           unsigned int target_size);

  // Helper routine that patches a 64-bit MOV instruction with a RIP-relative
  // displacement.  The target buffer must be within 2GB of the source.
  //
  // 48 8B 0D XX XX XX XX = MOV rel32off
  //
  // @param source Pointer to instruction.
  //
  // @param instruction_size Size of the instruction.
  //
  // @param target Target buffer to write the new instruction.
  //
  // @param target_bytes Pointer to a buffer that contains the size
  // of the target instruction, in bytes.
  //
  // @param target_size Size of the target buffer.
  //
  // @return Returns SIDESTEP_SUCCESS if successful, otherwise an error.
  static SideStepError PatchMovWithDisplacement(unsigned char* source,
                                                unsigned int instruction_size,
                                                unsigned char* target,
                                                unsigned int* target_bytes,
                                                unsigned int target_size);
};

};  // namespace sidestep

#endif  // GOOGLE_PERFTOOLS_PREAMBLE_PATCHER_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/config.h
.h
10,664
361
/* A manual version of config.h fit for windows machines. * * Use of this source code is governed by a BSD-style license that can * be found in the LICENSE file. */ /* Sometimes we accidentally #include this config.h instead of the one in .. -- this is particularly true for msys/mingw, which uses the unix config.h but also runs code in the windows directory. */ #ifdef __MINGW32__ #include "../config.h" #define GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_ #endif #ifndef GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_ #define GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_ /* used by tcmalloc.h */ #define GPERFTOOLS_CONFIG_H_ /* Enable aggressive decommit by default */ /* #undef ENABLE_AGGRESSIVE_DECOMMIT_BY_DEFAULT */ /* Build new/delete operators for overaligned types */ /* #undef ENABLE_ALIGNED_NEW_DELETE */ /* Build runtime detection for sized delete */ /* #undef ENABLE_DYNAMIC_SIZED_DELETE */ /* Report large allocation */ /* #undef ENABLE_LARGE_ALLOC_REPORT */ /* Build sized deletion operators */ /* #undef ENABLE_SIZED_DELETE */ /* Define to 1 if you have the <asm/ptrace.h> header file. */ /* #undef HAVE_ASM_PTRACE_H */ /* Define to 1 if compiler supports __builtin_stack_pointer */ /* #undef HAVE_BUILTIN_STACK_POINTER */ /* Define to 1 if you have the <conflict-signal.h> header file. */ /* #undef HAVE_CONFLICT_SIGNAL_H */ /* Define to 1 if you have the <cygwin/signal.h> header file. */ /* #undef HAVE_CYGWIN_SIGNAL_H */ /* Define to 1 if you have the declaration of `backtrace', and to 0 if you don't. */ /* #undef HAVE_DECL_BACKTRACE */ /* Define to 1 if you have the declaration of `cfree', and to 0 if you don't. */ #define HAVE_DECL_CFREE 0 /* Define to 1 if you have the declaration of `memalign', and to 0 if you don't. */ #define HAVE_DECL_MEMALIGN 0 /* Define to 1 if you have the declaration of `nanosleep', and to 0 if you don't. */ #define HAVE_DECL_NANOSLEEP 0 /* Define to 1 if you have the declaration of `posix_memalign', and to 0 if you don't. 
*/ #define HAVE_DECL_POSIX_MEMALIGN 0 /* Define to 1 if you have the declaration of `pvalloc', and to 0 if you don't. */ #define HAVE_DECL_PVALLOC 0 /* Define to 1 if you have the declaration of `sleep', and to 0 if you don't. */ #define HAVE_DECL_SLEEP 0 /* Define to 1 if you have the declaration of `uname', and to 0 if you don't. */ #define HAVE_DECL_UNAME 0 /* Define to 1 if you have the declaration of `valloc', and to 0 if you don't. */ #define HAVE_DECL_VALLOC 0 /* Define to 1 if you have the <dlfcn.h> header file. */ /* #undef HAVE_DLFCN_H */ /* Define to 1 if the system has the type `Elf32_Versym'. */ /* #undef HAVE_ELF32_VERSYM */ /* Define to 1 if you have the <execinfo.h> header file. */ /* #undef HAVE_EXECINFO_H */ /* Define to 1 if you have the <fcntl.h> header file. */ #define HAVE_FCNTL_H 1 /* Define to 1 if you have the <features.h> header file. */ /* #undef HAVE_FEATURES_H */ /* Define to 1 if you have the `fork' function. */ /* #undef HAVE_FORK */ /* Define to 1 if you have the `geteuid' function. */ /* #undef HAVE_GETEUID */ /* Define to 1 if you have the <glob.h> header file. */ /* #undef HAVE_GLOB_H */ /* Define to 1 if you have the <grp.h> header file. */ /* #undef HAVE_GRP_H */ /* Define to 1 if you have the <inttypes.h> header file. */ #if defined(_MSC_VER) && _MSC_VER >= 1900 #define HAVE_INTTYPES_H 1 #endif /* Define to 1 if you have the <libunwind.h> header file. */ /* #undef HAVE_LIBUNWIND_H */ /* Define to 1 if you have the <linux/ptrace.h> header file. */ /* #undef HAVE_LINUX_PTRACE_H */ /* Define if this is Linux that has SIGEV_THREAD_ID */ /* #undef HAVE_LINUX_SIGEV_THREAD_ID */ /* Define to 1 if you have the <malloc.h> header file. */ #define HAVE_MALLOC_H 1 /* Define to 1 if you have the <memory.h> header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have a working `mmap' system call. 
*/ /* #undef HAVE_MMAP */ /* define if the compiler implements namespaces */ #define HAVE_NAMESPACES 1 /* Define to 1 if you have the <poll.h> header file. */ /* #undef HAVE_POLL_H */ /* define if libc has program_invocation_name */ /* #undef HAVE_PROGRAM_INVOCATION_NAME */ /* Define if you have POSIX threads libraries and header files. */ /* #undef HAVE_PTHREAD */ /* defined to 1 if pthread symbols are exposed even without include pthread.h */ /* #undef HAVE_PTHREAD_DESPITE_ASKING_FOR */ /* Define to 1 if you have the <pwd.h> header file. */ /* #undef HAVE_PWD_H */ /* Define to 1 if you have the `sbrk' function. */ /* #undef HAVE_SBRK */ /* Define to 1 if you have the <sched.h> header file. */ /* #undef HAVE_SCHED_H */ /* Define to 1 if you have the <stdint.h> header file. */ #if defined(_MSC_VER) && _MSC_VER >= 1900 #define HAVE_STDINT_H 1 #endif /* Define to 1 if you have the <stdlib.h> header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the <strings.h> header file. */ /* #undef HAVE_STRINGS_H */ /* Define to 1 if you have the <string.h> header file. */ #define HAVE_STRING_H 1 /* Define to 1 if the system has the type `struct mallinfo'. */ /* #undef HAVE_STRUCT_MALLINFO */ /* Define to 1 if you have the <sys/cdefs.h> header file. */ /* #undef HAVE_SYS_CDEFS_H */ /* Define to 1 if you have the <sys/prctl.h> header file. */ /* #undef HAVE_SYS_PRCTL_H */ /* Define to 1 if you have the <sys/resource.h> header file. */ /* #undef HAVE_SYS_RESOURCE_H */ /* Define to 1 if you have the <sys/socket.h> header file. */ /* #undef HAVE_SYS_SOCKET_H */ /* Define to 1 if you have the <sys/stat.h> header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the <sys/syscall.h> header file. */ /* #undef HAVE_SYS_SYSCALL_H */ /* Define to 1 if you have the <sys/types.h> header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to 1 if you have the <sys/ucontext.h> header file. 
*/ /* #undef HAVE_SYS_UCONTEXT_H */ /* Define to 1 if you have the <sys/wait.h> header file. */ /* #undef HAVE_SYS_WAIT_H */ /* Define to 1 if compiler supports __thread */ #define HAVE_TLS 1 /* Define to 1 if you have the <ucontext.h> header file. */ /* #undef HAVE_UCONTEXT_H */ /* Define to 1 if you have the <unistd.h> header file. */ /* #undef HAVE_UNISTD_H */ /* Whether <unwind.h> contains _Unwind_Backtrace */ /* #undef HAVE_UNWIND_BACKTRACE */ /* Define to 1 if you have the <unwind.h> header file. */ /* #undef HAVE_UNWIND_H */ /* Define to 1 if you have the <valgrind.h> header file. */ /* #undef HAVE_VALGRIND_H */ /* define if your compiler has __attribute__ */ /* #undef HAVE___ATTRIBUTE__ */ /* define if your compiler supports alignment of functions */ /* #undef HAVE___ATTRIBUTE__ALIGNED_FN */ /* Define to 1 if compiler supports __environ */ /* #undef HAVE___ENVIRON */ /* Define to 1 if the system has the type `__int64'. */ #define HAVE___INT64 1 /* Define to 1 if you have the `__sbrk' function. */ /* #undef HAVE___SBRK */ /* prefix where we look for installed files */ /* #undef INSTALL_PREFIX */ /* Define to 1 if int32_t is equivalent to intptr_t */ #ifndef _WIN64 #define INT32_EQUALS_INTPTR 1 #endif /* Define to the sub-directory where libtool stores uninstalled libraries. */ /* #undef LT_OBJDIR */ /* Name of package */ #define PACKAGE "gperftools" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "gperftools@googlegroups.com" /* Define to the full name of this package. */ #define PACKAGE_NAME "gperftools" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "gperftools 2.7" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "gperftools" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. 
*/ #define PACKAGE_VERSION "2.7" /* How to access the PC from a struct ucontext */ /* #undef PC_FROM_UCONTEXT */ /* Always the empty-string on non-windows systems. On windows, should be "__declspec(dllexport)". This way, when we compile the dll, we export our functions/classes. It's safe to define this here because config.h is only used internally, to compile the DLL, and every DLL source file #includes "config.h" before anything else. */ #ifndef PERFTOOLS_DLL_DECL # define PERFTOOLS_IS_A_DLL 1 /* not set if you're statically linking */ # define PERFTOOLS_DLL_DECL __declspec(dllexport) # define PERFTOOLS_DLL_DECL_FOR_UNITTESTS __declspec(dllimport) #endif /* printf format code for printing a size_t and ssize_t */ #ifdef _WIN64 #define PRIdS "lld" #else #define PRIdS "d" #endif /* printf format code for printing a size_t and ssize_t */ #ifdef _WIN64 #define PRIuS "llu" #else #define PRIuS "u" #endif /* printf format code for printing a size_t and ssize_t */ #ifdef _WIN64 #define PRIxS "llx" #else #define PRIxS "x" #endif /* Mark the systems where we know it's bad if pthreads runs too early before main (before threads are initialized, presumably). */ #ifdef __FreeBSD__ #define PTHREADS_CRASHES_IF_RUN_TOO_EARLY 1 #endif /* Define to necessary symbol if this constant uses a non-standard name on your system. */ /* #undef PTHREAD_CREATE_JOINABLE */ /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* the namespace where STL code like vector<> is defined */ #define STL_NAMESPACE std /* Define 8 bytes of allocation alignment for tcmalloc */ /* #undef TCMALLOC_ALIGN_8BYTES */ /* Define internal page size for tcmalloc as number of left bitshift */ /* #undef TCMALLOC_PAGE_SIZE_SHIFT */ /* Version number of package */ #define VERSION "2.7" /* C99 says: define this to get the PRI... 
macros from stdint.h */ #ifndef __STDC_FORMAT_MACROS # define __STDC_FORMAT_MACROS 1 #endif /* Define to `__inline__' or `__inline' if that's what the C compiler calls it, or to nothing if 'inline' is not supported under any name. */ #ifndef __cplusplus /* #undef inline */ #endif // --------------------------------------------------------------------- // Extra stuff not found in config.h.in // This must be defined before the windows.h is included. We need at // least 0x0400 for mutex.h to have access to TryLock, and at least // 0x0501 for patch_functions.cc to have access to GetModuleHandleEx. // (This latter is an optimization we could take out if need be.) #ifndef _WIN32_WINNT # define _WIN32_WINNT 0x0501 #endif #if defined(_MSC_VER) && _MSC_VER >= 1900 #define HAVE_SNPRINTF 1 #endif // We want to make sure not to ever try to #include heap-checker.h #define NO_HEAP_CHECK 1 // TODO(csilvers): include windows/port.h in every relevant source file instead? #include "windows/port.h" #endif /* GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/override_functions.cc
.cc
4,949
174
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // --- // Author: Mike Belshe // // To link tcmalloc into a EXE or DLL statically without using the patching // facility, we can take a stock libcmt and remove all the allocator functions. // When we relink the EXE/DLL with the modified libcmt and tcmalloc, a few // functions are missing. 
This file contains the additional overrides which // are required in the VS2005 libcmt in order to link the modified libcmt. // // See also // http://groups.google.com/group/google-perftools/browse_thread/thread/41cd3710af85e57b #include <config.h> #ifndef _WIN32 # error You should only be including this file in a windows environment! #endif #ifndef WIN32_OVERRIDE_ALLOCATORS # error This file is intended for use when overriding allocators #endif #include "tcmalloc.cc" extern "C" { void* _malloc_base(size_t size) { return malloc(size); } void _free_base(void* p) { free(p); } void* _calloc_base(size_t n, size_t size) { return calloc(n, size); } void* _recalloc(void* old_ptr, size_t n, size_t size) { // Ensure that (n * size) does not overflow if (!(n == 0 || (std::numeric_limits<size_t>::max)() / n >= size)) { errno = ENOMEM; return NULL; } const size_t old_size = tc_malloc_size(old_ptr); const size_t new_size = n * size; void* new_ptr = realloc(old_ptr, new_size); // If the reallocation succeeded and the new block is larger, zero-fill the // new bytes: if (new_ptr != NULL && new_size > old_size) { memset(static_cast<char*>(new_ptr) + old_size, 0, tc_nallocx(new_size, 0) - old_size); } return new_ptr; } void* _calloc_impl(size_t n, size_t size) { return calloc(n, size); } size_t _msize(void* p) { return MallocExtension::instance()->GetAllocatedSize(p); } HANDLE __acrt_heap = nullptr; bool __acrt_initialize_heap() { new TCMallocGuard(); return true; } bool __acrt_uninitialize_heap(bool) { return true; } intptr_t _get_heap_handle() { return 0; } HANDLE __acrt_getheap() { return __acrt_heap; } // The CRT heap initialization stub. int _heap_init() { // We intentionally leak this object. It lasts for the process // lifetime. Trying to teardown at _heap_term() is so late that // you can't do anything useful anyway. new TCMallocGuard(); return 1; } // The CRT heap cleanup stub. 
void _heap_term() { } // We set this to 1 because part of the CRT uses a check of _crtheap != 0 // to test whether the CRT has been initialized. Once we've ripped out // the allocators from libcmt, we need to provide this definition so that // the rest of the CRT is still usable. void* _crtheap = reinterpret_cast<void*>(1); int _set_new_mode(int flag) { return tc_set_new_mode(flag); } int _query_new_mode() { return tc_query_new_mode(); } } // extern "C" #ifndef NDEBUG #undef malloc #undef free #undef calloc int _CrtDbgReport(int, const char*, int, const char*, const char*, ...) { return 0; } int _CrtDbgReportW(int, const wchar_t*, int, const wchar_t*, const wchar_t*, ...) { return 0; } int _CrtSetReportMode(int, int) { return 0; } extern "C" void* _malloc_dbg(size_t size, int , const char*, int) { return malloc(size); } extern "C" void _free_dbg(void* ptr, int) { free(ptr); } extern "C" void* _calloc_dbg(size_t n, size_t size, int, const char*, int) { return calloc(n, size); } #endif // NDEBUG
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/system-alloc.cc
.cc
7,048
205
// Copyright (c) 2013, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Petr Hosek #ifndef _WIN32 # error You should only be including windows/system-alloc.cc in a windows environment! 
#endif #include <config.h> #include <windows.h> #include <algorithm> // std::min #include <gperftools/malloc_extension.h> #include "base/logging.h" #include "base/spinlock.h" #include "internal_logging.h" #include "system-alloc.h" static SpinLock spinlock(SpinLock::LINKER_INITIALIZED); // The current system allocator declaration SysAllocator* tcmalloc_sys_alloc = NULL; // Number of bytes taken from system. size_t TCMalloc_SystemTaken = 0; class VirtualSysAllocator : public SysAllocator { public: VirtualSysAllocator() : SysAllocator() { } void* Alloc(size_t size, size_t *actual_size, size_t alignment); }; static char virtual_space[sizeof(VirtualSysAllocator)]; // This is mostly like MmapSysAllocator::Alloc, except it does these weird // munmap's in the middle of the page, which is forbidden in windows. void* VirtualSysAllocator::Alloc(size_t size, size_t *actual_size, size_t alignment) { // Align on the pagesize boundary const int pagesize = getpagesize(); if (alignment < pagesize) alignment = pagesize; size = ((size + alignment - 1) / alignment) * alignment; // Report the total number of bytes the OS actually delivered. This might be // greater than |size| because of alignment concerns. The full size is // necessary so that adjacent spans can be coalesced. // TODO(antonm): proper processing of alignments // in actual_size and decommitting. if (actual_size) { *actual_size = size; } // We currently do not support alignments larger than the pagesize or // alignments that are not multiples of the pagesize after being floored. // If this ability is needed it can be done by the caller (assuming it knows // the page size). assert(alignment <= pagesize); void* result = VirtualAlloc(0, size, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE); if (result == NULL) return NULL; // If the result is not aligned memory fragmentation will result which can // lead to pathological memory use. 
assert((reinterpret_cast<uintptr_t>(result) & (alignment - 1)) == 0); return result; } #ifdef _MSC_VER extern "C" SysAllocator* tc_get_sysalloc_override(SysAllocator *def); extern "C" SysAllocator* tc_get_sysalloc_default(SysAllocator *def) { return def; } #if defined(_M_IX86) #pragma comment(linker, "/alternatename:_tc_get_sysalloc_override=_tc_get_sysalloc_default") #elif defined(_M_X64) #pragma comment(linker, "/alternatename:tc_get_sysalloc_override=tc_get_sysalloc_default") #endif #else // !_MSC_VER extern "C" ATTRIBUTE_NOINLINE SysAllocator* tc_get_sysalloc_override(SysAllocator *def) { return def; } #endif static bool system_alloc_inited = false; void InitSystemAllocators(void) { VirtualSysAllocator *alloc = new (virtual_space) VirtualSysAllocator(); tcmalloc_sys_alloc = tc_get_sysalloc_override(alloc); } extern PERFTOOLS_DLL_DECL void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { SpinLockHolder lock_holder(&spinlock); if (!system_alloc_inited) { InitSystemAllocators(); system_alloc_inited = true; } void* result = tcmalloc_sys_alloc->Alloc(size, actual_size, alignment); if (result != NULL) { if (actual_size) { TCMalloc_SystemTaken += *actual_size; } else { TCMalloc_SystemTaken += size; } } return result; } extern PERFTOOLS_DLL_DECL bool TCMalloc_SystemRelease(void* start, size_t length) { if (VirtualFree(start, length, MEM_DECOMMIT)) return true; // The decommit may fail if the memory region consists of allocations // from more than one call to VirtualAlloc. In this case, fall back to // using VirtualQuery to retrieve the allocation boundaries and decommit // them each individually. 
char* ptr = static_cast<char*>(start); char* end = ptr + length; MEMORY_BASIC_INFORMATION info; while (ptr < end) { size_t resultSize = VirtualQuery(ptr, &info, sizeof(info)); assert(resultSize == sizeof(info)); size_t decommitSize = std::min<size_t>(info.RegionSize, end - ptr); BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT); assert(success == TRUE); ptr += decommitSize; } return true; } extern PERFTOOLS_DLL_DECL void TCMalloc_SystemCommit(void* start, size_t length) { if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start) return; // The commit may fail if the memory region consists of allocations // from more than one call to VirtualAlloc. In this case, fall back to // using VirtualQuery to retrieve the allocation boundaries and commit them // each individually. char* ptr = static_cast<char*>(start); char* end = ptr + length; MEMORY_BASIC_INFORMATION info; while (ptr < end) { size_t resultSize = VirtualQuery(ptr, &info, sizeof(info)); assert(resultSize == sizeof(info)); size_t commitSize = std::min<size_t>(info.RegionSize, end - ptr); void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT, PAGE_READWRITE); assert(newAddress == ptr); ptr += commitSize; } } bool RegisterSystemAllocator(SysAllocator *allocator, int priority) { return false; // we don't allow registration on windows, right now } void DumpSystemAllocatorStats(TCMalloc_Printer* printer) { // We don't dump stats on windows, right now }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/port.h
.h
16,012
500
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Craig Silverstein * * These are some portability typedefs and defines to make it a bit * easier to compile this code under VC++. 
* * Several of these are taken from glib: * http://developer.gnome.org/doc/API/glib/glib-windows-compatability-functions.html */ #ifndef GOOGLE_BASE_WINDOWS_H_ #define GOOGLE_BASE_WINDOWS_H_ /* You should never include this file directly, but always include it from either config.h (MSVC) or mingw.h (MinGW/msys). */ #if !defined(GOOGLE_PERFTOOLS_WINDOWS_CONFIG_H_) && \ !defined(GOOGLE_PERFTOOLS_WINDOWS_MINGW_H_) # error "port.h should only be included from config.h or mingw.h" #endif #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN /* We always want minimal includes */ #endif #include <windows.h> #include <io.h> /* because we so often use open/close/etc */ #include <direct.h> /* for _getcwd */ #include <process.h> /* for _getpid */ #include <limits.h> /* for PATH_MAX */ #include <stdarg.h> /* for va_list */ #include <stdio.h> /* need this to override stdio's (v)snprintf */ #include <sys/types.h> /* for _off_t */ #include <assert.h> #include <stdlib.h> /* for rand, srand, _strtoxxx */ #if defined(_MSC_VER) && _MSC_VER >= 1900 #define _TIMESPEC_DEFINED #include <time.h> #endif /* * 4018: signed/unsigned mismatch is common (and ok for signed_i < unsigned_i) * 4244: otherwise we get problems when subtracting two size_t's to an int * 4288: VC++7 gets confused when a var is defined in a loop and then after it * 4267: too many false positives for "conversion gives possible data loss" * 4290: it's ok windows ignores the "throw" directive * 4996: Yes, we're ok using "unsafe" functions like vsnprintf and getenv() * 4146: internal_logging.cc intentionally negates an unsigned value */ #ifdef _MSC_VER #pragma warning(disable:4018 4244 4288 4267 4290 4996 4146) #endif #ifndef __cplusplus /* MSVC does not support C99 */ # if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901L # ifdef _MSC_VER # define inline __inline # else # define inline static # endif # endif #endif #ifdef __cplusplus # define EXTERN_C extern "C" #else # define EXTERN_C extern #endif 
/* ----------------------------------- BASIC TYPES */ #ifndef HAVE_STDINT_H #ifndef HAVE___INT64 /* we need to have all the __intX names */ # error Do not know how to set up type aliases. Edit port.h for your system. #endif typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #endif /* #ifndef HAVE_STDINT_H */ /* I guess MSVC's <types.h> doesn't include ssize_t by default? */ #ifdef _MSC_VER typedef intptr_t ssize_t; #endif /* ----------------------------------- THREADS */ #ifndef HAVE_PTHREAD /* not true for MSVC, but may be true for MSYS */ typedef DWORD pthread_t; typedef DWORD pthread_key_t; typedef LONG pthread_once_t; enum { PTHREAD_ONCE_INIT = 0 }; /* important that this be 0! for SpinLock */ inline pthread_t pthread_self(void) { return GetCurrentThreadId(); } #ifdef __cplusplus inline bool pthread_equal(pthread_t left, pthread_t right) { return left == right; } /* * windows/port.h defines compatibility APIs for several .h files, which * we therefore shouldn't be #including directly. This hack keeps us from * doing so. TODO(csilvers): do something more principled. 
*/ #define GOOGLE_MAYBE_THREADS_H_ 1 /* This replaces maybe_threads.{h,cc} */ EXTERN_C pthread_key_t PthreadKeyCreate(void (*destr_fn)(void*)); /* port.cc */ inline int perftools_pthread_key_create(pthread_key_t *pkey, void (*destructor)(void*)) { pthread_key_t key = PthreadKeyCreate(destructor); if (key != TLS_OUT_OF_INDEXES) { *(pkey) = key; return 0; } else { return GetLastError(); } } inline void* perftools_pthread_getspecific(DWORD key) { DWORD err = GetLastError(); void* rv = TlsGetValue(key); if (err) SetLastError(err); return rv; } inline int perftools_pthread_setspecific(pthread_key_t key, const void *value) { if (TlsSetValue(key, (LPVOID)value)) return 0; else return GetLastError(); } EXTERN_C int perftools_pthread_once(pthread_once_t *once_control, void (*init_routine)(void)); #endif /* __cplusplus */ inline void sched_yield(void) { Sleep(0); } #endif /* HAVE_PTHREAD */ /* * __declspec(thread) isn't usable in a dll opened via LoadLibrary(). * But it doesn't work to LoadLibrary() us anyway, because of all the * things we need to do before main()! So this kind of TLS is safe for us. */ #define __thread __declspec(thread) /* * This code is obsolete, but I keep it around in case we are ever in * an environment where we can't or don't want to use google spinlocks * (from base/spinlock.{h,cc}). In that case, uncommenting this out, * and removing spinlock.cc from the build, should be enough to revert * back to using native spinlocks. */ #if 0 // Windows uses a spinlock internally for its mutexes, making our life easy! // However, the Windows spinlock must always be initialized, making life hard, // since we want LINKER_INITIALIZED. We work around this by having the // linker initialize a bool to 0, and check that before accessing the mutex. 
// This replaces spinlock.{h,cc}, and all the stuff it depends on (atomicops) #ifdef __cplusplus class SpinLock { public: SpinLock() : initialize_token_(PTHREAD_ONCE_INIT) {} // Used for global SpinLock vars (see base/spinlock.h for more details). enum StaticInitializer { LINKER_INITIALIZED }; explicit SpinLock(StaticInitializer) : initialize_token_(PTHREAD_ONCE_INIT) { perftools_pthread_once(&initialize_token_, InitializeMutex); } // It's important SpinLock not have a destructor: otherwise we run // into problems when the main thread has exited, but other threads // are still running and try to access a main-thread spinlock. This // means we leak mutex_ (we should call DeleteCriticalSection() // here). However, I've verified that all SpinLocks used in // perftools have program-long scope anyway, so the leak is // perfectly fine. But be aware of this for the future! void Lock() { // You'd thionk this would be unnecessary, since we call // InitializeMutex() in our constructor. But sometimes Lock() can // be called before our constructor is! This can only happen in // global constructors, when this is a global. If we live in // bar.cc, and some global constructor in foo.cc calls a routine // in bar.cc that calls this->Lock(), then Lock() may well run // before our global constructor does. To protect against that, // we do this check. For SpinLock objects created after main() // has started, this pthread_once call will always be a noop. perftools_pthread_once(&initialize_token_, InitializeMutex); EnterCriticalSection(&mutex_); } void Unlock() { LeaveCriticalSection(&mutex_); } // Used in assertion checks: assert(lock.IsHeld()) (see base/spinlock.h). inline bool IsHeld() const { // This works, but probes undocumented internals, so I've commented it out. // c.f. 
http://msdn.microsoft.com/msdnmag/issues/03/12/CriticalSections/ //return mutex_.LockCount>=0 && mutex_.OwningThread==GetCurrentThreadId(); return true; } private: void InitializeMutex() { InitializeCriticalSection(&mutex_); } pthread_once_t initialize_token_; CRITICAL_SECTION mutex_; }; class SpinLockHolder { // Acquires a spinlock for as long as the scope lasts private: SpinLock* lock_; public: inline explicit SpinLockHolder(SpinLock* l) : lock_(l) { l->Lock(); } inline ~SpinLockHolder() { lock_->Unlock(); } }; #endif // #ifdef __cplusplus // This keeps us from using base/spinlock.h's implementation of SpinLock. #define BASE_SPINLOCK_H_ 1 #endif /* #if 0 */ /* ----------------------------------- MMAP and other memory allocation */ #ifndef HAVE_MMAP /* not true for MSVC, but may be true for msys */ #define MAP_FAILED 0 #define MREMAP_FIXED 2 /* the value in linux, though it doesn't really matter */ /* These, when combined with the mmap invariants below, yield the proper action */ #define PROT_READ PAGE_READWRITE #define PROT_WRITE PAGE_READWRITE #define MAP_ANONYMOUS MEM_RESERVE #define MAP_PRIVATE MEM_COMMIT #define MAP_SHARED MEM_RESERVE /* value of this #define is 100% arbitrary */ #if __STDC__ && !defined(__MINGW32__) typedef _off_t off_t; #endif /* VirtualAlloc only replaces for mmap when certain invariants are kept. */ inline void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { if (addr == NULL && fd == -1 && offset == 0 && prot == (PROT_READ|PROT_WRITE) && flags == (MAP_PRIVATE|MAP_ANONYMOUS)) { return VirtualAlloc(0, length, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); } else { return NULL; } } inline int munmap(void *addr, size_t length) { return VirtualFree(addr, 0, MEM_RELEASE) ? 
0 : -1; } #endif /* HAVE_MMAP */ /* We could maybe use VirtualAlloc for sbrk as well, but no need */ inline void *sbrk(intptr_t increment) { // sbrk returns -1 on failure return (void*)-1; } /* ----------------------------------- STRING ROUTINES */ /* * We can't just use _vsnprintf and _snprintf as drop-in-replacements, * because they don't always NUL-terminate. :-( We also can't use the * name vsnprintf, since windows defines that (but not snprintf (!)). */ #if defined(_MSC_VER) && _MSC_VER >= 1400 /* We can use safe CRT functions, which the required functionality */ inline int perftools_vsnprintf(char *str, size_t size, const char *format, va_list ap) { return vsnprintf_s(str, size, _TRUNCATE, format, ap); } #else inline int perftools_vsnprintf(char *str, size_t size, const char *format, va_list ap) { if (size == 0) /* not even room for a \0? */ return -1; /* not what C99 says to do, but what windows does */ str[size-1] = '\0'; return _vsnprintf(str, size-1, format, ap); } #endif #ifndef HAVE_SNPRINTF inline int snprintf(char *str, size_t size, const char *format, ...) 
{ va_list ap; int r; va_start(ap, format); r = perftools_vsnprintf(str, size, format, ap); va_end(ap); return r; } #endif #ifndef HAVE_INTTYPES_H #define PRIx64 "I64x" #define SCNx64 "I64x" #define PRId64 "I64d" #define SCNd64 "I64d" #define PRIu64 "I64u" #ifdef _WIN64 # define PRIuPTR "llu" # define PRIxPTR "llx" #else # define PRIuPTR "lu" # define PRIxPTR "lx" #endif #endif /* ----------------------------------- FILE IO */ #ifndef PATH_MAX #define PATH_MAX 1024 #endif #ifndef __MINGW32__ enum { STDIN_FILENO = 0, STDOUT_FILENO = 1, STDERR_FILENO = 2 }; #endif #ifndef O_RDONLY #define O_RDONLY _O_RDONLY #endif #if __STDC__ && !defined(__MINGW32__) /* These functions are considered non-standard */ inline int access(const char *pathname, int mode) { return _access(pathname, mode); } inline int open(const char *pathname, int flags, int mode = 0) { return _open(pathname, flags, mode); } inline int close(int fd) { return _close(fd); } inline ssize_t read(int fd, void *buf, size_t count) { return _read(fd, buf, count); } inline ssize_t write(int fd, const void *buf, size_t count) { return _write(fd, buf, count); } inline off_t lseek(int fd, off_t offset, int whence) { return _lseek(fd, offset, whence); } inline char *getcwd(char *buf, size_t size) { return _getcwd(buf, size); } inline int mkdir(const char *pathname, int) { return _mkdir(pathname); } inline FILE *popen(const char *command, const char *type) { return _popen(command, type); } inline int pclose(FILE *stream) { return _pclose(stream); } #endif EXTERN_C PERFTOOLS_DLL_DECL void WriteToStderr(const char* buf, int len); /* ----------------------------------- SYSTEM/PROCESS */ #ifndef HAVE_PID_T typedef int pid_t; #endif #if __STDC__ && !defined(__MINGW32__) inline pid_t getpid(void) { return _getpid(); } #endif inline pid_t getppid(void) { return 0; } /* Handle case when poll is used to simulate sleep. 
*/ inline int poll(struct pollfd* fds, int nfds, int timeout) { assert(fds == NULL); assert(nfds == 0); Sleep(timeout); return 0; } EXTERN_C PERFTOOLS_DLL_DECL int getpagesize(); /* in port.cc */ /* ----------------------------------- OTHER */ inline void srandom(unsigned int seed) { srand(seed); } inline long random(void) { return rand(); } #ifndef HAVE_DECL_SLEEP #define HAVE_DECL_SLEEP 0 #endif #if !HAVE_DECL_SLEEP inline unsigned int sleep(unsigned int seconds) { Sleep(seconds * 1000); return 0; } #endif // mingw64 seems to define timespec (though mingw.org mingw doesn't), // protected by the _TIMESPEC_DEFINED macro. #ifndef _TIMESPEC_DEFINED struct timespec { int tv_sec; int tv_nsec; }; #endif #ifndef HAVE_DECL_NANOSLEEP #define HAVE_DECL_NANOSLEEP 0 #endif // latest mingw64 has nanosleep. Earlier mingw and MSVC do not #if !HAVE_DECL_NANOSLEEP inline int nanosleep(const struct timespec *req, struct timespec *rem) { Sleep(req->tv_sec * 1000 + req->tv_nsec / 1000000); return 0; } #endif #ifndef __MINGW32__ #if defined(_MSC_VER) && _MSC_VER < 1800 inline long long int strtoll(const char *nptr, char **endptr, int base) { return _strtoi64(nptr, endptr, base); } inline unsigned long long int strtoull(const char *nptr, char **endptr, int base) { return _strtoui64(nptr, endptr, base); } inline long long int strtoq(const char *nptr, char **endptr, int base) { return _strtoi64(nptr, endptr, base); } #endif inline unsigned long long int strtouq(const char *nptr, char **endptr, int base) { return _strtoui64(nptr, endptr, base); } inline long long atoll(const char *nptr) { return _atoi64(nptr); } #endif #define __THROW throw() /* ----------------------------------- TCMALLOC-SPECIFIC */ /* tcmalloc.cc calls this so we can patch VirtualAlloc() et al. */ extern void PatchWindowsFunctions(); #endif /* _WIN32 */ #undef inline #undef EXTERN_C #endif /* GOOGLE_BASE_WINDOWS_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/nm-pdb.c
.c
8,830
274
/* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: David Vitek * * Dump function addresses using Microsoft debug symbols. This works * on PDB files. Note that this program will download symbols to * c:\websymbols without asking. 
*/ #define WIN32_LEAN_AND_MEAN #define _CRT_SECURE_NO_WARNINGS #define _CRT_SECURE_NO_DEPRECATE #include <stdio.h> #include <stdlib.h> #include <string.h> // for _strdup #include <windows.h> #include <dbghelp.h> // Unfortunately, there is no versioning info in dbghelp.h so I can // tell whether it has an old-style (circa VC7.1) IMAGEHLP_MODULE64 // struct, with only a few fields, or a new-style (circa VC8) // IMAGEHLP_MODULE64, with lots of fields. These fields are just used // for debugging, so it's fine to just assume the smaller struct, but // for most people, using a modern MSVC, the full struct is available. // If you are one of those people and would like this extra debugging // info, you can uncomment the line below. //#define VC8_OR_ABOVE #define SEARCH_CAP (1024*1024) #define WEBSYM "SRV*c:\\websymbols*http://msdl.microsoft.com/download/symbols" typedef struct { char *name; ULONG64 addr; ULONG flags; } SYM; typedef struct { ULONG64 module_base; SYM *syms; DWORD syms_len; DWORD syms_cap; } SYM_CONTEXT; static int sym_cmp(const void *_s1, const void *_s2) { const SYM *s1 = (const SYM *)_s1; const SYM *s2 = (const SYM *)_s2; if (s1->addr < s2->addr) return -1; if (s1->addr > s2->addr) return 1; return 0; } static BOOL CALLBACK EnumSymProc(PSYMBOL_INFO symbol_info, ULONG symbol_size, PVOID user_context) { SYM_CONTEXT *ctx = (SYM_CONTEXT*)user_context; if (symbol_info->Address < ctx->module_base || (symbol_info->Flags & SYMFLAG_TLSREL)) { return TRUE; } if (ctx->syms_len == ctx->syms_cap) { if (!ctx->syms_cap) ctx->syms_cap++; ctx->syms_cap *= 2; ctx->syms = realloc(ctx->syms, sizeof(ctx->syms[0]) * ctx->syms_cap); } ctx->syms[ctx->syms_len].name = _strdup(symbol_info->Name); ctx->syms[ctx->syms_len].addr = symbol_info->Address; ctx->syms[ctx->syms_len].flags = symbol_info->Flags; ctx->syms_len++; return TRUE; } static void MaybePrint(const char* var, const char* description) { if (var[0]) printf("%s: %s\n", description, var); } static void 
PrintAvailability(BOOL var, const char *description) { printf("%s: %s\n", description, (var ? "Available" : "Not available")); } static void ShowSymbolInfo(HANDLE process, ULONG64 module_base) { /* Get module information. */ IMAGEHLP_MODULE64 module_info; BOOL getmoduleinfo_rv; printf("Load Address: %I64x\n", module_base); memset(&module_info, 0, sizeof(module_info)); module_info.SizeOfStruct = sizeof(module_info); getmoduleinfo_rv = SymGetModuleInfo64(process, module_base, &module_info); if (!getmoduleinfo_rv) { printf("Error: SymGetModuleInfo64() failed. Error code: %u\n", GetLastError()); return; } /* Display information about symbols, based on kind of symbol. */ switch (module_info.SymType) { case SymNone: printf(("No symbols available for the module.\n")); break; case SymExport: printf(("Loaded symbols: Exports\n")); break; case SymCoff: printf(("Loaded symbols: COFF\n")); break; case SymCv: printf(("Loaded symbols: CodeView\n")); break; case SymSym: printf(("Loaded symbols: SYM\n")); break; case SymVirtual: printf(("Loaded symbols: Virtual\n")); break; case SymPdb: printf(("Loaded symbols: PDB\n")); break; case SymDia: printf(("Loaded symbols: DIA\n")); break; case SymDeferred: printf(("Loaded symbols: Deferred\n")); /* not actually loaded */ break; default: printf(("Loaded symbols: Unknown format.\n")); break; } MaybePrint("Image name", module_info.ImageName); MaybePrint("Loaded image name", module_info.LoadedImageName); #ifdef VC8_OR_ABOVE /* TODO(csilvers): figure out how to tell */ MaybePrint("PDB file name", module_info.LoadedPdbName); if (module_info.PdbUnmatched || module_info.DbgUnmatched) { /* This can only happen if the debug information is contained in a * separate file (.DBG or .PDB) */ printf(("Warning: Unmatched symbols.\n")); } #endif /* Contents */ #ifdef VC8_OR_ABOVE /* TODO(csilvers): figure out how to tell */ PrintAvailability("Line numbers", module_info.LineNumbers); PrintAvailability("Global symbols", module_info.GlobalSymbols); 
PrintAvailability("Type information", module_info.TypeInfo); #endif } void usage() { fprintf(stderr, "usage: nm-pdb [-C|--demangle] <module or filename>\n"); } int main(int argc, char *argv[]) { DWORD error; HANDLE process; ULONG64 module_base; SYM_CONTEXT ctx; int i; char* search; char* filename = NULL; int rv = 0; /* We may add SYMOPT_UNDNAME if --demangle is specified: */ DWORD symopts = SYMOPT_DEFERRED_LOADS | SYMOPT_DEBUG; for (i = 1; i < argc; i++) { if (strcmp(argv[i], "--demangle") == 0 || strcmp(argv[i], "-C") == 0) { symopts |= SYMOPT_UNDNAME; } else if (strcmp(argv[i], "--help") == 0) { usage(); exit(0); } else { break; } } if (i != argc - 1) { usage(); exit(1); } filename = argv[i]; process = GetCurrentProcess(); if (!SymInitialize(process, NULL, FALSE)) { error = GetLastError(); fprintf(stderr, "SymInitialize returned error : %d\n", error); return 1; } search = malloc(SEARCH_CAP); if (SymGetSearchPath(process, search, SEARCH_CAP)) { if (strlen(search) + sizeof(";" WEBSYM) > SEARCH_CAP) { fprintf(stderr, "Search path too long\n"); SymCleanup(process); return 1; } strcat(search, ";" WEBSYM); } else { error = GetLastError(); fprintf(stderr, "SymGetSearchPath returned error : %d\n", error); rv = 1; /* An error, but not a fatal one */ strcpy(search, WEBSYM); /* Use a default value */ } if (!SymSetSearchPath(process, search)) { error = GetLastError(); fprintf(stderr, "SymSetSearchPath returned error : %d\n", error); rv = 1; /* An error, but not a fatal one */ } SymSetOptions(symopts); module_base = SymLoadModuleEx(process, NULL, filename, NULL, 0, 0, NULL, 0); if (!module_base) { /* SymLoadModuleEx failed */ error = GetLastError(); fprintf(stderr, "SymLoadModuleEx returned error : %d for %s\n", error, filename); SymCleanup(process); return 1; } ShowSymbolInfo(process, module_base); memset(&ctx, 0, sizeof(ctx)); ctx.module_base = module_base; if (!SymEnumSymbols(process, module_base, NULL, EnumSymProc, &ctx)) { error = GetLastError(); fprintf(stderr, 
"SymEnumSymbols returned error: %d\n", error); rv = 1; } else { DWORD j; qsort(ctx.syms, ctx.syms_len, sizeof(ctx.syms[0]), sym_cmp); for (j = 0; j < ctx.syms_len; j++) { printf("%016I64x X %s\n", ctx.syms[j].addr, ctx.syms[j].name); } /* In a perfect world, maybe we'd clean up ctx's memory? */ } SymUnloadModule64(process, module_base); SymCleanup(process); return rv; }
C
3D
mcellteam/mcell
libs/gperftools/src/windows/mini_disassembler.h
.h
8,133
199
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Joi Sigurdsson * * Definition of MiniDisassembler. 
*/ #ifndef GOOGLE_PERFTOOLS_MINI_DISASSEMBLER_H_ #define GOOGLE_PERFTOOLS_MINI_DISASSEMBLER_H_ #include "config.h" #include <windows.h> #include "mini_disassembler_types.h" // compatibility shim #include "base/logging.h" #define SIDESTEP_ASSERT(cond) RAW_DCHECK(cond, #cond) #define SIDESTEP_LOG(msg) RAW_VLOG(1, msg) namespace sidestep { // This small disassembler is very limited // in its functionality, and in fact does only the bare minimum required by the // preamble patching utility. It may be useful for other purposes, however. // // The limitations include at least the following: // -# No support for coprocessor opcodes, MMX, etc. // -# No machine-readable identification of opcodes or decoding of // assembly parameters. The name of the opcode (as a string) is given, // however, to aid debugging. // // You may ask what this little disassembler actually does, then? The answer is // that it does the following, which is exactly what the patching utility needs: // -# Indicates if opcode is a jump (any kind) or a return (any kind) // because this is important for the patching utility to determine if // a function is too short or there are jumps too early in it for it // to be preamble patched. // -# The opcode length is always calculated, so that the patching utility // can figure out where the next instruction starts, and whether it // already has enough instructions to replace with the absolute jump // to the patching code. // // The usage is quite simple; just create a MiniDisassembler and use its // Disassemble() method. // // If you would like to extend this disassembler, please refer to the // IA-32 Intel® Architecture Software Developer's Manual Volume 2: // Instruction Set Reference for information about operand decoding // etc. class PERFTOOLS_DLL_DECL MiniDisassembler { public: // Creates a new instance and sets defaults. // // @param operand_default_32_bits If true, the default operand size is // set to 32 bits, which is the default under Win32. 
Otherwise it is 16 bits. // @param address_default_32_bits If true, the default address size is // set to 32 bits, which is the default under Win32. Otherwise it is 16 bits. MiniDisassembler(bool operand_default_32_bits, bool address_default_32_bits); // Equivalent to MiniDisassembler(true, true); MiniDisassembler(); // Attempts to disassemble a single instruction starting from the // address in memory it is pointed to. // // @param start Address where disassembly should start. // @param instruction_bytes Variable that will be <b>incremented</b> by // the length in bytes of the instruction. // @return enItJump, enItReturn or enItGeneric on success. enItUnknown // if unable to disassemble, enItUnused if this seems to be an unused // opcode. In the last two (error) cases, cbInstruction will be set // to 0xffffffff. // // @post This instance of the disassembler is ready to be used again, // with unchanged defaults from creation time. InstructionType Disassemble(unsigned char* start, unsigned int& instruction_bytes); private: // Makes the disassembler ready for reuse. void Initialize(); // Sets the flags for address and operand sizes. // @return Number of prefix bytes. InstructionType ProcessPrefixes(unsigned char* start, unsigned int& size); // Sets the flag for whether we have ModR/M, and increments // operand_bytes_ if any are specifies by the opcode directly. // @return Number of opcode bytes. InstructionType ProcessOpcode(unsigned char* start, unsigned int table, unsigned int& size); // Checks the type of the supplied operand. Increments // operand_bytes_ if it directly indicates an immediate etc. // operand. Asserts have_modrm_ if the operand specifies // a ModR/M byte. bool ProcessOperand(int flag_operand); // Increments operand_bytes_ by size specified by ModR/M and // by SIB if present. // @return 0 in case of error, 1 if there is just a ModR/M byte, // 2 if there is a ModR/M byte and a SIB byte. 
bool ProcessModrm(unsigned char* start, unsigned int& size); // Processes the SIB byte that it is pointed to. // @param start Pointer to the SIB byte. // @param mod The mod field from the ModR/M byte. // @return 1 to indicate success (indicates 1 SIB byte) bool ProcessSib(unsigned char* start, unsigned char mod, unsigned int& size); // The instruction type we have decoded from the opcode. InstructionType instruction_type_; // Counts the number of bytes that is occupied by operands in // the current instruction (note: we don't care about how large // operands stored in registers etc. are). unsigned int operand_bytes_; // True iff there is a ModR/M byte in this instruction. bool have_modrm_; // True iff we need to decode the ModR/M byte (sometimes it just // points to a register, we can tell by the addressing mode). bool should_decode_modrm_; // Current operand size is 32 bits if true, 16 bits if false. bool operand_is_32_bits_; // Default operand size is 32 bits if true, 16 bits if false. bool operand_default_is_32_bits_; // Current address size is 32 bits if true, 16 bits if false. bool address_is_32_bits_; // Default address size is 32 bits if true, 16 bits if false. bool address_default_is_32_bits_; // Determines if 64 bit operands are supported (x64). bool operand_default_support_64_bits_; // Current operand size is 64 bits if true, 32 bits if false. bool operand_is_64_bits_; // Huge big opcode table based on the IA-32 manual, defined // in Ia32OpcodeMap.cc static const OpcodeTable s_ia32_opcode_map_[]; // Somewhat smaller table to help with decoding ModR/M bytes // when 16-bit addressing mode is being used. Defined in // Ia32ModrmMap.cc static const ModrmEntry s_ia16_modrm_map_[]; // Somewhat smaller table to help with decoding ModR/M bytes // when 32-bit addressing mode is being used. 
Defined in // Ia32ModrmMap.cc static const ModrmEntry s_ia32_modrm_map_[]; // Indicators of whether we got certain prefixes that certain // silly Intel instructions depend on in nonstandard ways for // their behaviors. bool got_f2_prefix_, got_f3_prefix_, got_66_prefix_; }; }; // namespace sidestep #endif // GOOGLE_PERFTOOLS_MINI_DISASSEMBLER_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/get_mangled_names.cc
.cc
3,145
66
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // --- // Author: Craig Silverstein (opensource@google.com) // When you are porting perftools to a new compiler or architecture // (win64 vs win32) for instance, you'll need to change the mangled // symbol names for operator new and friends at the top of // patch_functions.cc. This file helps you do that. // // It does this by defining these functions with the proper signature. 
// All you need to do is compile this file and the run dumpbin on it. // (See http://msdn.microsoft.com/en-us/library/5x49w699.aspx for more // on dumpbin). To do this in MSVC, use the MSVC commandline shell: // http://msdn.microsoft.com/en-us/library/ms235639(VS.80).aspx) // // The run: // cl /c get_mangled_names.cc // dumpbin /symbols get_mangled_names.obj // // It will print out the mangled (and associated unmangled) names of // the 8 symbols you need to put at the top of patch_functions.cc #include <sys/types.h> // for size_t #include <new> // for nothrow_t static char m; // some dummy memory so new doesn't return NULL. void* operator new(size_t size) { return &m; } void operator delete(void* p) throw() { } void* operator new[](size_t size) { return &m; } void operator delete[](void* p) throw() { } void* operator new(size_t size, const std::nothrow_t&) throw() { return &m; } void operator delete(void* p, const std::nothrow_t&) throw() { } void* operator new[](size_t size, const std::nothrow_t&) throw() { return &m; } void operator delete[](void* p, const std::nothrow_t&) throw() { }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/mingw.h
.h
2,881
75
/* -*- Mode: C; c-basic-offset: 2; indent-tabs-mode: nil -*- */ /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Craig Silverstein * * MinGW is an interesting mix of unix and windows. We use a normal * configure script, but still need the windows port.h to define some * stuff that MinGW doesn't support, like pthreads. 
*/ #ifndef GOOGLE_PERFTOOLS_WINDOWS_MINGW_H_ #define GOOGLE_PERFTOOLS_WINDOWS_MINGW_H_ #ifdef __MINGW32__ // Older version of the mingw msvcrt don't define _aligned_malloc #if __MSVCRT_VERSION__ < 0x0700 # define PERFTOOLS_NO_ALIGNED_MALLOC 1 #endif // This must be defined before the windows.h is included. We need at // least 0x0400 for mutex.h to have access to TryLock, and at least // 0x0501 for patch_functions.cc to have access to GetModuleHandleEx. // (This latter is an optimization we could take out if need be.) #ifndef _WIN32_WINNT # define _WIN32_WINNT 0x0501 #endif #define HAVE_SNPRINTF 1 // Some mingw distributions have a pthreads wrapper, but it doesn't // work as well as native windows spinlocks (at least for us). So // pretend the pthreads wrapper doesn't exist, even when it does. #ifndef HAVE_PTHREAD_DESPITE_ASKING_FOR #undef HAVE_PTHREAD #endif #undef HAVE_FORK #define HAVE_PID_T #include "windows/port.h" #endif /* __MINGW32__ */ #endif /* GOOGLE_PERFTOOLS_WINDOWS_MINGW_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/preamble_patcher_with_stub.cc
.cc
13,430
303
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * ---
 * Author: Joi Sigurdsson
 * Author: Scott Francis
 *
 * Implementation of PreamblePatcher
 */

#include "preamble_patcher.h"

#include "mini_disassembler.h"

// Definitions of assembly statements we need
#define ASM_JMP32REL 0xE9
#define ASM_INT3 0xCC
#define ASM_NOP 0x90
// X64 opcodes
#define ASM_MOVRAX_IMM 0xB8
#define ASM_REXW 0x48
#define ASM_JMP 0xFF
#define ASM_JMP_RAX 0xE0
#define ASM_PUSH 0x68
#define ASM_RET 0xC3

namespace sidestep {

// Copies enough whole instructions from the start of target_function into
// preamble_stub (followed by a jump back to the remainder of the target),
// then overwrites the first 5 bytes of target_function with a relative
// jump to replacement_function.  On x64, if the replacement is more than
// 2GB away, a 13-byte absolute-jump trampoline is appended to the stub
// and the target jumps there instead.  If bytes_needed is non-NULL it
// receives the stub size actually required, even on failure.
SideStepError PreamblePatcher::RawPatchWithStub(
    void* target_function,
    void* replacement_function,
    unsigned char* preamble_stub,
    unsigned long stub_size,
    unsigned long* bytes_needed) {
  if ((NULL == target_function) ||
      (NULL == replacement_function) ||
      (NULL == preamble_stub)) {
    SIDESTEP_ASSERT(false &&
                    "Invalid parameters - either pTargetFunction or "
                    "pReplacementFunction or pPreambleStub were NULL.");
    return SIDESTEP_INVALID_PARAMETER;
  }

  // TODO(V7:joi) Siggi and I just had a discussion and decided that both
  // patching and unpatching are actually unsafe.  We also discussed a
  // method of making it safe, which is to freeze all other threads in the
  // process, check their thread context to see if their eip is currently
  // inside the block of instructions we need to copy to the stub, and if so
  // wait a bit and try again, then unfreeze all threads once we've patched.
  // Not implementing this for now since we're only using SideStep for unit
  // testing, but if we ever use it for production code this is what we
  // should do.
  //
  // NOTE: Stoyan suggests we can write 8 or even 10 bytes atomically using
  // FPU instructions, and on newer processors we could use cmpxchg8b or
  // cmpxchg16b. So it might be possible to do the patching/unpatching
  // atomically and avoid having to freeze other threads.  Note though, that
  // doing it atomically does not help if one of the other threads happens
  // to have its eip in the middle of the bytes you change while you change
  // them.
  unsigned char* target = reinterpret_cast<unsigned char*>(target_function);
  unsigned int required_trampoline_bytes = 0;
  const unsigned int kRequiredStubJumpBytes = 5;
  const unsigned int kRequiredTargetPatchBytes = 5;

  // Initialize the stub with INT3's just in case.
  if (stub_size) {
    memset(preamble_stub, 0xcc, stub_size);
  }
  if (kIs64BitBinary) {
    // In 64-bit mode JMP instructions are always relative to RIP.  If the
    // replacement - target offset is > 2GB, we can't JMP to the replacement
    // function.  In this case, we're going to use a trampoline - that is,
    // we're going to do a relative jump to a small chunk of code in the stub
    // that will then do the absolute jump to the replacement function.  By
    // doing this, we only need to patch 5 bytes in the target function, as
    // opposed to patching 12 bytes if we were to do an absolute jump.
    //
    // Note that the first byte of the trampoline is a NOP instruction.  This
    // is used as a trampoline signature that will be detected when unpatching
    // the function.
    //
    // jmp <trampoline>
    //
    // trampoline:
    //    nop
    //    mov rax, <replacement_function>
    //    jmp rax
    //
    __int64 replacement_target_offset = reinterpret_cast<__int64>(
        replacement_function) - reinterpret_cast<__int64>(target) - 5;
    if (replacement_target_offset > INT_MAX
        || replacement_target_offset < INT_MIN) {
      // The stub needs to be within 2GB of the target for the trampoline to
      // work!
      __int64 trampoline_offset = reinterpret_cast<__int64>(preamble_stub)
          - reinterpret_cast<__int64>(target) - 5;
      if (trampoline_offset > INT_MAX
          || trampoline_offset < INT_MIN) {
        // We're screwed.
        SIDESTEP_ASSERT(false
                       && "Preamble stub is too far from target to patch.");
        return SIDESTEP_UNEXPECTED;
      }
      // 1 (nop) + 2 (REX.W mov rax) + 8 (imm64) + 2 (jmp rax) = 13 bytes.
      required_trampoline_bytes = 13;
    }
  }

  // Let's disassemble the preamble of the target function to see if we can
  // patch, and to see how much of the preamble we need to take.  We need 5
  // bytes for our jmp instruction, so let's find the minimum number of
  // instructions to get 5 bytes.
  MiniDisassembler disassembler;
  unsigned int preamble_bytes = 0;  // bytes consumed from the target so far
  unsigned int stub_bytes = 0;      // bytes written to the stub so far
  while (preamble_bytes < kRequiredTargetPatchBytes) {
    unsigned int cur_bytes = 0;
    InstructionType instruction_type =
        disassembler.Disassemble(target + preamble_bytes, cur_bytes);
    if (IT_JUMP == instruction_type) {
      // Relative branches cannot be copied verbatim: their displacement is
      // relative to the instruction address, so each form is rewritten for
      // its new location in the stub (possibly growing in size).
      unsigned int jump_bytes = 0;
      SideStepError jump_ret = SIDESTEP_JUMP_INSTRUCTION;
      if (IsShortConditionalJump(target + preamble_bytes, cur_bytes)) {
        jump_ret = PatchShortConditionalJump(target + preamble_bytes,
                                             cur_bytes,
                                             preamble_stub + stub_bytes,
                                             &jump_bytes,
                                             stub_size - stub_bytes);
      } else if (IsShortJump(target + preamble_bytes, cur_bytes)) {
        jump_ret = PatchShortJump(target + preamble_bytes, cur_bytes,
                                  preamble_stub + stub_bytes,
                                  &jump_bytes,
                                  stub_size - stub_bytes);
      } else if (IsNearConditionalJump(target + preamble_bytes, cur_bytes) ||
                 IsNearRelativeJump(target + preamble_bytes, cur_bytes) ||
                 IsNearAbsoluteCall(target + preamble_bytes, cur_bytes) ||
                 IsNearRelativeCall(target + preamble_bytes, cur_bytes)) {
        jump_ret = PatchNearJumpOrCall(target + preamble_bytes, cur_bytes,
                                       preamble_stub + stub_bytes, &jump_bytes,
                                       stub_size - stub_bytes);
      }
      if (jump_ret != SIDESTEP_SUCCESS) {
        SIDESTEP_ASSERT(false &&
                        "Unable to patch because there is an unhandled branch "
                        "instruction in the initial preamble bytes.");
        return SIDESTEP_JUMP_INSTRUCTION;
      }
      stub_bytes += jump_bytes;
    } else if (IT_RETURN == instruction_type) {
      SIDESTEP_ASSERT(false &&
                      "Unable to patch because function is too short");
      return SIDESTEP_FUNCTION_TOO_SMALL;
    } else if (IT_GENERIC == instruction_type) {
      if (IsMovWithDisplacement(target + preamble_bytes, cur_bytes)) {
        // RIP-relative mov must also be relocated for the stub address.
        unsigned int mov_bytes = 0;
        if (PatchMovWithDisplacement(target + preamble_bytes, cur_bytes,
                                     preamble_stub + stub_bytes, &mov_bytes,
                                     stub_size - stub_bytes)
            != SIDESTEP_SUCCESS) {
          return SIDESTEP_UNSUPPORTED_INSTRUCTION;
        }
        stub_bytes += mov_bytes;
      } else {
        // Position-independent instruction: copy it into the stub verbatim.
        memcpy(reinterpret_cast<void*>(preamble_stub + stub_bytes),
               reinterpret_cast<void*>(target + preamble_bytes), cur_bytes);
        stub_bytes += cur_bytes;
      }
    } else {
      SIDESTEP_ASSERT(false &&
                      "Disassembler encountered unsupported instruction "
                      "(either unused or unknown");
      return SIDESTEP_UNSUPPORTED_INSTRUCTION;
    }
    preamble_bytes += cur_bytes;
  }

  if (NULL != bytes_needed)
    *bytes_needed = stub_bytes + kRequiredStubJumpBytes
        + required_trampoline_bytes;

  // Inv: cbPreamble is the number of bytes (at least 5) that we need to take
  // from the preamble to have whole instructions that are 5 bytes or more
  // in size total. The size of the stub required is cbPreamble +
  // kRequiredStubJumpBytes (5) + required_trampoline_bytes (0 or 13)
  if (stub_bytes + kRequiredStubJumpBytes + required_trampoline_bytes
      > stub_size) {
    SIDESTEP_ASSERT(false);
    return SIDESTEP_INSUFFICIENT_BUFFER;
  }

  // Now, make a jmp instruction to the rest of the target function (minus the
  // preamble bytes we moved into the stub) and copy it into our preamble-stub.
  // find address to jump to, relative to next address after jmp instruction
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4244)
#endif
  int relative_offset_to_target_rest
      = ((reinterpret_cast<unsigned char*>(target) + preamble_bytes) -
         (preamble_stub + stub_bytes + kRequiredStubJumpBytes));
#ifdef _MSC_VER
#pragma warning(pop)
#endif
  // jmp (Jump near, relative, displacement relative to next instruction)
  preamble_stub[stub_bytes] = ASM_JMP32REL;
  // copy the address
  memcpy(reinterpret_cast<void*>(preamble_stub + stub_bytes + 1),
         reinterpret_cast<void*>(&relative_offset_to_target_rest), 4);

  if (kIs64BitBinary && required_trampoline_bytes != 0) {
    // Construct the trampoline: nop; mov rax, imm64; jmp rax.
    unsigned int trampoline_pos = stub_bytes + kRequiredStubJumpBytes;
    preamble_stub[trampoline_pos] = ASM_NOP;
    preamble_stub[trampoline_pos + 1] = ASM_REXW;
    preamble_stub[trampoline_pos + 2] = ASM_MOVRAX_IMM;
    memcpy(reinterpret_cast<void*>(preamble_stub + trampoline_pos + 3),
           reinterpret_cast<void*>(&replacement_function),
           sizeof(void *));
    preamble_stub[trampoline_pos + 11] = ASM_JMP;
    preamble_stub[trampoline_pos + 12] = ASM_JMP_RAX;

    // Now update replacement_function to point to the trampoline
    replacement_function = preamble_stub + trampoline_pos;
  }

  // Inv: preamble_stub points to assembly code that will execute the
  // original function by first executing the first cbPreamble bytes of the
  // preamble, then jumping to the rest of the function.

  // Overwrite the first 5 bytes of the target function with a jump to our
  // replacement function.
  // (Jump near, relative, displacement relative to next instruction)
  target[0] = ASM_JMP32REL;

  // Find offset from instruction after jmp, to the replacement function.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4244)
#endif
  int offset_to_replacement_function =
      reinterpret_cast<unsigned char*>(replacement_function) -
      reinterpret_cast<unsigned char*>(target) - 5;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
  // complete the jmp instruction
  memcpy(reinterpret_cast<void*>(target + 1),
         reinterpret_cast<void*>(&offset_to_replacement_function), 4);

  // Set any remaining bytes that were moved to the preamble-stub to INT3 so
  // as not to cause confusion (otherwise you might see some strange
  // instructions if you look at the disassembly, or even invalid
  // instructions). Also, by doing this, we will break into the debugger if
  // some code calls into this portion of the code.  If this happens, it
  // means that this function cannot be patched using this patcher without
  // further thought.
  if (preamble_bytes > kRequiredTargetPatchBytes) {
    memset(reinterpret_cast<void*>(target + kRequiredTargetPatchBytes),
           ASM_INT3, preamble_bytes - kRequiredTargetPatchBytes);
  }

  // Inv: The memory pointed to by target_function now points to a relative
  // jump instruction that jumps over to the preamble_stub.  The preamble
  // stub contains the first stub_size bytes of the original target
  // function's preamble code, followed by a relative jump back to the next
  // instruction after the first cbPreamble bytes.
  //
  // In 64-bit mode the memory pointed to by target_function *may* point to a
  // relative jump instruction that jumps to a trampoline which will then
  // perform an absolute jump to the replacement function.  The preamble stub
  // still contains the original target function's preamble code, followed by a
  // jump back to the instructions after the first preamble bytes.
  //
  return SIDESTEP_SUCCESS;
}

};  // namespace sidestep
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/preamble_patcher_test.cc
.cc
14,198
369
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2011, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Joi Sigurdsson * Author: Scott Francis * * Unit tests for PreamblePatcher */ #include "config_for_unittests.h" #include "preamble_patcher.h" #include "mini_disassembler.h" #pragma warning(push) #pragma warning(disable:4553) #include "auto_testing_hook.h" #pragma warning(pop) #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <tchar.h> // Turning off all optimizations for this file, since the official build's // "Whole program optimization" seems to cause the TestPatchUsingDynamicStub // test to crash with an access violation. We debugged this and found // that the optimized access a register that is changed by a call to the hook // function. #pragma optimize("", off) // A convenience macro to avoid a lot of casting in the tests. // I tried to make this a templated function, but windows complained: // error C2782: 'sidestep::SideStepError `anonymous-namespace'::Unpatch(T,T,T *)' : template parameter 'T' is ambiguous // could be 'int (int)' // or 'int (__cdecl *)(int)' // My life isn't long enough to try to figure out how to fix this. #define UNPATCH(target_function, replacement_function, original_function_stub) \ sidestep::PreamblePatcher::Unpatch((void*)(target_function), \ (void*)(replacement_function), \ (void*)(original_function)) namespace { // Function for testing - this is what we patch // // NOTE: Because of the way the compiler optimizes this function in // release builds, we need to use a different input value every time we // call it within a function, otherwise the compiler will just reuse the // last calculated incremented value. 
// The patch target: returns i + 1.  Marked noinline so the patcher has a
// real function body to rewrite.
int __declspec(noinline) IncrementNumber(int i) {
#ifdef _M_X64
  // Force a few extra instructions on x64 so the preamble is long enough
  // to patch (presumably; the asm shape is compiler-dependent -- see the
  // accompanying .asm fixtures for the deterministic cases).
  __int64 i2 = i + 1;
  return (int) i2;
#else
  return i + 1;
#endif
}

// Hand-written assembly fixtures (defined in a separate .asm file) that
// begin with the specific instruction forms the patcher must handle.
extern "C" int TooShortFunction(int);
extern "C" int JumpShortCondFunction(int);
extern "C" int JumpNearCondFunction(int);
extern "C" int JumpAbsoluteFunction(int);
extern "C" int CallNearRelativeFunction(int);

typedef int (*IncrementingFunc)(int);
// Stub returned by Patch(); lets tests invoke the original preamble.
IncrementingFunc original_function = NULL;

// Replacement hook: calls through to the original, then adds one more,
// so a patched IncrementNumber(i) yields i + 2.
int HookIncrementNumber(int i) {
  SIDESTEP_ASSERT(original_function != NULL);
  int incremented_once = original_function(i);
  return incremented_once + 1;
}

// For the AutoTestingHook test, we can't use original_function, because
// all that is encapsulated.
// This function "increments" by 10, just to set it apart from the other
// functions.
int __declspec(noinline) AutoHookIncrementNumber(int i) {
  return i + 10;
}

};  // namespace

namespace sidestep {

// Walks IncrementNumber instruction by instruction until a RET is seen.
// NOTE(review): if the disassembler ever returns IT_UNKNOWN here this
// loop would never terminate; it relies on the fixture being decodable.
bool TestDisassembler() {
  unsigned int instruction_size = 0;
  sidestep::MiniDisassembler disassembler;
  void * target = reinterpret_cast<unsigned char *>(IncrementNumber);
  void * new_target = PreamblePatcher::ResolveTarget(target);
  if (target != new_target)
    target = new_target;

  while (1) {
    sidestep::InstructionType instructionType = disassembler.Disassemble(
        reinterpret_cast<unsigned char *>(target) + instruction_size,
        instruction_size);
    if (sidestep::IT_RETURN == instructionType) {
      return true;
    }
  }
}

// Patches IncrementNumber with a replacement allocated > 2GB away,
// forcing the x64 trampoline path in RawPatchWithStub.
bool TestPatchWithLongJump() {
  original_function = NULL;
  void *p = ::VirtualAlloc(reinterpret_cast<void *>(0x0000020000000000), 4096,
                           MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  SIDESTEP_EXPECT_TRUE(p != NULL);
  // Fill the replacement page with INT3 (0xcc); the test only verifies
  // that patching/unpatching succeeds and the stub still works.
  memset(p, 0xcc, 4096);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(IncrementNumber,
                                                        (IncrementingFunc) p,
                                                        &original_function));
  SIDESTEP_ASSERT((*original_function)(1) == 2);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(IncrementNumber,
                               (IncrementingFunc)p,
                               original_function));
  ::VirtualFree(p, 0, MEM_RELEASE);
  return true;
}

// Patches a function whose preamble starts with a short conditional jump.
bool TestPatchWithPreambleShortCondJump() {
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(JumpShortCondFunction,
                                                        HookIncrementNumber,
                                                        &original_function));
  (*original_function)(1);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(JumpShortCondFunction,
                               (void*)HookIncrementNumber,
                               original_function));
  return true;
}

// Patches a function whose preamble starts with a near conditional jump;
// calls with 0 and 1 to exercise both sides of the branch.
bool TestPatchWithPreambleNearRelativeCondJump() {
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(JumpNearCondFunction,
                                                        HookIncrementNumber,
                                                        &original_function));
  (*original_function)(0);
  (*original_function)(1);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(JumpNearCondFunction,
                               HookIncrementNumber,
                               original_function));
  return true;
}

// Patches a function whose preamble contains an absolute jump.
bool TestPatchWithPreambleAbsoluteJump() {
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(JumpAbsoluteFunction,
                                                        HookIncrementNumber,
                                                        &original_function));
  (*original_function)(0);
  (*original_function)(1);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(JumpAbsoluteFunction,
                               HookIncrementNumber,
                               original_function));
  return true;
}

// Patches a function whose preamble contains a near relative call.
bool TestPatchWithPreambleNearRelativeCall() {
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(
                           CallNearRelativeFunction,
                           HookIncrementNumber,
                           &original_function));
  (*original_function)(0);
  (*original_function)(1);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(CallNearRelativeFunction,
                               HookIncrementNumber,
                               original_function));
  return true;
}

// End-to-end patch test: after patching, IncrementNumber(i) == i + 2
// (original +1, hook +1) and the stub still performs the original +1.
bool TestPatchUsingDynamicStub() {
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(IncrementNumber(1) == 2);
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(IncrementNumber,
                                                        HookIncrementNumber,
                                                        &original_function));
  SIDESTEP_EXPECT_TRUE(original_function);
  SIDESTEP_EXPECT_TRUE(IncrementNumber(2) == 4);
  SIDESTEP_EXPECT_TRUE(original_function(3) == 4);

  // Clearbox test to see that the function has been patched.
  sidestep::MiniDisassembler disassembler;
  unsigned int instruction_size = 0;
  SIDESTEP_EXPECT_TRUE(sidestep::IT_JUMP == disassembler.Disassemble(
                           reinterpret_cast<unsigned char*>(IncrementNumber),
                           instruction_size));

  // Since we patched IncrementNumber, its first statement is a
  // jmp to the hook function.  So verify that we now can not patch
  // IncrementNumber because it starts with a jump.
#if 0
  IncrementingFunc dummy = NULL;
  // TODO(joi@chromium.org): restore this test once flag is added to
  // disable JMP following
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_JUMP_INSTRUCTION ==
                       sidestep::PreamblePatcher::Patch(IncrementNumber,
                                                        HookIncrementNumber,
                                                        &dummy));

  // This test disabled because code in preamble_patcher_with_stub.cc
  // asserts before returning the error code -- so there is no way
  // to get an error code here, in debug build.
  dummy = NULL;
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_FUNCTION_TOO_SMALL ==
                       sidestep::PreamblePatcher::Patch(TooShortFunction,
                                                        HookIncrementNumber,
                                                        &dummy));
#endif

  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(IncrementNumber,
                               HookIncrementNumber,
                               original_function));
  return true;
}

// Verifies behavior returns to normal (i + 1) after unpatching.
bool PatchThenUnpatch() {
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       sidestep::PreamblePatcher::Patch(IncrementNumber,
                                                        HookIncrementNumber,
                                                        &original_function));
  SIDESTEP_EXPECT_TRUE(original_function);
  SIDESTEP_EXPECT_TRUE(IncrementNumber(1) == 3);
  SIDESTEP_EXPECT_TRUE(original_function(2) == 3);

  SIDESTEP_EXPECT_TRUE(sidestep::SIDESTEP_SUCCESS ==
                       UNPATCH(IncrementNumber,
                               HookIncrementNumber,
                               original_function));
  original_function = NULL;
  SIDESTEP_EXPECT_TRUE(IncrementNumber(3) == 4);

  return true;
}

// AutoTestingHook must install the hook for its lifetime and remove it
// when it leaves scope.
bool AutoTestingHookTest() {
  SIDESTEP_EXPECT_TRUE(IncrementNumber(1) == 2);

  // Inner scope, so we can test what happens when the AutoTestingHook
  // goes out of scope
  {
    AutoTestingHook hook = MakeTestingHook(IncrementNumber,
                                           AutoHookIncrementNumber);
    (void) hook;
    SIDESTEP_EXPECT_TRUE(IncrementNumber(2) == 12);
  }
  SIDESTEP_EXPECT_TRUE(IncrementNumber(3) == 4);

  return true;
}

// Same as above, but holding the hook via AutoTestingHookHolder (the
// member-variable-safe variant).
bool AutoTestingHookInContainerTest() {
  SIDESTEP_EXPECT_TRUE(IncrementNumber(1) == 2);

  // Inner scope, so we can test what happens when the AutoTestingHook
  // goes out of scope
  {
    AutoTestingHookHolder hook(MakeTestingHookHolder(IncrementNumber,
                                                     AutoHookIncrementNumber));
    (void) hook;
    SIDESTEP_EXPECT_TRUE(IncrementNumber(2) == 12);
  }
  SIDESTEP_EXPECT_TRUE(IncrementNumber(3) == 4);

  return true;
}

// AllocPreambleBlockNear must return memory within 2GB of the hint, and
// freed blocks must be reused on subsequent allocations near the same
// address.
bool TestPreambleAllocation() {
  __int64 diff = 0;
  void* p1 = reinterpret_cast<void*>(0x110000000);
  void* p2 = reinterpret_cast<void*>(0x810000000);
  unsigned char* b1 = PreamblePatcher::AllocPreambleBlockNear(p1);
  SIDESTEP_EXPECT_TRUE(b1 != NULL);
  diff = reinterpret_cast<__int64>(p1) - reinterpret_cast<__int64>(b1);
  // Ensure blocks are within 2GB
  SIDESTEP_EXPECT_TRUE(diff <= INT_MAX && diff >= INT_MIN);
  unsigned char* b2 = PreamblePatcher::AllocPreambleBlockNear(p2);
  SIDESTEP_EXPECT_TRUE(b2 != NULL);
  diff = reinterpret_cast<__int64>(p2) - reinterpret_cast<__int64>(b2);
  SIDESTEP_EXPECT_TRUE(diff <= INT_MAX && diff >= INT_MIN);

  // Ensure we're reusing free blocks
  unsigned char* b3 = b1;
  unsigned char* b4 = b2;
  PreamblePatcher::FreePreambleBlock(b1);
  PreamblePatcher::FreePreambleBlock(b2);
  b1 = PreamblePatcher::AllocPreambleBlockNear(p1);
  SIDESTEP_EXPECT_TRUE(b1 == b3);
  b2 = PreamblePatcher::AllocPreambleBlockNear(p2);
  SIDESTEP_EXPECT_TRUE(b2 == b4);
  PreamblePatcher::FreePreambleBlock(b1);
  PreamblePatcher::FreePreambleBlock(b2);

  return true;
}

// Runs all tests; short-circuits on the first failure.
bool UnitTests() {
  return TestPatchWithPreambleNearRelativeCall() &&
      TestPatchWithPreambleAbsoluteJump() &&
      TestPatchWithPreambleNearRelativeCondJump() &&
      TestPatchWithPreambleShortCondJump() &&
      TestDisassembler() &&
      TestPatchWithLongJump() &&
      TestPatchUsingDynamicStub() &&
      PatchThenUnpatch() &&
      AutoTestingHookTest() &&
      AutoTestingHookInContainerTest() &&
      TestPreambleAllocation();
}

};  // namespace sidestep

// Windows-flavored vsnprintf: always NUL-terminates, returns -1 on
// overflow (deliberately not C99 semantics).
int safe_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
  if (size == 0)        // not even room for a \0?
    return -1;          // not what C99 says to do, but what windows does
  str[size-1] = '\0';
  return _vsnprintf(str, size-1, format, ap);
}

int _tmain(int argc, _TCHAR* argv[]) {
  bool ret = sidestep::UnitTests();
  printf("%s\n", ret ? "PASS" : "FAIL");
  return ret ? 0 : -1;
}

#pragma optimize("", on)
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/mini_disassembler.cc
.cc
16,054
433
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Joi Sigurdsson * * Implementation of MiniDisassembler. 
*/

#include "mini_disassembler.h"

namespace sidestep {

// Constructor with explicit operand/address size defaults (16- vs 32-bit
// segments).
MiniDisassembler::MiniDisassembler(bool operand_default_is_32_bits,
                                   bool address_default_is_32_bits)
    : operand_default_is_32_bits_(operand_default_is_32_bits),
      address_default_is_32_bits_(address_default_is_32_bits) {
  Initialize();
}

// Default constructor: 32-bit operands and addresses.
MiniDisassembler::MiniDisassembler()
    : operand_default_is_32_bits_(true),
      address_default_is_32_bits_(true) {
  Initialize();
}

// Decodes one instruction at start_byte, adds its total length to
// instruction_bytes, and returns its classification (or IT_UNKNOWN if it
// could not be decoded).  Note: the length is *added* to the caller's
// counter, which lets callers walk a code stream with one running offset.
InstructionType MiniDisassembler::Disassemble(
    unsigned char* start_byte,
    unsigned int& instruction_bytes) {
  // Clean up any state from previous invocations.
  Initialize();

  // Start by processing any prefixes.
  unsigned char* current_byte = start_byte;
  unsigned int size = 0;
  InstructionType instruction_type = ProcessPrefixes(current_byte, size);

  if (IT_UNKNOWN == instruction_type)
    return instruction_type;

  current_byte += size;
  size = 0;

  // Invariant: We have stripped all prefixes, and the operand_is_32_bits_
  // and address_is_32_bits_ flags are correctly set.

  instruction_type = ProcessOpcode(current_byte, 0, size);

  // Check for error processing instruction
  if ((IT_UNKNOWN == instruction_type_) || (IT_UNUSED == instruction_type_)) {
    return IT_UNKNOWN;
  }

  current_byte += size;

  // Invariant: operand_bytes_ indicates the total size of operands
  // specified by the opcode and/or ModR/M byte and/or SIB byte.
  // pCurrentByte points to the first byte after the ModR/M byte, or after
  // the SIB byte if it is present (i.e. the first byte of any operands
  // encoded in the instruction).

  // We get the total length of any prefixes, the opcode, and the ModR/M and
  // SIB bytes if present, by taking the difference of the original starting
  // address and the current byte (which points to the first byte of the
  // operands if present, or to the first byte of the next instruction if
  // they are not).  Adding the count of bytes in the operands encoded in
  // the instruction gives us the full length of the instruction in bytes.
  instruction_bytes += operand_bytes_ + (current_byte - start_byte);

  // Return the instruction type, which was set by ProcessOpcode().
  return instruction_type_;
}

// Resets all per-instruction decoding state to its defaults.
void MiniDisassembler::Initialize() {
  operand_is_32_bits_ = operand_default_is_32_bits_;
  address_is_32_bits_ = address_default_is_32_bits_;
#ifdef _M_X64
  operand_default_support_64_bits_ = true;
#else
  operand_default_support_64_bits_ = false;
#endif
  operand_is_64_bits_ = false;
  operand_bytes_ = 0;
  have_modrm_ = false;
  should_decode_modrm_ = false;
  instruction_type_ = IT_UNKNOWN;
  got_f2_prefix_ = false;
  got_f3_prefix_ = false;
  got_66_prefix_ = false;
}

// Consumes any prefix bytes at start_byte (recursively, one per call),
// incrementing size per prefix and toggling the operand/address size and
// F2/F3/66/REX flags as appropriate.  Returns the type of the *first*
// byte examined (IT_GENERIC if it was not a prefix at all).
// NOTE: the goto deliberately jumps into the middle of the switch so that
// IT_PREFIX_ADDRESS shares IT_PREFIX's bookkeeping without toggling the
// operand-size flag.
InstructionType MiniDisassembler::ProcessPrefixes(unsigned char* start_byte,
                                                  unsigned int& size) {
  InstructionType instruction_type = IT_GENERIC;
  const Opcode& opcode = s_ia32_opcode_map_[0].table_[*start_byte];

  switch (opcode.type_) {
    case IT_PREFIX_ADDRESS:
      address_is_32_bits_ = !address_default_is_32_bits_;
      goto nochangeoperand;
    case IT_PREFIX_OPERAND:
      operand_is_32_bits_ = !operand_default_is_32_bits_;
      nochangeoperand:
    case IT_PREFIX:

      if (0xF2 == (*start_byte))
        got_f2_prefix_ = true;
      else if (0xF3 == (*start_byte))
        got_f3_prefix_ = true;
      else if (0x66 == (*start_byte))
        got_66_prefix_ = true;
      else if (operand_default_support_64_bits_ && (*start_byte) & 0x48)
        // REX.W-style prefix on x64 promotes the operand to 64 bits.
        // NOTE(review): the & 0x48 test is loose -- it only runs for bytes
        // the opcode table already classified as prefixes, which is what
        // keeps it from misfiring; verify against the table before reuse.
        operand_is_64_bits_ = true;

      instruction_type = opcode.type_;
      size ++;
      // we got a prefix, so add one and check next byte
      ProcessPrefixes(start_byte + 1, size);
    default:
      break;   // not a prefix byte
  }

  return instruction_type;
}

// Looks up the opcode at start_byte in opcode table table_index, recursing
// through multi-byte opcode tables (IT_REFERENCE), resolving F2/F3/66
// prefix-dependent variants, accumulating immediate-operand sizes via
// ProcessOperand(), and decoding the ModR/M (and SIB) bytes when present.
// Sets instruction_type_ and adds the opcode + ModR/M + SIB byte count to
// size.
InstructionType MiniDisassembler::ProcessOpcode(unsigned char* start_byte,
                                                unsigned int table_index,
                                                unsigned int& size) {
  const OpcodeTable& table = s_ia32_opcode_map_[table_index];   // Get our table
  unsigned char current_byte = (*start_byte) >> table.shift_;
  current_byte = current_byte & table.mask_;  // Mask out the bits we will use

  // Check whether the byte we have is inside the table we have.
  if (current_byte < table.min_lim_ || current_byte > table.max_lim_) {
    instruction_type_ = IT_UNKNOWN;
    return instruction_type_;
  }

  const Opcode& opcode = table.table_[current_byte];
  if (IT_UNUSED == opcode.type_) {
    // This instruction is not used by the IA-32 ISA, so we indicate
    // this to the user.  Probably means that we were pointed to
    // a byte in memory that was not the start of an instruction.
    instruction_type_ = IT_UNUSED;
    return instruction_type_;
  } else if (IT_REFERENCE == opcode.type_) {
    // We are looking at an opcode that has more bytes (or is continued
    // in the ModR/M byte).  Recursively find the opcode definition in
    // the table for the opcode's next byte.
    size++;
    ProcessOpcode(start_byte + 1, opcode.table_index_, size);
    return instruction_type_;
  }

  const SpecificOpcode* specific_opcode = (SpecificOpcode*)&opcode;
  if (opcode.is_prefix_dependent_) {
    // An F2/F3/66 prefix can select a different instruction entirely
    // (e.g. SSE variants); switch to that variant's descriptor.
    if (got_f2_prefix_ && opcode.opcode_if_f2_prefix_.mnemonic_ != 0) {
      specific_opcode = &opcode.opcode_if_f2_prefix_;
    } else if (got_f3_prefix_ && opcode.opcode_if_f3_prefix_.mnemonic_ != 0) {
      specific_opcode = &opcode.opcode_if_f3_prefix_;
    } else if (got_66_prefix_ && opcode.opcode_if_66_prefix_.mnemonic_ != 0) {
      specific_opcode = &opcode.opcode_if_66_prefix_;
    }
  }

  // Inv: The opcode type is known.
  instruction_type_ = specific_opcode->type_;

  // Let's process the operand types to see if we have any immediate
  // operands, and/or a ModR/M byte.
  ProcessOperand(specific_opcode->flag_dest_);
  ProcessOperand(specific_opcode->flag_source_);
  ProcessOperand(specific_opcode->flag_aux_);

  // Inv: We have processed the opcode and incremented operand_bytes_
  // by the number of bytes of any operands specified by the opcode
  // that are stored in the instruction (not registers etc.).  Now
  // we need to return the total number of bytes for the opcode and
  // for the ModR/M or SIB bytes if they are present.
  if (table.mask_ != 0xff) {
    // mask_ != 0xff means this table indexed on ModR/M bits, i.e.
    // start_byte *is* the ModR/M byte (opcode extension).
    if (have_modrm_) {
      // we're looking at a ModR/M byte so we're not going to
      // count that into the opcode size
      ProcessModrm(start_byte, size);
      return IT_GENERIC;
    } else {
      // need to count the ModR/M byte even if it's just being
      // used for opcode extension
      size++;
      return IT_GENERIC;
    }
  } else {
    if (have_modrm_) {
      // The ModR/M byte is the next byte.
      size++;
      ProcessModrm(start_byte + 1, size);
      return IT_GENERIC;
    } else {
      size++;
      return IT_GENERIC;
    }
  }
}

// Interprets one operand's addressing-mode/operand-type flags: notes
// whether a ModR/M byte exists (and whether it must be decoded), and adds
// the byte count of any immediate/offset operand to operand_bytes_.
// Returns false for operand types this disassembler does not support.
bool MiniDisassembler::ProcessOperand(int flag_operand) {
  bool succeeded = true;
  if (AM_NOT_USED == flag_operand)
    return succeeded;

  // Decide what to do based on the addressing mode.
  switch (flag_operand & AM_MASK) {
    // No ModR/M byte indicated by these addressing modes, and no
    // additional (e.g. immediate) parameters.
    case AM_A: // Direct address
    case AM_F: // EFLAGS register
    case AM_X: // Memory addressed by the DS:SI register pair
    case AM_Y: // Memory addressed by the ES:DI register pair
    case AM_IMPLICIT: // Parameter is implicit, occupies no space in
                       // instruction
      break;

    // There is a ModR/M byte but it does not necessarily need
    // to be decoded.
    case AM_C: // reg field of ModR/M selects a control register
    case AM_D: // reg field of ModR/M selects a debug register
    case AM_G: // reg field of ModR/M selects a general register
    case AM_P: // reg field of ModR/M selects an MMX register
    case AM_R: // mod field of ModR/M may refer only to a general register
    case AM_S: // reg field of ModR/M selects a segment register
    case AM_T: // reg field of ModR/M selects a test register
    case AM_V: // reg field of ModR/M selects a 128-bit XMM register
      have_modrm_ = true;
      break;

    // In these addressing modes, there is a ModR/M byte and it needs to be
    // decoded. No other (e.g. immediate) params than indicated in ModR/M.
    case AM_E: // Operand is either a general-purpose register or memory,
               // specified by ModR/M byte
    case AM_M: // ModR/M byte will refer only to memory
    case AM_Q: // Operand is either an MMX register or memory (complex
               // evaluation), specified by ModR/M byte
    case AM_W: // Operand is either a 128-bit XMM register or memory (complex
               // eval), specified by ModR/M byte
      have_modrm_ = true;
      should_decode_modrm_ = true;
      break;

    // These addressing modes specify an immediate or an offset value
    // directly, so we need to look at the operand type to see how many
    // bytes.
    case AM_I: // Immediate data.
    case AM_J: // Jump to offset.
    case AM_O: // Operand is at offset.
      switch (flag_operand & OT_MASK) {
        case OT_B: // Byte regardless of operand-size attribute.
          operand_bytes_ += OS_BYTE;
          break;
        case OT_C: // Byte or word, depending on operand-size attribute.
          if (operand_is_32_bits_)
            operand_bytes_ += OS_WORD;
          else
            operand_bytes_ += OS_BYTE;
          break;
        case OT_D: // Doubleword, regardless of operand-size attribute.
          operand_bytes_ += OS_DOUBLE_WORD;
          break;
        case OT_DQ: // Double-quadword, regardless of operand-size attribute.
          operand_bytes_ += OS_DOUBLE_QUAD_WORD;
          break;
        case OT_P: // 32-bit or 48-bit pointer, depending on operand-size
                   // attribute.
          if (operand_is_32_bits_)
            operand_bytes_ += OS_48_BIT_POINTER;
          else
            operand_bytes_ += OS_32_BIT_POINTER;
          break;
        case OT_PS: // 128-bit packed single-precision floating-point data.
          operand_bytes_ += OS_128_BIT_PACKED_SINGLE_PRECISION_FLOATING;
          break;
        case OT_Q: // Quadword, regardless of operand-size attribute.
          operand_bytes_ += OS_QUAD_WORD;
          break;
        case OT_S: // 6-byte pseudo-descriptor.
          operand_bytes_ += OS_PSEUDO_DESCRIPTOR;
          break;
        case OT_SD: // Scalar Double-Precision Floating-Point Value
        case OT_PD: // Unaligned packed double-precision floating point value
          operand_bytes_ += OS_DOUBLE_PRECISION_FLOATING;
          break;
        case OT_SS:
          // Scalar element of a 128-bit packed single-precision
          // floating data.
          // We simply return enItUnknown since we don't have to support
          // floating point
          succeeded = false;
          break;
        case OT_V: // Word, doubleword or quadword, depending on operand-size
                   // attribute.
          if (operand_is_64_bits_ && flag_operand & AM_I &&
              flag_operand & IOS_64)
            operand_bytes_ += OS_QUAD_WORD;
          else if (operand_is_32_bits_)
            operand_bytes_ += OS_DOUBLE_WORD;
          else
            operand_bytes_ += OS_WORD;
          break;
        case OT_W: // Word, regardless of operand-size attribute.
          operand_bytes_ += OS_WORD;
          break;

        // Can safely ignore these.
        case OT_A: // Two one-word operands in memory or two double-word
                   // operands in memory
        case OT_PI: // Quadword MMX technology register (e.g. mm0)
        case OT_SI: // Doubleword integer register (e.g., eax)
          break;

        default:
          break;
      }
      break;

    default:
      break;
  }

  return succeeded;
}

// Decodes the ModR/M byte at start_byte: adds any displacement bytes to
// operand_bytes_, consumes the ModR/M byte (size++), and chains to
// ProcessSib() when the r/m field indicates a SIB byte follows.
bool MiniDisassembler::ProcessModrm(unsigned char* start_byte,
                                    unsigned int& size) {
  // If we don't need to decode, we just return the size of the ModR/M
  // byte (there is never a SIB byte in this case).
  if (!should_decode_modrm_) {
    size++;
    return true;
  }

  // We never care about the reg field, only the combination of the mod
  // and r/m fields, so let's start by packing those fields together into
  // 5 bits.
  unsigned char modrm = (*start_byte);
  unsigned char mod = modrm & 0xC0; // mask out top two bits to get mod field
  modrm = modrm & 0x07; // mask out bottom 3 bits to get r/m field
  mod = mod >> 3; // shift the mod field to the right place
  modrm = mod | modrm; // combine the r/m and mod fields as discussed
  mod = mod >> 3; // shift the mod field to bits 2..0

  // Invariant: modrm contains the mod field in bits 4..3 and the r/m field
  // in bits 2..0, and mod contains the mod field in bits 2..0

  const ModrmEntry* modrm_entry = 0;
  if (address_is_32_bits_)
    modrm_entry = &s_ia32_modrm_map_[modrm];
  else
    modrm_entry = &s_ia16_modrm_map_[modrm];

  // Invariant: modrm_entry points to information that we need to decode
  // the ModR/M byte.

  // Add to the count of operand bytes, if the ModR/M byte indicates
  // that some operands are encoded in the instruction.
  if (modrm_entry->is_encoded_in_instruction_)
    operand_bytes_ += modrm_entry->operand_size_;

  // Process the SIB byte if necessary, and return the count
  // of ModR/M and SIB bytes.
  if (modrm_entry->use_sib_byte_) {
    size++;
    return ProcessSib(start_byte + 1, mod, size);
  } else {
    size++;
    return true;
  }
}

// Decodes the SIB byte at start_byte: when base == 101b the displacement
// size depends on the mod field (dword for mod 00/10, byte for mod 01).
// Always consumes exactly one byte (size++).
bool MiniDisassembler::ProcessSib(unsigned char* start_byte,
                                  unsigned char mod,
                                  unsigned int& size) {
  // get the mod field from the 2..0 bits of the SIB byte
  unsigned char sib_base = (*start_byte) & 0x07;
  if (0x05 == sib_base) {
    switch (mod) {
      case 0x00: // mod == 00
      case 0x02: // mod == 10
        operand_bytes_ += OS_DOUBLE_WORD;
        break;
      case 0x01: // mod == 01
        operand_bytes_ += OS_BYTE;
        break;
      case 0x03: // mod == 11
        // According to the IA-32 docs, there does not seem to be a disp
        // value for this value of mod
      default:
        break;
    }
  }

  size++;
  return true;
}

};  // namespace sidestep
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/google/tcmalloc.h
.h
1,688
35
/* Copyright (c) 2003, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The code has moved to gperftools/. Use that include-directory for * new code. */ #include <gperftools/tcmalloc.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/windows/gperftools/tcmalloc.h
.h
6,986
156
// -*- Mode: C; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2003, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Sanjay Ghemawat <opensource@google.com> * .h file by Craig Silverstein <opensource@google.com> */ #ifndef TCMALLOC_TCMALLOC_H_ #define TCMALLOC_TCMALLOC_H_ #include <stddef.h> /* for size_t */ #ifdef __cplusplus #include <new> /* for std::nothrow_t, std::align_val_t */ #endif /* Define the version number so folks can check against it */ #define TC_VERSION_MAJOR 2 #define TC_VERSION_MINOR 7 #define TC_VERSION_PATCH "" #define TC_VERSION_STRING "gperftools 2.7" #ifndef PERFTOOLS_NOTHROW #if __cplusplus >= 201103L #define PERFTOOLS_NOTHROW noexcept #elif defined(__cplusplus) #define PERFTOOLS_NOTHROW throw() #else # ifdef __GNUC__ # define PERFTOOLS_NOTHROW __attribute__((__nothrow__)) # else # define PERFTOOLS_NOTHROW # endif #endif #endif #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif #ifdef __cplusplus extern "C" { #endif /* * Returns a human-readable version string. If major, minor, * and/or patch are not NULL, they are set to the major version, * minor version, and patch-code (a string, usually ""). 
*/ PERFTOOLS_DLL_DECL const char* tc_version(int* major, int* minor, const char** patch) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_malloc_skip_new_handler(size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_free(void* ptr) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_free_sized(void *ptr, size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_realloc(void* ptr, size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_calloc(size_t nmemb, size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_memalign(size_t __alignment, size_t __size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL int tc_posix_memalign(void** ptr, size_t align, size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_valloc(size_t __size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t __size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_malloc_stats(void) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) PERFTOOLS_NOTHROW; /* * This is an alias for MallocExtension::instance()->GetAllocatedSize(). 
* It is equivalent to * OS X: malloc_size() * glibc: malloc_usable_size() * Windows: _msize() */ PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) PERFTOOLS_NOTHROW; #ifdef __cplusplus PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_new(size_t size); PERFTOOLS_DLL_DECL void* tc_new_nothrow(size_t size, const std::nothrow_t&) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_delete(void* p) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_delete_sized(void* p, size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_delete_nothrow(void* p, const std::nothrow_t&) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_newarray(size_t size); PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_deletearray(void* p) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_deletearray_sized(void* p, size_t size) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(void* p, const std::nothrow_t&) PERFTOOLS_NOTHROW; #if defined(__cpp_aligned_new) || (defined(_MSVC_LANG) && _MSVC_LANG > 201402L) PERFTOOLS_DLL_DECL void* tc_new_aligned(size_t size, std::align_val_t al); PERFTOOLS_DLL_DECL void* tc_new_aligned_nothrow(size_t size, std::align_val_t al, const std::nothrow_t&) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_delete_aligned(void* p, std::align_val_t al) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_delete_sized_aligned(void* p, size_t size, std::align_val_t al) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_delete_aligned_nothrow(void* p, std::align_val_t al, const std::nothrow_t&) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void* tc_newarray_aligned(size_t size, std::align_val_t al); PERFTOOLS_DLL_DECL void* tc_newarray_aligned_nothrow(size_t size, std::align_val_t al, const std::nothrow_t&) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_deletearray_aligned(void* p, std::align_val_t al) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_deletearray_sized_aligned(void* 
p, size_t size, std::align_val_t al) PERFTOOLS_NOTHROW; PERFTOOLS_DLL_DECL void tc_deletearray_aligned_nothrow(void* p, std::align_val_t al, const std::nothrow_t&) PERFTOOLS_NOTHROW; #endif } #endif /* We're only un-defining for public */ #if !defined(GPERFTOOLS_CONFIG_H_) #undef PERFTOOLS_NOTHROW #endif /* GPERFTOOLS_CONFIG_H_ */ #endif /* #ifndef TCMALLOC_TCMALLOC_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/linuxthreads.h
.h
2,236
55
/* Copyright (c) 2005-2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Markus Gutschke */ #ifndef _LINUXTHREADS_H #define _LINUXTHREADS_H /* Include thread_lister.h to get the interface that we implement for linux. */ /* We currently only support certain platforms on Linux. Porting to other * related platforms should not be difficult. 
*/ #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ defined(__mips__) || defined(__PPC__) || defined(__aarch64__) || \ defined(__s390__)) && defined(__linux) /* Define the THREADS symbol to make sure that there is exactly one core dumper * built into the library. */ #define THREADS "Linux /proc" #endif #endif /* _LINUXTHREADS_H */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/stl_allocator.h
.h
3,712
99
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Maxim Lifantsev */ #ifndef BASE_STL_ALLOCATOR_H_ #define BASE_STL_ALLOCATOR_H_ #include <config.h> #include <stddef.h> // for ptrdiff_t #include <limits> #include "base/logging.h" // Generic allocator class for STL objects // that uses a given type-less allocator Alloc, which must provide: // static void* Alloc::Allocate(size_t size); // static void Alloc::Free(void* ptr, size_t size); // // STL_Allocator<T, MyAlloc> provides the same thread-safety // guarantees as MyAlloc. // // Usage example: // set<T, less<T>, STL_Allocator<T, MyAlloc> > my_set; // CAVEAT: Parts of the code below are probably specific // to the STL version(s) we are using. // The code is simply lifted from what std::allocator<> provides. template <typename T, class Alloc> class STL_Allocator { public: typedef size_t size_type; typedef ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; typedef T value_type; template <class T1> struct rebind { typedef STL_Allocator<T1, Alloc> other; }; STL_Allocator() { } STL_Allocator(const STL_Allocator&) { } template <class T1> STL_Allocator(const STL_Allocator<T1, Alloc>&) { } ~STL_Allocator() { } pointer address(reference x) const { return &x; } const_pointer address(const_reference x) const { return &x; } pointer allocate(size_type n, const void* = 0) { RAW_DCHECK((n * sizeof(T)) / sizeof(T) == n, "n is too big to allocate"); return static_cast<T*>(Alloc::Allocate(n * sizeof(T))); } void deallocate(pointer p, size_type n) { Alloc::Free(p, n * sizeof(T)); } size_type max_size() const { return size_t(-1) / sizeof(T); } void construct(pointer p, const T& val) { ::new(p) T(val); } void construct(pointer p) { ::new(p) T(); } void destroy(pointer p) { p->~T(); } // There's no state, so these allocators are always equal bool operator==(const STL_Allocator&) const { return true; } }; #endif // BASE_STL_ALLOCATOR_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/dynamic_annotations.c
.c
7,689
180
/* Copyright (c) 2008-2009, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Kostya Serebryany */ #ifdef __cplusplus # error "This file should be built as pure C to avoid name mangling" #endif #include "config.h" #include <stdlib.h> #include <string.h> #include "base/dynamic_annotations.h" #include "getenv_safe.h" // for TCMallocGetenvSafe #ifdef __GNUC__ /* valgrind.h uses gcc extensions so it won't build with other compilers */ # ifdef HAVE_VALGRIND_H /* prefer the user's copy if they have it */ # include <valgrind.h> # else /* otherwise just use the copy that we have */ # include "third_party/valgrind.h" # endif #endif /* Compiler-based ThreadSanitizer defines DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL = 1 and provides its own definitions of the functions. */ #ifndef DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL # define DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL 0 #endif /* Each function is empty and called (via a macro) only in debug mode. The arguments are captured by dynamic tools at runtime. */ #if DYNAMIC_ANNOTATIONS_ENABLED == 1 \ && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 void AnnotateRWLockCreate(const char *file, int line, const volatile void *lock){} void AnnotateRWLockDestroy(const char *file, int line, const volatile void *lock){} void AnnotateRWLockAcquired(const char *file, int line, const volatile void *lock, long is_w){} void AnnotateRWLockReleased(const char *file, int line, const volatile void *lock, long is_w){} void AnnotateBarrierInit(const char *file, int line, const volatile void *barrier, long count, long reinitialization_allowed) {} void AnnotateBarrierWaitBefore(const char *file, int line, const volatile void *barrier) {} void AnnotateBarrierWaitAfter(const char *file, int line, const volatile void *barrier) {} void AnnotateBarrierDestroy(const char *file, int line, const volatile void *barrier) {} void AnnotateCondVarWait(const char *file, int line, const volatile void *cv, const volatile void *lock){} void AnnotateCondVarSignal(const char *file, int line, const volatile void *cv){} void AnnotateCondVarSignalAll(const char 
*file, int line, const volatile void *cv){} void AnnotatePublishMemoryRange(const char *file, int line, const volatile void *address, long size){} void AnnotateUnpublishMemoryRange(const char *file, int line, const volatile void *address, long size){} void AnnotatePCQCreate(const char *file, int line, const volatile void *pcq){} void AnnotatePCQDestroy(const char *file, int line, const volatile void *pcq){} void AnnotatePCQPut(const char *file, int line, const volatile void *pcq){} void AnnotatePCQGet(const char *file, int line, const volatile void *pcq){} void AnnotateNewMemory(const char *file, int line, const volatile void *mem, long size){} void AnnotateExpectRace(const char *file, int line, const volatile void *mem, const char *description){} void AnnotateBenignRace(const char *file, int line, const volatile void *mem, const char *description){} void AnnotateBenignRaceSized(const char *file, int line, const volatile void *mem, long size, const char *description) {} void AnnotateMutexIsUsedAsCondVar(const char *file, int line, const volatile void *mu){} void AnnotateTraceMemory(const char *file, int line, const volatile void *arg){} void AnnotateThreadName(const char *file, int line, const char *name){} void AnnotateIgnoreReadsBegin(const char *file, int line){} void AnnotateIgnoreReadsEnd(const char *file, int line){} void AnnotateIgnoreWritesBegin(const char *file, int line){} void AnnotateIgnoreWritesEnd(const char *file, int line){} void AnnotateEnableRaceDetection(const char *file, int line, int enable){} void AnnotateNoOp(const char *file, int line, const volatile void *arg){} void AnnotateFlushState(const char *file, int line){} #endif /* DYNAMIC_ANNOTATIONS_ENABLED == 1 && DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */ #if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 static int GetRunningOnValgrind(void) { #ifdef RUNNING_ON_VALGRIND if (RUNNING_ON_VALGRIND) return 1; #endif const char *running_on_valgrind_str = TCMallocGetenvSafe("RUNNING_ON_VALGRIND"); if 
(running_on_valgrind_str) { return strcmp(running_on_valgrind_str, "0") != 0; } return 0; } /* See the comments in dynamic_annotations.h */ int RunningOnValgrind(void) { static volatile int running_on_valgrind = -1; int local_running_on_valgrind = running_on_valgrind; /* C doesn't have thread-safe initialization of statics, and we don't want to depend on pthread_once here, so hack it. */ ANNOTATE_BENIGN_RACE(&running_on_valgrind, "safe hack"); if (local_running_on_valgrind == -1) running_on_valgrind = local_running_on_valgrind = GetRunningOnValgrind(); return local_running_on_valgrind; } #endif /* DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 */ /* See the comments in dynamic_annotations.h */ double ValgrindSlowdown(void) { /* Same initialization hack as in RunningOnValgrind(). */ static volatile double slowdown = 0.0; double local_slowdown = slowdown; ANNOTATE_BENIGN_RACE(&slowdown, "safe hack"); if (RunningOnValgrind() == 0) { return 1.0; } if (local_slowdown == 0.0) { char *env = getenv("VALGRIND_SLOWDOWN"); slowdown = local_slowdown = env ? atof(env) : 50.0; } return local_slowdown; }
C
3D
mcellteam/mcell
libs/gperftools/src/base/linux_syscall_support.h
.h
125,759
2,914
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2005-2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Markus Gutschke */ /* This file includes Linux-specific support functions common to the * coredumper and the thread lister; primarily, this is a collection * of direct system calls, and a couple of symbols missing from * standard header files. 
* There are a few options that the including file can set to control * the behavior of this file: * * SYS_CPLUSPLUS: * The entire header file will normally be wrapped in 'extern "C" { }", * making it suitable for compilation as both C and C++ source. If you * do not want to do this, you can set the SYS_CPLUSPLUS macro to inhibit * the wrapping. N.B. doing so will suppress inclusion of all prerequisite * system header files, too. It is the caller's responsibility to provide * the necessary definitions. * * SYS_ERRNO: * All system calls will update "errno" unless overriden by setting the * SYS_ERRNO macro prior to including this file. SYS_ERRNO should be * an l-value. * * SYS_INLINE: * New symbols will be defined "static inline", unless overridden by * the SYS_INLINE macro. * * SYS_LINUX_SYSCALL_SUPPORT_H * This macro is used to avoid multiple inclusions of this header file. * If you need to include this file more than once, make sure to * unset SYS_LINUX_SYSCALL_SUPPORT_H before each inclusion. * * SYS_PREFIX: * New system calls will have a prefix of "sys_" unless overridden by * the SYS_PREFIX macro. Valid values for this macro are [0..9] which * results in prefixes "sys[0..9]_". It is also possible to set this * macro to -1, which avoids all prefixes. * * This file defines a few internal symbols that all start with "LSS_". * Do not access these symbols from outside this file. They are not part * of the supported API. * * NOTE: This is a stripped down version of the official opensource * version of linux_syscall_support.h, which lives at * http://code.google.com/p/linux-syscall-support/ * It includes only the syscalls that are used in perftools, plus a * few extra. 
Here's the breakdown: * 1) Perftools uses these: grep -rho 'sys_[a-z0-9_A-Z]* *(' src | sort -u * sys__exit( * sys_clone( * sys_close( * sys_fcntl( * sys_fstat( * sys_futex( * sys_getcpu( * sys_getdents64( * sys_getppid( * sys_gettid( * sys_lseek( * sys_mmap( * sys_mremap( * sys_munmap( * sys_open( * sys_pipe( * sys_prctl( * sys_ptrace( * sys_ptrace_detach( * sys_read( * sys_sched_yield( * sys_sigaction( * sys_sigaltstack( * sys_sigdelset( * sys_sigfillset( * sys_sigprocmask( * sys_socket( * sys_stat( * sys_waitpid( * 2) These are used as subroutines of the above: * sys_getpid -- gettid * sys_kill -- ptrace_detach * sys_restore -- sigaction * sys_restore_rt -- sigaction * sys_socketcall -- socket * sys_wait4 -- waitpid * 3) I left these in even though they're not used. They either * complement the above (write vs read) or are variants (rt_sigaction): * sys_fstat64 * sys_llseek * sys_mmap2 * sys_openat * sys_getdents * sys_rt_sigaction * sys_rt_sigprocmask * sys_sigaddset * sys_sigemptyset * sys_stat64 * sys_write */ #ifndef SYS_LINUX_SYSCALL_SUPPORT_H #define SYS_LINUX_SYSCALL_SUPPORT_H /* We currently only support x86-32, x86-64, ARM, MIPS, PPC/PPC64, Aarch64, s390 and s390x * on Linux. * Porting to other related platforms should not be difficult. */ #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ defined(__mips__) || defined(__mips64) || defined(__mips64el__) || defined(__PPC__) || \ defined(__aarch64__) || defined(__s390__)) \ && (defined(__linux)) #ifndef SYS_CPLUSPLUS #ifdef __cplusplus /* Some system header files in older versions of gcc neglect to properly * handle being included from C++. As it appears to be harmless to have * multiple nested 'extern "C"' blocks, just add another one here. 
*/ extern "C" { #endif #include <errno.h> #include <signal.h> #include <stdarg.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include <sys/ptrace.h> #include <sys/resource.h> #include <sys/time.h> #include <sys/types.h> #include <syscall.h> #include <unistd.h> #include <linux/unistd.h> #include <endian.h> #include <fcntl.h> #ifdef __mips__ /* Include definitions of the ABI currently in use. */ #include <sgidefs.h> #endif #endif /* As glibc often provides subtly incompatible data structures (and implicit * wrapper functions that convert them), we provide our own kernel data * structures for use by the system calls. * These structures have been developed by using Linux 2.6.23 headers for * reference. Note though, we do not care about exact API compatibility * with the kernel, and in fact the kernel often does not have a single * API that works across architectures. Instead, we try to mimic the glibc * API where reasonable, and only guarantee ABI compatibility with the * kernel headers. * Most notably, here are a few changes that were made to the structures * defined by kernel headers: * * - we only define structures, but not symbolic names for kernel data * types. For the latter, we directly use the native C datatype * (i.e. "unsigned" instead of "mode_t"). * - in a few cases, it is possible to define identical structures for * both 32bit (e.g. i386) and 64bit (e.g. x86-64) platforms by * standardizing on the 64bit version of the data types. In particular, * this means that we use "unsigned" where the 32bit headers say * "unsigned long". * - overall, we try to minimize the number of cases where we need to * conditionally define different structures. * - the "struct kernel_sigaction" class of structures have been * modified to more closely mimic glibc's API by introducing an * anonymous union for the function pointer. * - a small number of field names had to have an underscore appended to * them, because glibc defines a global macro by the same name. 
*/ /* include/linux/dirent.h */ struct kernel_dirent64 { unsigned long long d_ino; long long d_off; unsigned short d_reclen; unsigned char d_type; char d_name[256]; }; /* include/linux/dirent.h */ struct kernel_dirent { long d_ino; long d_off; unsigned short d_reclen; char d_name[256]; }; /* include/linux/time.h */ struct kernel_timespec { long tv_sec; long tv_nsec; }; /* include/linux/time.h */ struct kernel_timeval { long tv_sec; long tv_usec; }; /* include/linux/resource.h */ struct kernel_rusage { struct kernel_timeval ru_utime; struct kernel_timeval ru_stime; long ru_maxrss; long ru_ixrss; long ru_idrss; long ru_isrss; long ru_minflt; long ru_majflt; long ru_nswap; long ru_inblock; long ru_oublock; long ru_msgsnd; long ru_msgrcv; long ru_nsignals; long ru_nvcsw; long ru_nivcsw; }; #if defined(__i386__) || defined(__arm__) \ || defined(__PPC__) || (defined(__s390__) && !defined(__s390x__)) /* include/asm-{arm,i386,mips,ppc}/signal.h */ struct kernel_old_sigaction { union { void (*sa_handler_)(int); void (*sa_sigaction_)(int, siginfo_t *, void *); }; unsigned long sa_mask; unsigned long sa_flags; void (*sa_restorer)(void); } __attribute__((packed,aligned(4))); #elif (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) #define kernel_old_sigaction kernel_sigaction #elif defined(__aarch64__) // No kernel_old_sigaction defined for arm64. #endif /* Some kernel functions (e.g. sigaction() in 2.6.23) require that the * exactly match the size of the signal set, even though the API was * intended to be extensible. We define our own KERNEL_NSIG to deal with * this. * Please note that glibc provides signals [1.._NSIG-1], whereas the * kernel (and this header) provides the range [1..KERNEL_NSIG]. The * actual number of signals is obviously the same, but the constants * differ by one. 
*/
#ifdef __mips__
#define KERNEL_NSIG 128
#else
#define KERNEL_NSIG 64
#endif

/* include/asm-{arm,i386,mips,x86_64}/signal.h
 * Signal set sized exactly as the kernel expects (see KERNEL_NSIG note
 * above): enough unsigned longs to hold KERNEL_NSIG bits. */
struct kernel_sigset_t {
  unsigned long sig[(KERNEL_NSIG + 8*sizeof(unsigned long) - 1)/
                    (8*sizeof(unsigned long))];
};

/* include/asm-{arm,generic,i386,mips,x86_64,ppc}/signal.h */
struct kernel_sigaction {
#ifdef __mips__
  /* MIPS puts sa_flags first and has no sa_restorer field. */
  unsigned long      sa_flags;
  union {
    void             (*sa_handler_)(int);
    void             (*sa_sigaction_)(int, siginfo_t *, void *);
  };
  struct kernel_sigset_t sa_mask;
#else
  union {
    void             (*sa_handler_)(int);
    void             (*sa_sigaction_)(int, siginfo_t *, void *);
  };
  unsigned long      sa_flags;
  void               (*sa_restorer)(void);
  struct kernel_sigset_t sa_mask;
#endif
};

/* include/asm-{arm,i386,mips,ppc,s390}/stat.h
 * Per-architecture stat buffers. Field order and padding must match the
 * kernel ABI exactly; do not reorder. */
#ifdef __mips__
#if (_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32)
/* On 64bit MIPS ABIs this layout IS the native stat. */
struct kernel_stat {
#else
struct kernel_stat64 {
#endif
  unsigned           st_dev;
  unsigned           __pad0[3];
  unsigned long long st_ino;
  unsigned           st_mode;
  unsigned           st_nlink;
  unsigned           st_uid;
  unsigned           st_gid;
  unsigned           st_rdev;
  unsigned           __pad1[3];
  long long          st_size;
  unsigned           st_atime_;
  unsigned           st_atime_nsec_;
  unsigned           st_mtime_;
  unsigned           st_mtime_nsec_;
  unsigned           st_ctime_;
  unsigned           st_ctime_nsec_;
  unsigned           st_blksize;
  unsigned           __pad2;
  unsigned long long st_blocks;
};
#elif defined __PPC__
struct kernel_stat64 {
  unsigned long long st_dev;
  unsigned long long st_ino;
  unsigned           st_nlink;
  unsigned           st_mode;
  unsigned           st_uid;
  unsigned           st_gid;
  int                __pad2;
  unsigned long long st_rdev;
  long long          st_size;
  long long          st_blksize;
  long long          st_blocks;
  kernel_timespec    st_atim;
  kernel_timespec    st_mtim;
  kernel_timespec    st_ctim;
  unsigned long      __unused4;
  unsigned long      __unused5;
  unsigned long      __unused6;
};
#else
struct kernel_stat64 {
  unsigned long long st_dev;
  unsigned char      __pad0[4];
  unsigned           __st_ino;
  unsigned           st_mode;
  unsigned           st_nlink;
  unsigned           st_uid;
  unsigned           st_gid;
  unsigned long long st_rdev;
  unsigned char      __pad3[4];
  long long          st_size;
  unsigned           st_blksize;
  unsigned long long st_blocks;
  unsigned           st_atime_;
  unsigned           st_atime_nsec_;
  unsigned           st_mtime_;
  unsigned           st_mtime_nsec_;
  unsigned           st_ctime_;
  unsigned           st_ctime_nsec_;
  unsigned long long st_ino;
};
#endif

/* include/asm-{arm,generic,i386,mips,x86_64,ppc,s390}/stat.h */
#if defined(__i386__) || defined(__arm__)
struct kernel_stat {
  /* The kernel headers suggest that st_dev and st_rdev should be 32bit
   * quantities encoding 12bit major and 20bit minor numbers in an interleaved
   * format. In reality, we do not see useful data in the top bits. So,
   * we'll leave the padding in here, until we find a better solution.
   */
  unsigned short     st_dev;
  short              pad1;
  unsigned           st_ino;
  unsigned short     st_mode;
  unsigned short     st_nlink;
  unsigned short     st_uid;
  unsigned short     st_gid;
  unsigned short     st_rdev;
  short              pad2;
  unsigned           st_size;
  unsigned           st_blksize;
  unsigned           st_blocks;
  unsigned           st_atime_;
  unsigned           st_atime_nsec_;
  unsigned           st_mtime_;
  unsigned           st_mtime_nsec_;
  unsigned           st_ctime_;
  unsigned           st_ctime_nsec_;
  unsigned           __unused4;
  unsigned           __unused5;
};
#elif defined(__x86_64__)
struct kernel_stat {
  uint64_t           st_dev;
  uint64_t           st_ino;
  uint64_t           st_nlink;
  unsigned           st_mode;
  unsigned           st_uid;
  unsigned           st_gid;
  unsigned           __pad0;
  uint64_t           st_rdev;
  int64_t            st_size;
  int64_t            st_blksize;
  int64_t            st_blocks;
  uint64_t           st_atime_;
  uint64_t           st_atime_nsec_;
  uint64_t           st_mtime_;
  uint64_t           st_mtime_nsec_;
  uint64_t           st_ctime_;
  uint64_t           st_ctime_nsec_;
  int64_t            __unused[3];
};
#elif defined(__PPC__)
struct kernel_stat {
  unsigned long long st_dev;
  unsigned long      st_ino;
  unsigned long      st_nlink;
  unsigned long      st_mode;
  unsigned           st_uid;
  unsigned           st_gid;
  int                __pad2;
  unsigned long long st_rdev;
  long               st_size;
  unsigned long      st_blksize;
  unsigned long      st_blocks;
  kernel_timespec    st_atim;
  kernel_timespec    st_mtim;
  kernel_timespec    st_ctim;
  unsigned long      __unused4;
  unsigned long      __unused5;
  unsigned long      __unused6;
};
#elif defined(__mips__) \
  && !(_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32)
struct kernel_stat {
  unsigned           st_dev;
  int                st_pad1[3];
  unsigned           st_ino;
  unsigned           st_mode;
  unsigned           st_nlink;
  unsigned           st_uid;
  unsigned           st_gid;
  unsigned           st_rdev;
  int                st_pad2[2];
  long               st_size;
  int                st_pad3;
  long               st_atime_;
  long               st_atime_nsec_;
  long               st_mtime_;
  long               st_mtime_nsec_;
  long               st_ctime_;
  long               st_ctime_nsec_;
  int                st_blksize;
  int                st_blocks;
  int                st_pad4[14];
};
#elif defined(__aarch64__)
struct kernel_stat {
  unsigned long      st_dev;
  unsigned long      st_ino;
  unsigned int       st_mode;
  unsigned int       st_nlink;
  unsigned int       st_uid;
  unsigned int       st_gid;
  unsigned long      st_rdev;
  unsigned long      __pad1;
  long               st_size;
  int                st_blksize;
  int                __pad2;
  long               st_blocks;
  long               st_atime_;
  unsigned long      st_atime_nsec_;
  long               st_mtime_;
  unsigned long      st_mtime_nsec_;
  long               st_ctime_;
  unsigned long      st_ctime_nsec_;
  unsigned int       __unused4;
  unsigned int       __unused5;
};
#elif defined(__s390x__)
struct kernel_stat {
  unsigned long      st_dev;
  unsigned long      st_ino;
  unsigned long      st_nlink;
  unsigned int       st_mode;
  unsigned int       st_uid;
  unsigned int       st_gid;
  unsigned int       __pad1;
  unsigned long      st_rdev;
  unsigned long      st_size;
  unsigned long      st_atime_;
  unsigned long      st_atime_nsec_;
  unsigned long      st_mtime_;
  unsigned long      st_mtime_nsec_;
  unsigned long      st_ctime_;
  unsigned long      st_ctime_nsec_;
  unsigned long      st_blksize;
  long               st_blocks;
  unsigned long      __unused[3];
};
#elif defined(__s390__)
struct kernel_stat {
  unsigned short     st_dev;
  unsigned short     __pad1;
  unsigned long      st_ino;
  unsigned short     st_mode;
  unsigned short     st_nlink;
  unsigned short     st_uid;
  unsigned short     st_gid;
  unsigned short     st_rdev;
  unsigned short     __pad2;
  unsigned long      st_size;
  unsigned long      st_blksize;
  unsigned long      st_blocks;
  unsigned long      st_atime_;
  unsigned long      st_atime_nsec_;
  unsigned long      st_mtime_;
  unsigned long      st_mtime_nsec_;
  unsigned long      st_ctime_;
  unsigned long      st_ctime_nsec_;
  unsigned long      __unused4;
  unsigned long      __unused5;
};
#endif

/* Definitions missing from the standard header files.
 * Each is guarded by #ifndef so the system headers always win when they
 * do provide the value. */
#ifndef O_DIRECTORY
#if defined(__arm__)
#define O_DIRECTORY             0040000
#else
#define O_DIRECTORY             0200000
#endif
#endif
#ifndef PR_GET_DUMPABLE
#define PR_GET_DUMPABLE         3
#endif
#ifndef PR_SET_DUMPABLE
#define PR_SET_DUMPABLE         4
#endif
#ifndef AT_FDCWD
#define AT_FDCWD                (-100)
#endif
#ifndef AT_SYMLINK_NOFOLLOW
#define AT_SYMLINK_NOFOLLOW     0x100
#endif
#ifndef AT_REMOVEDIR
#define AT_REMOVEDIR            0x200
#endif
#ifndef MREMAP_FIXED
#define MREMAP_FIXED            2
#endif
#ifndef SA_RESTORER
#define SA_RESTORER             0x04000000
#endif

/* ---------------------------------------------------------------------
 * Per-architecture fallback system call numbers.
 * Older toolchain headers may lack the newer __NR_* constants, so each
 * one is defined here only if the system headers did not already do so
 * (#ifndef guards). The numbers themselves are architecture-specific.
 * ------------------------------------------------------------------- */
#if defined(__i386__)
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction       174
#define __NR_rt_sigprocmask     175
#endif
#ifndef __NR_stat64
#define __NR_stat64             195
#endif
#ifndef __NR_fstat64
#define __NR_fstat64            197
#endif
#ifndef __NR_getdents64
#define __NR_getdents64         220
#endif
#ifndef __NR_gettid
#define __NR_gettid             224
#endif
#ifndef __NR_futex
#define __NR_futex              240
#endif
#ifndef __NR_openat
#define __NR_openat             295
#endif
#ifndef __NR_getcpu
#define __NR_getcpu             318
#endif
/* End of i386 definitions */
#elif defined(__arm__)
#ifndef __syscall
#if defined(__thumb__) || defined(__ARM_EABI__)
/* EABI/Thumb: the syscall number goes in r7, which must be saved and
 * restored around the trap ("safe" variant). */
#define __SYS_REG(name) register long __sysreg __asm__("r6") = __NR_##name;
#define __SYS_REG_LIST(regs...) [sysreg] "r" (__sysreg) , ##regs
#define __syscall(name) "swi\t0"
#define __syscall_safe(name)                     \
  "push  {r7}\n"                                 \
  "mov   r7,%[sysreg]\n"                         \
  __syscall(name)"\n"                            \
  "pop   {r7}"
#else
#define __SYS_REG(name)
#define __SYS_REG_LIST(regs...) regs
#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
#define __syscall_safe(name) __syscall(name)
#endif
#endif
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction       (__NR_SYSCALL_BASE + 174)
#define __NR_rt_sigprocmask     (__NR_SYSCALL_BASE + 175)
#endif
#ifndef __NR_stat64
#define __NR_stat64             (__NR_SYSCALL_BASE + 195)
#endif
#ifndef __NR_fstat64
#define __NR_fstat64            (__NR_SYSCALL_BASE + 197)
#endif
#ifndef __NR_getdents64
#define __NR_getdents64         (__NR_SYSCALL_BASE + 217)
#endif
#ifndef __NR_gettid
#define __NR_gettid             (__NR_SYSCALL_BASE + 224)
#endif
#ifndef __NR_futex
#define __NR_futex              (__NR_SYSCALL_BASE + 240)
#endif
/* End of ARM definitions */
#elif defined(__x86_64__)
#ifndef __NR_gettid
#define __NR_gettid             186
#endif
#ifndef __NR_futex
#define __NR_futex              202
#endif
#ifndef __NR_getdents64
#define __NR_getdents64         217
#endif
#ifndef __NR_openat
#define __NR_openat             257
#endif
/* End of x86-64 definitions */
#elif defined(__mips__)
#if _MIPS_SIM == _MIPS_SIM_ABI32
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction       (__NR_Linux + 194)
#define __NR_rt_sigprocmask     (__NR_Linux + 195)
#endif
#ifndef __NR_stat64
#define __NR_stat64             (__NR_Linux + 213)
#endif
#ifndef __NR_fstat64
#define __NR_fstat64            (__NR_Linux + 215)
#endif
#ifndef __NR_getdents64
#define __NR_getdents64         (__NR_Linux + 219)
#endif
#ifndef __NR_gettid
#define __NR_gettid             (__NR_Linux + 222)
#endif
#ifndef __NR_futex
#define __NR_futex              (__NR_Linux + 238)
#endif
#ifndef __NR_openat
#define __NR_openat             (__NR_Linux + 288)
#endif
#ifndef __NR_fstatat
#define __NR_fstatat            (__NR_Linux + 293)
#endif
#ifndef __NR_getcpu
#define __NR_getcpu             (__NR_Linux + 312)
#endif
/* End of MIPS (old 32bit API) definitions */
#elif  (_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32)
#ifndef __NR_gettid
#define __NR_gettid             (__NR_Linux + 178)
#endif
#ifndef __NR_futex
#define __NR_futex              (__NR_Linux + 194)
#endif
#ifndef __NR_openat
#define __NR_openat             (__NR_Linux + 247)
#endif
#ifndef __NR_fstatat
#define __NR_fstatat            (__NR_Linux + 252)
#endif
#ifndef __NR_getcpu
#define __NR_getcpu             (__NR_Linux + 271)
#endif
/* End of MIPS (64bit API) definitions */
#else
#ifndef __NR_gettid
#define __NR_gettid             (__NR_Linux + 178)
#endif
#ifndef __NR_futex
#define __NR_futex              (__NR_Linux + 194)
#endif
#ifndef __NR_openat
#define __NR_openat             (__NR_Linux + 251)
#endif
#ifndef __NR_fstatat
#define __NR_fstatat            (__NR_Linux + 256)
#endif
#ifndef __NR_getcpu
#define __NR_getcpu             (__NR_Linux + 275)
#endif
/* End of MIPS (new 32bit API) definitions */
#endif
/* End of MIPS definitions */
#elif defined(__PPC__)
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction       173
#define __NR_rt_sigprocmask     174
#endif
#ifndef __NR_stat64
#define __NR_stat64             195
#endif
#ifndef __NR_fstat64
#define __NR_fstat64            197
#endif
#ifndef __NR_socket
#define __NR_socket             198
#endif
#ifndef __NR_getdents64
#define __NR_getdents64         202
#endif
#ifndef __NR_gettid
#define __NR_gettid             207
#endif
#ifndef __NR_futex
#define __NR_futex              221
#endif
#ifndef __NR_openat
#define __NR_openat             286
#endif
#ifndef __NR_getcpu
#define __NR_getcpu             302
#endif
/* End of powerpc definitions */
#elif defined(__aarch64__)
#ifndef __NR_fstatat
#define __NR_fstatat            79
#endif
/* End of aarch64 definitions */
#elif defined(__s390__)
#ifndef __NR_quotactl
#define __NR_quotactl           131
#endif
#ifndef __NR_rt_sigreturn
#define __NR_rt_sigreturn       173
#endif
#ifndef __NR_rt_sigaction
#define __NR_rt_sigaction       174
#endif
#ifndef __NR_rt_sigprocmask
#define __NR_rt_sigprocmask     175
#endif
#ifndef __NR_rt_sigpending
#define __NR_rt_sigpending      176
#endif
#ifndef __NR_rt_sigsuspend
#define __NR_rt_sigsuspend      179
#endif
#ifndef __NR_pread64
#define __NR_pread64            180
#endif
#ifndef __NR_pwrite64
#define __NR_pwrite64           181
#endif
#ifndef __NR_getdents64
#define __NR_getdents64         220
#endif
#ifndef __NR_readahead
#define __NR_readahead          222
#endif
#ifndef __NR_setxattr
#define __NR_setxattr           224
#endif
#ifndef __NR_lsetxattr
#define __NR_lsetxattr          225
#endif
#ifndef __NR_getxattr
#define __NR_getxattr           227
#endif
#ifndef __NR_lgetxattr
#define __NR_lgetxattr          228
#endif
#ifndef __NR_listxattr
#define __NR_listxattr          230
#endif
#ifndef __NR_llistxattr
#define __NR_llistxattr         231
#endif
#ifndef __NR_gettid
#define __NR_gettid             236
#endif
#ifndef __NR_tkill
#define __NR_tkill              237
#endif
#ifndef __NR_futex
#define __NR_futex              238
#endif
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity  239
#endif
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity  240
#endif
#ifndef __NR_set_tid_address
#define __NR_set_tid_address    252
#endif
#ifndef __NR_clock_gettime
#define __NR_clock_gettime      260
#endif
#ifndef __NR_clock_getres
#define __NR_clock_getres       261
#endif
#ifndef __NR_statfs64
#define __NR_statfs64           265
#endif
#ifndef __NR_fstatfs64
#define __NR_fstatfs64          266
#endif
#ifndef __NR_ioprio_set
#define __NR_ioprio_set         282
#endif
#ifndef __NR_ioprio_get
#define __NR_ioprio_get         283
#endif
#ifndef __NR_openat
#define __NR_openat             288
#endif
#ifndef __NR_unlinkat
#define __NR_unlinkat           294
#endif
#ifndef __NR_move_pages
#define __NR_move_pages         310
#endif
#ifndef __NR_getcpu
#define __NR_getcpu             311
#endif
#ifndef __NR_fallocate
#define __NR_fallocate          314
#endif
/* Some syscalls are named/numbered differently between s390 and s390x. */
#ifdef __s390x__
# ifndef __NR_getrlimit
# define __NR_getrlimit         191
# endif
# ifndef __NR_setresuid
# define __NR_setresuid         208
# endif
# ifndef __NR_getresuid
# define __NR_getresuid         209
# endif
# ifndef __NR_setresgid
# define __NR_setresgid         210
# endif
# ifndef __NR_getresgid
# define __NR_getresgid         211
# endif
# ifndef __NR_setfsuid
# define __NR_setfsuid          215
# endif
# ifndef __NR_setfsgid
# define __NR_setfsgid          216
# endif
# ifndef __NR_fadvise64
# define __NR_fadvise64         253
# endif
# ifndef __NR_newfstatat
# define __NR_newfstatat        293
# endif
#else /* __s390x__ */
# ifndef __NR_getrlimit
# define __NR_getrlimit         76
# endif
# ifndef __NR_setfsuid
# define __NR_setfsuid          138
# endif
# ifndef __NR_setfsgid
# define __NR_setfsgid          139
# endif
# ifndef __NR_setresuid
# define __NR_setresuid         164
# endif
# ifndef __NR_getresuid
# define __NR_getresuid         165
# endif
# ifndef __NR_setresgid
# define __NR_setresgid         170
# endif
# ifndef __NR_getresgid
# define __NR_getresgid         171
# endif
# ifndef __NR_ugetrlimit
# define __NR_ugetrlimit        191
# endif
# ifndef __NR_mmap2
# define __NR_mmap2             192
# endif
# ifndef __NR_setresuid32
# define __NR_setresuid32       208
# endif
# ifndef __NR_getresuid32
# define __NR_getresuid32       209
# endif
# ifndef __NR_setresgid32
# define __NR_setresgid32       210
# endif
# ifndef __NR_getresgid32
# define __NR_getresgid32       211
# endif
# ifndef __NR_setfsuid32
# define __NR_setfsuid32        215
# endif
# ifndef __NR_setfsgid32
# define __NR_setfsgid32        216
# endif
# ifndef __NR_fadvise64_64
# define __NR_fadvise64_64      264
# endif
# ifndef __NR_fstatat64
# define __NR_fstatat64         293
# endif
#endif /* __s390__ */
/* End of s390/s390x definitions */
#endif

/* After forking, we must make sure to only call system calls. */
#if __BOUNDED_POINTERS__
#error "Need to port invocations of syscalls for bounded ptrs"
#else
/* The core dumper and the thread lister get executed after threads
 * have been suspended. As a consequence, we cannot call any functions
 * that acquire locks.
 * Unfortunately, libc wraps most system calls
 * (e.g. in order to implement pthread_atfork, and to make calls
 * cancellable), which means we cannot call these functions. Instead,
 * we have to call syscall() directly.
 */
#undef LSS_ERRNO
#ifdef SYS_ERRNO
/* Allow the including file to override the location of errno. This can
 * be useful when using clone() with the CLONE_VM option.
 */
#define LSS_ERRNO SYS_ERRNO
#else
#define LSS_ERRNO errno
#endif

#undef LSS_INLINE
#ifdef SYS_INLINE
#define LSS_INLINE SYS_INLINE
#else
#define LSS_INLINE static inline
#endif

/* Allow the including file to override the prefix used for all new
 * system calls. By default, it will be set to "sys_".
 */
#undef LSS_NAME
#ifndef SYS_PREFIX
#define LSS_NAME(name) sys_##name
#elif SYS_PREFIX < 0
#define LSS_NAME(name) name
#elif SYS_PREFIX == 0
#define LSS_NAME(name) sys0_##name
#elif SYS_PREFIX == 1
#define LSS_NAME(name) sys1_##name
#elif SYS_PREFIX == 2
#define LSS_NAME(name) sys2_##name
#elif SYS_PREFIX == 3
#define LSS_NAME(name) sys3_##name
#elif SYS_PREFIX == 4
#define LSS_NAME(name) sys4_##name
#elif SYS_PREFIX == 5
#define LSS_NAME(name) sys5_##name
#elif SYS_PREFIX == 6
#define LSS_NAME(name) sys6_##name
#elif SYS_PREFIX == 7
#define LSS_NAME(name) sys7_##name
#elif SYS_PREFIX == 8
#define LSS_NAME(name) sys8_##name
#elif SYS_PREFIX == 9
#define LSS_NAME(name) sys9_##name
#endif

/* LSS_RETURN(type, res[, err]) converts a raw kernel return value into
 * the glibc-style convention: on failure it stores the error code into
 * LSS_ERRNO and returns -1. The error-detection mechanism differs per
 * architecture, hence the different arities below. */
#undef LSS_RETURN
#if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
     defined(__aarch64__) || defined(__s390__))
/* Failing system calls return a negative result in the range of
 * -1..-4095. These are "errno" values with the sign inverted.
 */
#define LSS_RETURN(type, res)                                                 \
    do {                                                                      \
      if ((unsigned long)(res) >= (unsigned long)(-4095)) {                   \
        LSS_ERRNO = -(res);                                                   \
        res = -1;                                                             \
      }                                                                       \
      return (type) (res);                                                    \
    } while (0)
#elif defined(__mips__)
/* On MIPS, failing system calls return -1, and set errno in a
 * separate CPU register.
 */
#define LSS_RETURN(type, res, err)                                            \
    do {                                                                      \
      if (err) {                                                              \
        LSS_ERRNO = (res);                                                    \
        res = -1;                                                             \
      }                                                                       \
      return (type) (res);                                                    \
    } while (0)
#elif defined(__PPC__)
/* On PPC, failing system calls return -1, and set errno in a
 * separate CPU register. See linux/unistd.h.
 */
#define LSS_RETURN(type, res, err)                                            \
    do {                                                                      \
      if (err & 0x10000000 ) {                                                \
        LSS_ERRNO = (res);                                                    \
        res = -1;                                                             \
      }                                                                       \
      return (type) (res);                                                    \
    } while (0)
#endif

#if defined(__i386__)
#if defined(NO_FRAME_POINTER) && (100 * __GNUC__ + __GNUC_MINOR__ >= 404)
/* This only works for GCC-4.4 and above -- the first version to use
 * .cfi directives for dwarf unwind info. */
#define CFI_ADJUST_CFA_OFFSET(adjust) \
  ".cfi_adjust_cfa_offset " #adjust "\n"
#else
#define CFI_ADJUST_CFA_OFFSET(adjust) /**/
#endif

/* In PIC mode (e.g. when building shared libraries), gcc for i386
 * reserves ebx. Unfortunately, most distribution ship with implementations
 * of _syscallX() which clobber ebx.
 * Also, most definitions of _syscallX() neglect to mark "memory" as being
 * clobbered. This causes problems with compilers, that do a better job
 * at optimizing across __asm__ calls.
 * So, we just have to redefine all of the _syscallX() macros.
 */
#undef LSS_BODY
#define LSS_BODY(type,args...)                                                \
    long __res;                                                               \
    __asm__ __volatile__("push %%ebx\n"                                       \
                         CFI_ADJUST_CFA_OFFSET(4)                             \
                         "movl %2,%%ebx\n"                                    \
                         "int $0x80\n"                                        \
                         "pop %%ebx\n"                                        \
                         CFI_ADJUST_CFA_OFFSET(-4)                            \
                         args                                                 \
                         : "esp", "memory");                                  \
    LSS_RETURN(type,__res)
#undef _syscall0
#define _syscall0(type,name)                                                  \
  type LSS_NAME(name)(void) {                                                 \
    long __res;                                                               \
    __asm__ volatile("int $0x80"                                              \
                     : "=a" (__res)                                           \
                     : "0" (__NR_##name)                                      \
                     : "memory");                                             \
    LSS_RETURN(type,__res);                                                   \
  }
#undef _syscall1
#define _syscall1(type,name,type1,arg1)                                       \
  type LSS_NAME(name)(type1 arg1) {                                           \
    LSS_BODY(type,                                                            \
         : "=a" (__res)                                                       \
         : "0" (__NR_##name), "ri" ((long)(arg1)));                           \
  }
#undef _syscall2
#define _syscall2(type,name,type1,arg1,type2,arg2)                            \
  type LSS_NAME(name)(type1 arg1,type2 arg2) {                                \
    LSS_BODY(type,                                                            \
         : "=a" (__res)                                                       \
         : "0" (__NR_##name),"ri" ((long)(arg1)), "c" ((long)(arg2)));        \
  }
#undef _syscall3
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)                 \
  type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3) {                     \
    LSS_BODY(type,                                                            \
         : "=a" (__res)                                                       \
         : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)),        \
           "d" ((long)(arg3)));                                               \
  }
#undef _syscall4
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)      \
  type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {       \
    LSS_BODY(type,                                                            \
         : "=a" (__res)                                                       \
         : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)),        \
           "d" ((long)(arg3)),"S" ((long)(arg4)));                            \
  }
#undef _syscall5
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,      \
                  type5,arg5)                                                 \
  type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4,         \
                      type5 arg5) {                                           \
    long __res;                                                               \
    __asm__ __volatile__("push %%ebx\n"                                       \
                         "movl %2,%%ebx\n"                                    \
                         "movl %1,%%eax\n"                                    \
                         "int $0x80\n"                                        \
                         "pop %%ebx"                                          \
                         : "=a" (__res)                                       \
                         : "i" (__NR_##name), "ri" ((long)(arg1)),            \
                           "c" ((long)(arg2)), "d" ((long)(arg3)),            \
                           "S" ((long)(arg4)), "D" ((long)(arg5))             \
                         : "esp", "memory");                                  \
    LSS_RETURN(type,__res);                                                   \
  }
#undef _syscall6
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,      \
                  type5,arg5,type6,arg6)                                      \
  type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4,         \
                      type5 arg5, type6 arg6) {                               \
    long __res;                                                               \
    struct { long __a1; long __a6; } __s = { (long)arg1, (long) arg6 };       \
    __asm__ __volatile__("push %%ebp\n"                                       \
                         "push %%ebx\n"                                       \
                         "movl 4(%2),%%ebp\n"                                 \
                         "movl 0(%2), %%ebx\n"                                \
                         "movl %1,%%eax\n"                                    \
                         "int $0x80\n"                                        \
                         "pop %%ebx\n"                                        \
                         "pop %%ebp"                                          \
                         : "=a" (__res)                                       \
                         : "i" (__NR_##name),  "0" ((long)(&__s)),            \
                           "c" ((long)(arg2)), "d" ((long)(arg3)),            \
                           "S" ((long)(arg4)), "D" ((long)(arg5))             \
                         : "esp", "memory");                                  \
    LSS_RETURN(type,__res);                                                   \
  }
  /* Minimal clone() wrapper for i386: validates fn/child_stack, aligns
   * the child stack, runs fn(arg) in the child and then _exit()s with
   * its return value. The parent gets the raw clone() result. */
  LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
                                 int flags, void *arg, int *parent_tidptr,
                                 void *newtls, int *child_tidptr) {
    long __res;
    __asm__ __volatile__(/* if (fn == NULL)
                          *   return -EINVAL;
                          */
                         "movl   %3,%%ecx\n"
                         "jecxz  1f\n"

                         /* if (child_stack == NULL)
                          *   return -EINVAL;
                          */
                         "movl   %4,%%ecx\n"
                         "jecxz  1f\n"

                         /* Set up alignment of the child stack:
                          * child_stack = (child_stack & ~0xF) - 20;
                          */
                         "andl   $-16,%%ecx\n"
                         "subl   $20,%%ecx\n"

                         /* Push "arg" and "fn" onto the stack that will be
                          * used by the child.
                          */
                         "movl   %6,%%eax\n"
                         "movl   %%eax,4(%%ecx)\n"
                         "movl   %3,%%eax\n"
                         "movl   %%eax,(%%ecx)\n"

                         /* %eax = syscall(%eax = __NR_clone,
                          *                %ebx = flags,
                          *                %ecx = child_stack,
                          *                %edx = parent_tidptr,
                          *                %esi = newtls,
                          *                %edi = child_tidptr)
                          * Also, make sure that %ebx gets preserved as it is
                          * used in PIC mode.
                          */
                         "movl   %8,%%esi\n"
                         "movl   %7,%%edx\n"
                         "movl   %5,%%eax\n"
                         "movl   %9,%%edi\n"
                         "pushl  %%ebx\n"
                         "movl   %%eax,%%ebx\n"
                         "movl   %2,%%eax\n"
                         "int    $0x80\n"

                         /* In the parent: restore %ebx
                          * In the child:  move "fn" into %ebx
                          */
                         "popl   %%ebx\n"

                         /* if (%eax != 0)
                          *   return %eax;
                          */
                         "test   %%eax,%%eax\n"
                         "jnz    1f\n"

                         /* In the child, now. Terminate frame pointer chain.
                          */
                         "movl   $0,%%ebp\n"

                         /* Call "fn". "arg" is already on the stack.
                          */
                         "call   *%%ebx\n"

                         /* Call _exit(%ebx). Unfortunately older versions
                          * of gcc restrict the number of arguments that can
                          * be passed to asm(). So, we need to hard-code the
                          * system call number.
                          */
                         "movl   %%eax,%%ebx\n"
                         "movl   $1,%%eax\n"
                         "int    $0x80\n"

                         /* Return to parent.
                          */
                         "1:\n"
                         : "=a" (__res)
                         : "0"(-EINVAL), "i"(__NR_clone),
                           "m"(fn), "m"(child_stack), "m"(flags), "m"(arg),
                           "m"(parent_tidptr), "m"(newtls), "m"(child_tidptr)
                         : "esp", "memory", "ecx", "edx", "esi", "edi");
    LSS_RETURN(int, __res);
  }

  LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
    /* On i386, the kernel does not know how to return from a signal
     * handler. Instead, it relies on user space to provide a
     * restorer function that calls the {rt_,}sigreturn() system call.
     * Unfortunately, we cannot just reference the glibc version of this
     * function, as glibc goes out of its way to make it inaccessible.
     */
    void (*res)(void);
    __asm__ __volatile__("call   2f\n"
                         "0:.align 16\n"
                         "1:movl %1,%%eax\n"
                         "int  $0x80\n"
                         "2:popl %0\n"
                         "addl $(1b-0b),%0\n"
                         : "=a" (res)
                         : "i"  (__NR_rt_sigreturn));
    return res;
  }
  LSS_INLINE void (*LSS_NAME(restore)(void))(void) {
    /* On i386, the kernel does not know how to return from a signal
     * handler. Instead, it relies on user space to provide a
     * restorer function that calls the {rt_,}sigreturn() system call.
     * Unfortunately, we cannot just reference the glibc version of this
     * function, as glibc goes out of its way to make it inaccessible.
     */
    void (*res)(void);
    __asm__ __volatile__("call   2f\n"
                         "0:.align 16\n"
                         "1:pop  %%eax\n"
                         "movl %1,%%eax\n"
                         "int  $0x80\n"
                         "2:popl %0\n"
                         "addl $(1b-0b),%0\n"
                         : "=a" (res)
                         : "i"  (__NR_sigreturn));
    return res;
  }
#elif defined(__x86_64__)
  /* There are no known problems with any of the _syscallX() macros
   * currently shipping for x86_64, but we still need to be able to define
   * our own version so that we can override the location of the errno
   * location (e.g. when using the clone() system call with the CLONE_VM
   * option).
   */
  #undef  LSS_ENTRYPOINT
  #define LSS_ENTRYPOINT "syscall\n"

  /* The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
   * We need to explicitly cast to an unsigned 64 bit type to avoid implicit
   * sign extension. We can't cast pointers directly because those are
   * 32 bits, and gcc will dump ugly warnings about casting from a pointer
   * to an integer of a different size.
   */
  #undef  LSS_SYSCALL_ARG
  #define LSS_SYSCALL_ARG(a) ((uint64_t)(uintptr_t)(a))
  #undef  _LSS_RETURN
  #define _LSS_RETURN(type, res, cast)                                        \
      do {                                                                    \
        if ((uint64_t)(res) >= (uint64_t)(-4095)) {                           \
          LSS_ERRNO = -(res);                                                 \
          res = -1;                                                           \
        }                                                                     \
        return (type)(cast)(res);                                             \
      } while (0)
  #undef  LSS_RETURN
  #define LSS_RETURN(type, res) _LSS_RETURN(type, res, uintptr_t)

  /* LSS_BODY_ASM/ARG/CLOBBER<n> generate the assembly, input-constraint
   * list, and clobber list for an n-argument syscall. Args 1-3 go in
   * rdi/rsi/rdx via constraints; args 4-6 are moved into r10/r8/r9 by
   * the generated mov instructions (the kernel ABI differs from the C
   * calling convention for those). */
  #undef  _LSS_BODY
  #define _LSS_BODY(nr, type, name, cast, ...)                                \
            long long __res;                                                  \
            __asm__ __volatile__(LSS_BODY_ASM##nr LSS_ENTRYPOINT              \
              : "=a" (__res)                                                  \
              : "0" (__NR_##name) LSS_BODY_ARG##nr(__VA_ARGS__)               \
              : LSS_BODY_CLOBBER##nr "r11", "rcx", "memory");                 \
            _LSS_RETURN(type, __res, cast)
  #undef  LSS_BODY
  #define LSS_BODY(nr, type, name, args...) \
          _LSS_BODY(nr, type, name, uintptr_t, ## args)

  #undef  LSS_BODY_ASM0
  #undef  LSS_BODY_ASM1
  #undef  LSS_BODY_ASM2
  #undef  LSS_BODY_ASM3
  #undef  LSS_BODY_ASM4
  #undef  LSS_BODY_ASM5
  #undef  LSS_BODY_ASM6
  #define LSS_BODY_ASM0
  #define LSS_BODY_ASM1 LSS_BODY_ASM0
  #define LSS_BODY_ASM2 LSS_BODY_ASM1
  #define LSS_BODY_ASM3 LSS_BODY_ASM2
  #define LSS_BODY_ASM4 LSS_BODY_ASM3 "movq %5,%%r10;"
  #define LSS_BODY_ASM5 LSS_BODY_ASM4 "movq %6,%%r8;"
  #define LSS_BODY_ASM6 LSS_BODY_ASM5 "movq %7,%%r9;"

  #undef  LSS_BODY_CLOBBER0
  #undef  LSS_BODY_CLOBBER1
  #undef  LSS_BODY_CLOBBER2
  #undef  LSS_BODY_CLOBBER3
  #undef  LSS_BODY_CLOBBER4
  #undef  LSS_BODY_CLOBBER5
  #undef  LSS_BODY_CLOBBER6
  #define LSS_BODY_CLOBBER0
  #define LSS_BODY_CLOBBER1 LSS_BODY_CLOBBER0
  #define LSS_BODY_CLOBBER2 LSS_BODY_CLOBBER1
  #define LSS_BODY_CLOBBER3 LSS_BODY_CLOBBER2
  #define LSS_BODY_CLOBBER4 LSS_BODY_CLOBBER3 "r10",
  #define LSS_BODY_CLOBBER5 LSS_BODY_CLOBBER4 "r8",
  #define LSS_BODY_CLOBBER6 LSS_BODY_CLOBBER5 "r9",

  #undef  LSS_BODY_ARG0
  #undef  LSS_BODY_ARG1
  #undef  LSS_BODY_ARG2
  #undef  LSS_BODY_ARG3
  #undef  LSS_BODY_ARG4
  #undef  LSS_BODY_ARG5
  #undef  LSS_BODY_ARG6
  #define LSS_BODY_ARG0()
  #define LSS_BODY_ARG1(arg1) \
    LSS_BODY_ARG0(), "D" (arg1)
  #define LSS_BODY_ARG2(arg1, arg2) \
    LSS_BODY_ARG1(arg1), "S" (arg2)
  #define LSS_BODY_ARG3(arg1, arg2, arg3) \
    LSS_BODY_ARG2(arg1, arg2), "d" (arg3)
  #define LSS_BODY_ARG4(arg1, arg2, arg3, arg4) \
    LSS_BODY_ARG3(arg1, arg2, arg3), "r" (arg4)
  #define LSS_BODY_ARG5(arg1, arg2, arg3, arg4, arg5) \
    LSS_BODY_ARG4(arg1, arg2, arg3, arg4), "r" (arg5)
  #define LSS_BODY_ARG6(arg1, arg2, arg3, arg4, arg5, arg6) \
    LSS_BODY_ARG5(arg1, arg2, arg3, arg4, arg5), "r" (arg6)

  #undef _syscall0
  #define _syscall0(type,name)                                                \
    type LSS_NAME(name)() {                                                   \
      LSS_BODY(0, type, name);                                                \
    }
  #undef _syscall1
  #define _syscall1(type,name,type1,arg1)                                     \
    type LSS_NAME(name)(type1 arg1) {                                         \
      LSS_BODY(1, type, name, LSS_SYSCALL_ARG(arg1));                         \
    }
  #undef _syscall2
  #define _syscall2(type,name,type1,arg1,type2,arg2)                          \
    type LSS_NAME(name)(type1 arg1, type2 arg2) {                             \
      LSS_BODY(2, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2));  \
    }
  #undef _syscall3
  #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)               \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) {                 \
      LSS_BODY(3, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2),   \
                              LSS_SYSCALL_ARG(arg3));                         \
    }
  #undef _syscall4
  #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) {     \
      LSS_BODY(4, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2),   \
                              LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4));  \
    }
  #undef _syscall5
  #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                    type5,arg5)                                               \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4,       \
                        type5 arg5) {                                         \
      LSS_BODY(5, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2),   \
                              LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4),   \
                              LSS_SYSCALL_ARG(arg5));                         \
    }
  #undef _syscall6
  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                    type5,arg5,type6,arg6)                                    \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4,       \
                        type5 arg5, type6 arg6) {                             \
      LSS_BODY(6, type, name, LSS_SYSCALL_ARG(arg1), LSS_SYSCALL_ARG(arg2),   \
                              LSS_SYSCALL_ARG(arg3), LSS_SYSCALL_ARG(arg4),   \
                              LSS_SYSCALL_ARG(arg5), LSS_SYSCALL_ARG(arg6));  \
    }
  /* Minimal clone() wrapper for x86-64: validates fn/child_stack, aligns
   * the child stack, runs fn(arg) in the child and then exits with its
   * return value. The parent gets the raw clone() result. */
  LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
                                 int flags, void *arg, int *parent_tidptr,
                                 void *newtls, int *child_tidptr) {
    long long __res;
    {
      __asm__ __volatile__(/* if (fn == NULL)
                            *   return -EINVAL;
                            */
                           "testq  %4,%4\n"
                           "jz     1f\n"

                           /* if (child_stack == NULL)
                            *   return -EINVAL;
                            */
                           "testq  %5,%5\n"
                           "jz     1f\n"

                           /* Set up alignment of the child stack:
                            * child_stack = (child_stack & ~0xF) - 16;
                            */
                           "andq   $-16,%5\n"
                           "subq   $16,%5\n"

                           /* Push "arg" and "fn" onto the stack that will be
                            * used by the child.
                            */
                           "movq   %7,8(%5)\n"
                           "movq   %4,0(%5)\n"

                           /* %rax = syscall(%rax = __NR_clone,
                            *                %rdi = flags,
                            *                %rsi = child_stack,
                            *                %rdx = parent_tidptr,
                            *                %r8  = new_tls,
                            *                %r10 = child_tidptr)
                            */
                           "movq   %2,%%rax\n"
                           "movq   %9,%%r8\n"
                           "movq   %10,%%r10\n"
                           "syscall\n"

                           /* if (%rax != 0)
                            *   return;
                            */
                           "testq  %%rax,%%rax\n"
                           "jnz    1f\n"

                           /* In the child. Terminate frame pointer chain.
                            */
                           "xorq   %%rbp,%%rbp\n"

                           /* Call "fn(arg)".
                            */
                           "popq   %%rax\n"
                           "popq   %%rdi\n"
                           "call   *%%rax\n"

                           /* Call _exit(%ebx).
                            */
                           "movq   %%rax,%%rdi\n"
                           "movq   %3,%%rax\n"
                           "syscall\n"

                           /* Return to parent.
                            */
                           "1:\n"
                           : "=a" (__res)
                           : "0"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit),
                             "r"(LSS_SYSCALL_ARG(fn)),
                             "S"(LSS_SYSCALL_ARG(child_stack)),
                             "D"(LSS_SYSCALL_ARG(flags)),
                             "r"(LSS_SYSCALL_ARG(arg)),
                             "d"(LSS_SYSCALL_ARG(parent_tidptr)),
                             "r"(LSS_SYSCALL_ARG(newtls)),
                             "r"(LSS_SYSCALL_ARG(child_tidptr))
                           : "memory", "r8", "r10", "r11", "rcx");
    }
    LSS_RETURN(int, __res);
  }
  LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
    /* On x86-64, the kernel does not know how to return from
     * a signal handler.
Instead, it relies on user space to provide a * restorer function that calls the rt_sigreturn() system call. * Unfortunately, we cannot just reference the glibc version of this * function, as glibc goes out of its way to make it inaccessible. */ long long res; __asm__ __volatile__("call 2f\n" "0:.align 16\n" "1:movq %1,%%rax\n" "syscall\n" "2:popq %0\n" "addq $(1b-0b),%0\n" : "=a" (res) : "i" (__NR_rt_sigreturn)); return (void (*)(void))(uintptr_t)res; } #elif defined(__arm__) /* Most definitions of _syscallX() neglect to mark "memory" as being * clobbered. This causes problems with compilers, that do a better job * at optimizing across __asm__ calls. * So, we just have to redefine all of the _syscallX() macros. */ #undef LSS_REG #define LSS_REG(r,a) register long __r##r __asm__("r"#r) = (long)a /* r0..r3 are scratch registers and not preserved across function * calls. We need to first evaluate the first 4 syscall arguments * and store them on stack. They must be loaded into r0..r3 after * all function calls to avoid r0..r3 being clobbered. */ #undef LSS_SAVE_ARG #define LSS_SAVE_ARG(r,a) long __tmp##r = (long)a #undef LSS_LOAD_ARG #define LSS_LOAD_ARG(r) register long __r##r __asm__("r"#r) = __tmp##r #undef LSS_BODY #define LSS_BODY(type, name, args...) \ register long __res_r0 __asm__("r0"); \ long __res; \ __SYS_REG(name) \ __asm__ __volatile__ (__syscall_safe(name) \ : "=r"(__res_r0) \ : __SYS_REG_LIST(args) \ : "lr", "memory"); \ __res = __res_r0; \ LSS_RETURN(type, __res) #undef _syscall0 #define _syscall0(type, name) \ type LSS_NAME(name)() { \ LSS_BODY(type, name); \ } #undef _syscall1 #define _syscall1(type, name, type1, arg1) \ type LSS_NAME(name)(type1 arg1) { \ /* There is no need for using a volatile temp. 
*/ \ LSS_REG(0, arg1); \ LSS_BODY(type, name, "r"(__r0)); \ } #undef _syscall2 #define _syscall2(type, name, type1, arg1, type2, arg2) \ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ LSS_SAVE_ARG(0, arg1); \ LSS_SAVE_ARG(1, arg2); \ LSS_LOAD_ARG(0); \ LSS_LOAD_ARG(1); \ LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \ } #undef _syscall3 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ LSS_SAVE_ARG(0, arg1); \ LSS_SAVE_ARG(1, arg2); \ LSS_SAVE_ARG(2, arg3); \ LSS_LOAD_ARG(0); \ LSS_LOAD_ARG(1); \ LSS_LOAD_ARG(2); \ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \ } #undef _syscall4 #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ LSS_SAVE_ARG(0, arg1); \ LSS_SAVE_ARG(1, arg2); \ LSS_SAVE_ARG(2, arg3); \ LSS_SAVE_ARG(3, arg4); \ LSS_LOAD_ARG(0); \ LSS_LOAD_ARG(1); \ LSS_LOAD_ARG(2); \ LSS_LOAD_ARG(3); \ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \ } #undef _syscall5 #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4, type5, arg5) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5) { \ LSS_SAVE_ARG(0, arg1); \ LSS_SAVE_ARG(1, arg2); \ LSS_SAVE_ARG(2, arg3); \ LSS_SAVE_ARG(3, arg4); \ LSS_REG(4, arg5); \ LSS_LOAD_ARG(0); \ LSS_LOAD_ARG(1); \ LSS_LOAD_ARG(2); \ LSS_LOAD_ARG(3); \ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ "r"(__r4)); \ } #undef _syscall6 #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4, type5, arg5, type6, arg6) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5, type6 arg6) { \ LSS_SAVE_ARG(0, arg1); \ LSS_SAVE_ARG(1, arg2); \ LSS_SAVE_ARG(2, arg3); \ LSS_SAVE_ARG(3, arg4); \ LSS_REG(4, arg5); \ LSS_REG(5, arg6); \ LSS_LOAD_ARG(0); \ LSS_LOAD_ARG(1); \ LSS_LOAD_ARG(2); \ 
LSS_LOAD_ARG(3); \ LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \ "r"(__r4), "r"(__r5)); \ } LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { register long __res __asm__("r5"); { if (fn == NULL || child_stack == NULL) { __res = -EINVAL; goto clone_exit; } /* stash first 4 arguments on stack first because we can only load * them after all function calls. */ int tmp_flags = flags; int * tmp_stack = (int*) child_stack; void * tmp_ptid = parent_tidptr; void * tmp_tls = newtls; register int *__ctid __asm__("r4") = child_tidptr; /* Push "arg" and "fn" onto the stack that will be * used by the child. */ *(--tmp_stack) = (int) arg; *(--tmp_stack) = (int) fn; /* We must load r0..r3 last after all possible function calls. */ register int __flags __asm__("r0") = tmp_flags; register void *__stack __asm__("r1") = tmp_stack; register void *__ptid __asm__("r2") = tmp_ptid; register void *__tls __asm__("r3") = tmp_tls; /* %r0 = syscall(%r0 = flags, * %r1 = child_stack, * %r2 = parent_tidptr, * %r3 = newtls, * %r4 = child_tidptr) */ __SYS_REG(clone) __asm__ __volatile__(/* %r0 = syscall(%r0 = flags, * %r1 = child_stack, * %r2 = parent_tidptr, * %r3 = newtls, * %r4 = child_tidptr) */ "push {r7}\n" "mov r7,%1\n" __syscall(clone)"\n" /* if (%r0 != 0) * return %r0; */ "movs %0,r0\n" "bne 1f\n" /* In the child, now. Call "fn(arg)". */ "ldr r0,[sp, #4]\n" "mov lr,pc\n" "ldr pc,[sp]\n" /* Call _exit(%r0), which never returns. We only * need to set r7 for EABI syscall ABI but we do * this always to simplify code sharing between * old and new syscall ABIs. */ "mov r7,%2\n" __syscall(exit)"\n" /* Pop r7 from the stack only in the parent. 
*/ "1: pop {r7}\n" : "=r" (__res) : "r"(__sysreg), "i"(__NR_exit), "r"(__stack), "r"(__flags), "r"(__ptid), "r"(__tls), "r"(__ctid) : "cc", "lr", "memory"); } clone_exit: LSS_RETURN(int, __res); } #elif defined(__mips__) #undef LSS_REG #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \ (unsigned long)(a) #if _MIPS_SIM == _MIPS_SIM_ABI32 // See http://sources.redhat.com/ml/libc-alpha/2004-10/msg00050.html // or http://www.linux-mips.org/archives/linux-mips/2004-10/msg00142.html #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12",\ "$13", "$14", "$15", "$24", "$25", "memory" #else #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \ "$14", "$15", "$24", "$25", "memory" #endif #undef LSS_BODY #define LSS_BODY(type,name,r7,...) \ register unsigned long __v0 __asm__("$2") = __NR_##name; \ __asm__ __volatile__ ("syscall\n" \ : "=&r"(__v0), r7 (__r7) \ : "0"(__v0), ##__VA_ARGS__ \ : MIPS_SYSCALL_CLOBBERS); \ LSS_RETURN(type, __v0, __r7) #undef _syscall0 #define _syscall0(type, name) \ type LSS_NAME(name)() { \ register unsigned long __r7 __asm__("$7"); \ LSS_BODY(type, name, "=r"); \ } #undef _syscall1 #define _syscall1(type, name, type1, arg1) \ type LSS_NAME(name)(type1 arg1) { \ register unsigned long __r7 __asm__("$7"); \ LSS_REG(4, arg1); LSS_BODY(type, name, "=r", "r"(__r4)); \ } #undef _syscall2 #define _syscall2(type, name, type1, arg1, type2, arg2) \ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ register unsigned long __r7 __asm__("$7"); \ LSS_REG(4, arg1); LSS_REG(5, arg2); \ LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5)); \ } #undef _syscall3 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ register unsigned long __r7 __asm__("$7"); \ LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5), "r"(__r6)); \ } #undef _syscall4 #define 
_syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ LSS_REG(7, arg4); \ LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6)); \ } #undef _syscall5 #if _MIPS_SIM == _MIPS_SIM_ABI32 /* The old 32bit MIPS system call API passes the fifth and sixth argument * on the stack, whereas the new APIs use registers "r8" and "r9". */ #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5) { \ LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ LSS_REG(7, arg4); \ register unsigned long __v0 __asm__("$2"); \ __asm__ __volatile__ (".set noreorder\n" \ "lw $2, %6\n" \ "subu $29, 32\n" \ "sw $2, 16($29)\n" \ "li $2, %2\n" \ "syscall\n" \ "addiu $29, 32\n" \ ".set reorder\n" \ : "=&r"(__v0), "+r" (__r7) \ : "i" (__NR_##name), "r"(__r4), "r"(__r5), \ "r"(__r6), "m" ((unsigned long)arg5) \ : MIPS_SYSCALL_CLOBBERS); \ LSS_RETURN(type, __v0, __r7); \ } #else #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5) { \ LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ LSS_REG(7, arg4); LSS_REG(8, arg5); \ LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \ "r"(__r8)); \ } #endif #undef _syscall6 #if _MIPS_SIM == _MIPS_SIM_ABI32 /* The old 32bit MIPS system call API passes the fifth and sixth argument * on the stack, whereas the new APIs use registers "r8" and "r9". 
*/ #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5,type6,arg6) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5, type6 arg6) { \ LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ LSS_REG(7, arg4); \ register unsigned long __v0 __asm__("$2"); \ __asm__ __volatile__ (".set noreorder\n" \ "lw $2, %6\n" \ "lw $8, %7\n" \ "subu $29, 32\n" \ "sw $2, 16($29)\n" \ "sw $8, 20($29)\n" \ "li $2, %2\n" \ "syscall\n" \ "addiu $29, 32\n" \ ".set reorder\n" \ : "=&r"(__v0), "+r" (__r7) \ : "i" (__NR_##name), "r"(__r4), "r"(__r5), \ "r"(__r6), "m" ((unsigned long)arg5), \ "m" ((unsigned long)arg6) \ : MIPS_SYSCALL_CLOBBERS); \ LSS_RETURN(type, __v0, __r7); \ } #else #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5,type6,arg6) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5,type6 arg6) { \ LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \ LSS_REG(7, arg4); LSS_REG(8, arg5); LSS_REG(9, arg6); \ LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \ "r"(__r8), "r"(__r9)); \ } #endif LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { register unsigned long __v0 __asm__("$2"); register unsigned long __r7 __asm__("$7") = (unsigned long)newtls; { register int __flags __asm__("$4") = flags; register void *__stack __asm__("$5") = child_stack; register void *__ptid __asm__("$6") = parent_tidptr; register int *__ctid __asm__("$8") = child_tidptr; __asm__ __volatile__( #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 "subu $29,24\n" #elif _MIPS_SIM == _MIPS_SIM_NABI32 "sub $29,16\n" #else "dsubu $29,16\n" #endif /* if (fn == NULL || child_stack == NULL) * return -EINVAL; */ "li %0,%2\n" "beqz %5,1f\n" "beqz %6,1f\n" /* Push "arg" and "fn" onto the stack that will be * used by the child. 
*/ #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 "subu %6,32\n" "sw %5,0(%6)\n" "sw %8,4(%6)\n" #elif _MIPS_SIM == _MIPS_SIM_NABI32 "sub %6,32\n" "sw %5,0(%6)\n" "sw %8,8(%6)\n" #else "dsubu %6,32\n" "sd %5,0(%6)\n" "sd %8,8(%6)\n" #endif /* $7 = syscall($4 = flags, * $5 = child_stack, * $6 = parent_tidptr, * $7 = newtls, * $8 = child_tidptr) */ "li $2,%3\n" "syscall\n" /* if ($7 != 0) * return $2; */ "bnez $7,1f\n" "bnez $2,1f\n" /* In the child, now. Call "fn(arg)". */ #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 "lw $25,0($29)\n" "lw $4,4($29)\n" #elif _MIPS_SIM == _MIPS_SIM_NABI32 "lw $25,0($29)\n" "lw $4,8($29)\n" #else "ld $25,0($29)\n" "ld $4,8($29)\n" #endif "jalr $25\n" /* Call _exit($2) */ "move $4,$2\n" "li $2,%4\n" "syscall\n" "1:\n" #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32 "addu $29, 24\n" #elif _MIPS_SIM == _MIPS_SIM_NABI32 "add $29, 16\n" #else "daddu $29,16\n" #endif : "=&r" (__v0), "=r" (__r7) : "i"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit), "r"(fn), "r"(__stack), "r"(__flags), "r"(arg), "r"(__ptid), "r"(__r7), "r"(__ctid) : "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "memory"); } LSS_RETURN(int, __v0, __r7); } #elif defined (__PPC__) #undef LSS_LOADARGS_0 #define LSS_LOADARGS_0(name, dummy...) 
\ __sc_0 = __NR_##name #undef LSS_LOADARGS_1 #define LSS_LOADARGS_1(name, arg1) \ LSS_LOADARGS_0(name); \ __sc_3 = (unsigned long) (arg1) #undef LSS_LOADARGS_2 #define LSS_LOADARGS_2(name, arg1, arg2) \ LSS_LOADARGS_1(name, arg1); \ __sc_4 = (unsigned long) (arg2) #undef LSS_LOADARGS_3 #define LSS_LOADARGS_3(name, arg1, arg2, arg3) \ LSS_LOADARGS_2(name, arg1, arg2); \ __sc_5 = (unsigned long) (arg3) #undef LSS_LOADARGS_4 #define LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4) \ LSS_LOADARGS_3(name, arg1, arg2, arg3); \ __sc_6 = (unsigned long) (arg4) #undef LSS_LOADARGS_5 #define LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5) \ LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4); \ __sc_7 = (unsigned long) (arg5) #undef LSS_LOADARGS_6 #define LSS_LOADARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \ LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5); \ __sc_8 = (unsigned long) (arg6) #undef LSS_ASMINPUT_0 #define LSS_ASMINPUT_0 "0" (__sc_0) #undef LSS_ASMINPUT_1 #define LSS_ASMINPUT_1 LSS_ASMINPUT_0, "1" (__sc_3) #undef LSS_ASMINPUT_2 #define LSS_ASMINPUT_2 LSS_ASMINPUT_1, "2" (__sc_4) #undef LSS_ASMINPUT_3 #define LSS_ASMINPUT_3 LSS_ASMINPUT_2, "3" (__sc_5) #undef LSS_ASMINPUT_4 #define LSS_ASMINPUT_4 LSS_ASMINPUT_3, "4" (__sc_6) #undef LSS_ASMINPUT_5 #define LSS_ASMINPUT_5 LSS_ASMINPUT_4, "5" (__sc_7) #undef LSS_ASMINPUT_6 #define LSS_ASMINPUT_6 LSS_ASMINPUT_5, "6" (__sc_8) #undef LSS_BODY #define LSS_BODY(nr, type, name, args...) 
\ long __sc_ret, __sc_err; \ { \ register unsigned long __sc_0 __asm__ ("r0"); \ register unsigned long __sc_3 __asm__ ("r3"); \ register unsigned long __sc_4 __asm__ ("r4"); \ register unsigned long __sc_5 __asm__ ("r5"); \ register unsigned long __sc_6 __asm__ ("r6"); \ register unsigned long __sc_7 __asm__ ("r7"); \ register unsigned long __sc_8 __asm__ ("r8"); \ \ LSS_LOADARGS_##nr(name, args); \ __asm__ __volatile__ \ ("sc\n\t" \ "mfcr %0" \ : "=&r" (__sc_0), \ "=&r" (__sc_3), "=&r" (__sc_4), \ "=&r" (__sc_5), "=&r" (__sc_6), \ "=&r" (__sc_7), "=&r" (__sc_8) \ : LSS_ASMINPUT_##nr \ : "cr0", "ctr", "memory", \ "r9", "r10", "r11", "r12"); \ __sc_ret = __sc_3; \ __sc_err = __sc_0; \ } \ LSS_RETURN(type, __sc_ret, __sc_err) #undef _syscall0 #define _syscall0(type, name) \ type LSS_NAME(name)(void) { \ LSS_BODY(0, type, name); \ } #undef _syscall1 #define _syscall1(type, name, type1, arg1) \ type LSS_NAME(name)(type1 arg1) { \ LSS_BODY(1, type, name, arg1); \ } #undef _syscall2 #define _syscall2(type, name, type1, arg1, type2, arg2) \ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ LSS_BODY(2, type, name, arg1, arg2); \ } #undef _syscall3 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ LSS_BODY(3, type, name, arg1, arg2, arg3); \ } #undef _syscall4 #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ LSS_BODY(4, type, name, arg1, arg2, arg3, arg4); \ } #undef _syscall5 #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4, type5, arg5) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5) { \ LSS_BODY(5, type, name, arg1, arg2, arg3, arg4, arg5); \ } #undef _syscall6 #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4, type5, arg5, type6, arg6) \ type LSS_NAME(name)(type1 arg1, type2 arg2, 
type3 arg3, type4 arg4, \ type5 arg5, type6 arg6) { \ LSS_BODY(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \ } /* clone function adapted from glibc 2.18 clone.S */ LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long __ret, __err; { #if defined(__PPC64__) /* Stack frame offsets. */ #if _CALL_ELF != 2 #define FRAME_MIN_SIZE 112 #define FRAME_TOC_SAVE 40 #else #define FRAME_MIN_SIZE 32 #define FRAME_TOC_SAVE 24 #endif register int (*__fn)(void *) __asm__ ("r3") = fn; register void *__cstack __asm__ ("r4") = child_stack; register int __flags __asm__ ("r5") = flags; register void * __arg __asm__ ("r6") = arg; register int * __ptidptr __asm__ ("r7") = parent_tidptr; register void * __newtls __asm__ ("r8") = newtls; register int * __ctidptr __asm__ ("r9") = child_tidptr; __asm__ __volatile__( /* check for fn == NULL * and child_stack == NULL */ "cmpdi cr0, %6, 0\n\t" "cmpdi cr1, %7, 0\n\t" "cror cr0*4+eq, cr1*4+eq, cr0*4+eq\n\t" "beq- cr0, 1f\n\t" /* set up stack frame for child */ "clrrdi %7, %7, 4\n\t" "li 0, 0\n\t" "stdu 0, -%13(%7)\n\t" /* fn, arg, child_stack are saved across the syscall */ "mr 28, %6\n\t" "mr 29, %7\n\t" "mr 27, %9\n\t" /* syscall r3 == flags r4 == child_stack r5 == parent_tidptr r6 == newtls r7 == child_tidptr */ "mr 3, %8\n\t" "mr 5, %10\n\t" "mr 6, %11\n\t" "mr 7, %12\n\t" "li 0, %4\n\t" "sc\n\t" /* Test if syscall was successful */ "cmpdi cr1, 3, 0\n\t" "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" "bne- cr1, 1f\n\t" /* Do the function call */ "std 2, %14(1)\n\t" #if _CALL_ELF != 2 "ld 0, 0(28)\n\t" "ld 2, 8(28)\n\t" "mtctr 0\n\t" #else "mr 12, 28\n\t" "mtctr 12\n\t" #endif "mr 3, 27\n\t" "bctrl\n\t" "ld 2, %14(1)\n\t" /* Call _exit(r3) */ "li 0, %5\n\t" "sc\n\t" /* Return to parent */ "1:\n\t" "mr %0, 3\n\t" : "=r" (__ret), "=r" (__err) : "0" (-1), "i" (EINVAL), "i" (__NR_clone), "i" (__NR_exit), "r" (__fn), "r" (__cstack), "r" (__flags), "r" 
(__arg), "r" (__ptidptr), "r" (__newtls), "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE) : "cr0", "cr1", "memory", "ctr", "r0", "r29", "r27", "r28"); #else register int (*__fn)(void *) __asm__ ("r8") = fn; register void *__cstack __asm__ ("r4") = child_stack; register int __flags __asm__ ("r3") = flags; register void * __arg __asm__ ("r9") = arg; register int * __ptidptr __asm__ ("r5") = parent_tidptr; register void * __newtls __asm__ ("r6") = newtls; register int * __ctidptr __asm__ ("r7") = child_tidptr; __asm__ __volatile__( /* check for fn == NULL * and child_stack == NULL */ "cmpwi cr0, %6, 0\n\t" "cmpwi cr1, %7, 0\n\t" "cror cr0*4+eq, cr1*4+eq, cr0*4+eq\n\t" "beq- cr0, 1f\n\t" /* set up stack frame for child */ "clrrwi %7, %7, 4\n\t" "li 0, 0\n\t" "stwu 0, -16(%7)\n\t" /* fn, arg, child_stack are saved across the syscall: r28-30 */ "mr 28, %6\n\t" "mr 29, %7\n\t" "mr 27, %9\n\t" /* syscall */ "li 0, %4\n\t" /* flags already in r3 * child_stack already in r4 * ptidptr already in r5 * newtls already in r6 * ctidptr already in r7 */ "sc\n\t" /* Test if syscall was successful */ "cmpwi cr1, 3, 0\n\t" "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t" "bne- cr1, 1f\n\t" /* Do the function call */ "mtctr 28\n\t" "mr 3, 27\n\t" "bctrl\n\t" /* Call _exit(r3) */ "li 0, %5\n\t" "sc\n\t" /* Return to parent */ "1:\n" "mfcr %1\n\t" "mr %0, 3\n\t" : "=r" (__ret), "=r" (__err) : "0" (-1), "1" (EINVAL), "i" (__NR_clone), "i" (__NR_exit), "r" (__fn), "r" (__cstack), "r" (__flags), "r" (__arg), "r" (__ptidptr), "r" (__newtls), "r" (__ctidptr) : "cr0", "cr1", "memory", "ctr", "r0", "r29", "r27", "r28"); #endif } LSS_RETURN(int, __ret, __err); } #elif defined(__aarch64__) #undef LSS_REG #define LSS_REG(r,a) register long __x##r __asm__("x"#r) = (long)a #undef LSS_BODY #define LSS_BODY(type,name,args...) 
\ register long __res_x0 __asm__("x0"); \ long __res; \ __asm__ __volatile__ ("mov x8, %1\n" \ "svc 0x0\n" \ : "=r"(__res_x0) \ : "i"(__NR_##name) , ## args \ : "memory"); \ __res = __res_x0; \ LSS_RETURN(type, __res) #undef _syscall0 #define _syscall0(type, name) \ type LSS_NAME(name)(void) { \ LSS_BODY(type, name); \ } #undef _syscall1 #define _syscall1(type, name, type1, arg1) \ type LSS_NAME(name)(type1 arg1) { \ LSS_REG(0, arg1); LSS_BODY(type, name, "r"(__x0)); \ } #undef _syscall2 #define _syscall2_long(type, name, svc, type1, arg1, type2, arg2) \ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ LSS_REG(0, arg1); LSS_REG(1, arg2); \ LSS_BODY(type, svc, "r"(__x0), "r"(__x1)); \ } #define _syscall2(type, name, type1, arg1, type2, arg2) \ _syscall2_long(type, name, name, type1, arg1, type2, arg2) #undef _syscall3 #define _syscall3_long(type, name, svc, type1, arg1, type2, arg2, \ type3, arg3) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ LSS_BODY(type, svc, "r"(__x0), "r"(__x1), "r"(__x2)); \ } #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ _syscall3_long(type, name, name, type1, arg1, type2, arg2, \ type3, arg3) #undef _syscall4 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ LSS_REG(3, arg4); \ LSS_BODY(type, name, "r"(__x0), "r"(__x1), "r"(__x2), "r"(__x3)); \ } #undef _syscall5 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5) { \ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ LSS_REG(3, arg4); LSS_REG(4, arg5); \ LSS_BODY(type, name, "r"(__x0), "r"(__x1), "r"(__x2), "r"(__x3), \ "r"(__x4)); \ } #undef _syscall6 #define _syscall6_long(type,name,svc,type1,arg1,type2,arg2,type3,arg3, \ 
type4,arg4,type5,arg5,type6,arg6) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \ type5 arg5, type6 arg6) { \ LSS_REG(0, arg1); LSS_REG(1, arg2); LSS_REG(2, arg3); \ LSS_REG(3, arg4); LSS_REG(4, arg5); LSS_REG(5, arg6); \ LSS_BODY(type, svc, "r"(__x0), "r"(__x1), "x"(__x2), "r"(__x3), \ "r"(__x4), "r"(__x5)); \ } #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \ type5,arg5,type6,arg6) \ _syscall6_long(type,name,name,type1,arg1,type2,arg2,type3,arg3, \ type4,arg4,type5,arg5,type6,arg6) /* clone function adapted from glibc 2.18 clone.S */ LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long __res; { register int (*__fn)(void *) __asm__("x0") = fn; register void *__stack __asm__("x1") = child_stack; register int __flags __asm__("x2") = flags; register void *__arg __asm__("x3") = arg; register int *__ptid __asm__("x4") = parent_tidptr; register void *__tls __asm__("x5") = newtls; register int *__ctid __asm__("x6") = child_tidptr; __asm__ __volatile__(/* if (fn == NULL || child_stack == NULL) * return -EINVAL; */ "cbz x0,1f\n" "cbz x1,1f\n" /* Push "arg" and "fn" onto the stack that will be * used by the child. */ "stp x0,x3, [x1, #-16]!\n" "mov x0,x2\n" /* flags */ "mov x2,x4\n" /* ptid */ "mov x3,x5\n" /* tls */ "mov x4,x6\n" /* ctid */ "mov x8,%9\n" /* clone */ "svc 0x0\n" /* if (%r0 != 0) * return %r0; */ "cmp x0, #0\n" "bne 2f\n" /* In the child, now. Call "fn(arg)". */ "ldp x1, x0, [sp], #16\n" "blr x1\n" /* Call _exit(%r0). 
*/ "mov x8, %10\n" "svc 0x0\n" "1:\n" "mov x8, %1\n" "2:\n" : "=r" (__res) : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit) : "x30", "memory"); } LSS_RETURN(int, __res); } #elif defined(__s390__) #undef LSS_REG #define LSS_REG(r, a) register unsigned long __r##r __asm__("r"#r) = (unsigned long) a #undef LSS_BODY #define LSS_BODY(type, name, args...) \ register unsigned long __nr __asm__("r1") \ = (unsigned long)(__NR_##name); \ register long __res_r2 __asm__("r2"); \ long __res; \ __asm__ __volatile__ \ ("svc 0\n\t" \ : "=d"(__res_r2) \ : "d"(__nr), ## args \ : "memory"); \ __res = __res_r2; \ LSS_RETURN(type, __res) #undef _syscall0 #define _syscall0(type, name) \ type LSS_NAME(name)(void) { \ LSS_BODY(type, name); \ } #undef _syscall1 #define _syscall1(type, name, type1, arg1) \ type LSS_NAME(name)(type1 arg1) { \ LSS_REG(2, arg1); \ LSS_BODY(type, name, "0"(__r2)); \ } #undef _syscall2 #define _syscall2(type, name, type1, arg1, type2, arg2) \ type LSS_NAME(name)(type1 arg1, type2 arg2) { \ LSS_REG(2, arg1); LSS_REG(3, arg2); \ LSS_BODY(type, name, "0"(__r2), "d"(__r3)); \ } #undef _syscall3 #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \ LSS_REG(2, arg1); LSS_REG(3, arg2); LSS_REG(4, arg3); \ LSS_BODY(type, name, "0"(__r2), "d"(__r3), "d"(__r4)); \ } #undef _syscall4 #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, \ type4 arg4) { \ LSS_REG(2, arg1); LSS_REG(3, arg2); LSS_REG(4, arg3); \ LSS_REG(5, arg4); \ LSS_BODY(type, name, "0"(__r2), "d"(__r3), "d"(__r4), \ "d"(__r5)); \ } #undef _syscall5 #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4, type5, arg5) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, \ type4 arg4, type5 arg5) { \ LSS_REG(2, arg1); 
LSS_REG(3, arg2); LSS_REG(4, arg3); \ LSS_REG(5, arg4); LSS_REG(6, arg5); \ LSS_BODY(type, name, "0"(__r2), "d"(__r3), "d"(__r4), \ "d"(__r5), "d"(__r6)); \ } #undef _syscall6 #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \ type4, arg4, type5, arg5, type6, arg6) \ type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, \ type4 arg4, type5 arg5, type6 arg6) { \ LSS_REG(2, arg1); LSS_REG(3, arg2); LSS_REG(4, arg3); \ LSS_REG(5, arg4); LSS_REG(6, arg5); LSS_REG(7, arg6); \ LSS_BODY(type, name, "0"(__r2), "d"(__r3), "d"(__r4), \ "d"(__r5), "d"(__r6), "d"(__r7)); \ } LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { long __ret; { register int (*__fn)(void *) __asm__ ("r1") = fn; register void *__cstack __asm__ ("r2") = child_stack; register int __flags __asm__ ("r3") = flags; register void *__arg __asm__ ("r0") = arg; register int *__ptidptr __asm__ ("r4") = parent_tidptr; register void *__newtls __asm__ ("r6") = newtls; register int *__ctidptr __asm__ ("r5") = child_tidptr; __asm__ __volatile__ ( #ifndef __s390x__ /* arg already in r0 */ "ltr %4, %4\n\t" /* check fn, which is already in r1 */ "jz 1f\n\t" /* NULL function pointer, return -EINVAL */ "ltr %5, %5\n\t" /* check child_stack, which is already in r2 */ "jz 1f\n\t" /* NULL stack pointer, return -EINVAL */ /* flags already in r3 */ /* parent_tidptr already in r4 */ /* child_tidptr already in r5 */ /* newtls already in r6 */ "svc %2\n\t" /* invoke clone syscall */ "ltr %0,%%r2\n\t" /* load return code into __ret and test */ "jnz 1f\n\t" /* return to parent if non-zero */ /* start child thread */ "lr %%r2, %7\n\t" /* set first parameter to void *arg */ "ahi %%r15, -96\n\t" /* make room on the stack for the save area */ "xc 0(4,%%r15), 0(%%r15)\n\t" "basr %%r14, %4\n\t" /* jump to fn */ "svc %3\n" /* invoke exit syscall */ "1:\n" #else /* arg already in r0 */ "ltgr %4, %4\n\t" /* check fn, which is 
already in r1 */ "jz 1f\n\t" /* NULL function pointer, return -EINVAL */ "ltgr %5, %5\n\t" /* check child_stack, which is already in r2 */ "jz 1f\n\t" /* NULL stack pointer, return -EINVAL */ /* flags already in r3 */ /* parent_tidptr already in r4 */ /* child_tidptr already in r5 */ /* newtls already in r6 */ "svc %2\n\t" /* invoke clone syscall */ "ltgr %0, %%r2\n\t" /* load return code into __ret and test */ "jnz 1f\n\t" /* return to parent if non-zero */ /* start child thread */ "lgr %%r2, %7\n\t" /* set first parameter to void *arg */ "aghi %%r15, -160\n\t" /* make room on the stack for the save area */ "xc 0(8,%%r15), 0(%%r15)\n\t" "basr %%r14, %4\n\t" /* jump to fn */ "svc %3\n" /* invoke exit syscall */ "1:\n" #endif : "=r" (__ret) : "0" (-EINVAL), "i" (__NR_clone), "i" (__NR_exit), "d" (__fn), "d" (__cstack), "d" (__flags), "d" (__arg), "d" (__ptidptr), "d" (__newtls), "d" (__ctidptr) : "cc", "r14", "memory" ); } LSS_RETURN(int, __ret); } #endif #define __NR__exit __NR_exit #define __NR__gettid __NR_gettid #define __NR__mremap __NR_mremap LSS_INLINE _syscall1(int, close, int, f) LSS_INLINE _syscall1(int, _exit, int, e) #if defined(__aarch64__) && defined (__ILP32__) /* aarch64_ilp32 uses fcntl64 for sys_fcntl() */ LSS_INLINE _syscall3_long(int, fcntl, fcntl64, int, f, int, c, long, a) #else LSS_INLINE _syscall3(int, fcntl, int, f, int, c, long, a) #endif #if defined(__aarch64__) && defined (__ILP32__) /* aarch64_ilp32 uses fstat64 for sys_fstat() */ LSS_INLINE _syscall2_long(int, fstat, fstat64, int, f, struct kernel_stat*, b) #else LSS_INLINE _syscall2(int, fstat, int, f, struct kernel_stat*, b) #endif LSS_INLINE _syscall6(int, futex, int*, a, int, o, int, v, struct kernel_timespec*, t, int*, a2, int, v3) #ifdef __NR_getdents64 LSS_INLINE _syscall3(int, getdents64, int, f, struct kernel_dirent64*, d, int, c) #define KERNEL_DIRENT kernel_dirent64 #define GETDENTS sys_getdents64 #else LSS_INLINE _syscall3(int, getdents, int, f, struct kernel_dirent*, d, 
int, c) #define KERNEL_DIRENT kernel_dirent #define GETDENTS sys_getdents #endif LSS_INLINE _syscall0(pid_t, getpid) LSS_INLINE _syscall0(pid_t, getppid) LSS_INLINE _syscall0(pid_t, _gettid) LSS_INLINE _syscall2(int, kill, pid_t, p, int, s) #if defined(__x86_64__) /* Need to make sure off_t isn't truncated to 32-bits under x32. */ LSS_INLINE off_t LSS_NAME(lseek)(int f, off_t o, int w) { _LSS_BODY(3, off_t, lseek, off_t, LSS_SYSCALL_ARG(f), (uint64_t)(o), LSS_SYSCALL_ARG(w)); } #elif defined(__aarch64__) && defined (__ILP32__) /* aarch64_ilp32 uses llseek for sys_lseek() */ LSS_INLINE _syscall3_long(off_t, lseek, llseek, int, f, off_t, o, int, w) #else LSS_INLINE _syscall3(off_t, lseek, int, f, off_t, o, int, w) #endif LSS_INLINE _syscall2(int, munmap, void*, s, size_t, l) LSS_INLINE _syscall5(void*, _mremap, void*, o, size_t, os, size_t, ns, unsigned long, f, void *, a) LSS_INLINE _syscall2(int, prctl, int, o, long, a) LSS_INLINE _syscall4(long, ptrace, int, r, pid_t, p, void *, a, void *, d) LSS_INLINE _syscall3(ssize_t, read, int, f, void *, b, size_t, c) LSS_INLINE _syscall4(int, rt_sigaction, int, s, const struct kernel_sigaction*, a, struct kernel_sigaction*, o, size_t, c) LSS_INLINE _syscall4(int, rt_sigprocmask, int, h, const struct kernel_sigset_t*, s, struct kernel_sigset_t*, o, size_t, c); LSS_INLINE _syscall0(int, sched_yield) LSS_INLINE _syscall2(int, sigaltstack, const stack_t*, s, const stack_t*, o) #if defined(__NR_fstatat) LSS_INLINE _syscall4(int, fstatat, int, d, const char *, p, struct kernel_stat*, b, int, flags) LSS_INLINE int LSS_NAME(stat)(const char* p, struct kernel_stat* b) { return LSS_NAME(fstatat)(AT_FDCWD,p,b,0); } #else LSS_INLINE _syscall2(int, stat, const char*, f, struct kernel_stat*, b) #endif LSS_INLINE _syscall3(ssize_t, write, int, f, const void *, b, size_t, c) #if defined(__NR_getcpu) LSS_INLINE _syscall3(long, getcpu, unsigned *, cpu, unsigned *, node, void *, unused); #endif #if defined(__x86_64__) || defined(__aarch64__) 
|| \ (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32) LSS_INLINE _syscall3(int, socket, int, d, int, t, int, p) #endif #if defined(__x86_64__) || defined(__s390x__) LSS_INLINE int LSS_NAME(sigaction)(int signum, const struct kernel_sigaction *act, struct kernel_sigaction *oldact) { #if defined(__x86_64__) /* On x86_64, the kernel requires us to always set our own * SA_RESTORER in order to be able to return from a signal handler. * This function must have a "magic" signature that the "gdb" * (and maybe the kernel?) can recognize. */ if (act != NULL && !(act->sa_flags & SA_RESTORER)) { struct kernel_sigaction a = *act; a.sa_flags |= SA_RESTORER; a.sa_restorer = LSS_NAME(restore_rt)(); return LSS_NAME(rt_sigaction)(signum, &a, oldact, (KERNEL_NSIG+7)/8); } else #endif return LSS_NAME(rt_sigaction)(signum, act, oldact, (KERNEL_NSIG+7)/8); } LSS_INLINE int LSS_NAME(sigprocmask)(int how, const struct kernel_sigset_t *set, struct kernel_sigset_t *oldset) { return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8); } #endif #if (defined(__aarch64__)) || \ (defined(__mips__) \ && (_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32)) LSS_INLINE int LSS_NAME(sigaction)(int signum, const struct kernel_sigaction *act, struct kernel_sigaction *oldact) { return LSS_NAME(rt_sigaction)(signum, act, oldact, (KERNEL_NSIG+7)/8); } LSS_INLINE int LSS_NAME(sigprocmask)(int how, const struct kernel_sigset_t *set, struct kernel_sigset_t *oldset) { return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8); } #endif #ifdef __NR_wait4 LSS_INLINE _syscall4(pid_t, wait4, pid_t, p, int*, s, int, o, struct kernel_rusage*, r) LSS_INLINE pid_t LSS_NAME(waitpid)(pid_t pid, int *status, int options){ return LSS_NAME(wait4)(pid, status, options, 0); } #else LSS_INLINE _syscall3(pid_t, waitpid, pid_t, p, int*, s, int, o) #endif #ifdef __NR_openat LSS_INLINE _syscall4(int, openat, int, d, const char *, p, int, f, int, m) LSS_INLINE int LSS_NAME(open)(const char* p, 
int f, int m) { return LSS_NAME(openat)(AT_FDCWD,p,f,m ); } #else LSS_INLINE _syscall3(int, open, const char*, p, int, f, int, m) #endif LSS_INLINE int LSS_NAME(sigemptyset)(struct kernel_sigset_t *set) { memset(&set->sig, 0, sizeof(set->sig)); return 0; } LSS_INLINE int LSS_NAME(sigfillset)(struct kernel_sigset_t *set) { memset(&set->sig, -1, sizeof(set->sig)); return 0; } LSS_INLINE int LSS_NAME(sigaddset)(struct kernel_sigset_t *set, int signum) { if (signum < 1 || signum > (int)(8*sizeof(set->sig))) { LSS_ERRNO = EINVAL; return -1; } else { set->sig[(signum - 1)/(8*sizeof(set->sig[0]))] |= 1UL << ((signum - 1) % (8*sizeof(set->sig[0]))); return 0; } } LSS_INLINE int LSS_NAME(sigdelset)(struct kernel_sigset_t *set, int signum) { if (signum < 1 || signum > (int)(8*sizeof(set->sig))) { LSS_ERRNO = EINVAL; return -1; } else { set->sig[(signum - 1)/(8*sizeof(set->sig[0]))] &= ~(1UL << ((signum - 1) % (8*sizeof(set->sig[0])))); return 0; } } #if defined(__i386__) || \ defined(__arm__) || \ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ defined(__PPC__) || \ (defined(__s390__) && !defined(__s390x__)) #define __NR__sigaction __NR_sigaction #define __NR__sigprocmask __NR_sigprocmask LSS_INLINE _syscall2(int, fstat64, int, f, struct kernel_stat64 *, b) LSS_INLINE _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo, loff_t *, res, uint, wh) #if defined(__s390__) && !defined(__s390x__) /* On s390, mmap2() arguments are passed in memory. 
*/ LSS_INLINE void* LSS_NAME(_mmap2)(void *s, size_t l, int p, int f, int d, off_t o) { unsigned long buf[6] = { (unsigned long) s, (unsigned long) l, (unsigned long) p, (unsigned long) f, (unsigned long) d, (unsigned long) o }; LSS_REG(2, buf); LSS_BODY(void*, mmap2, "0"(__r2)); } #elif !defined(__PPC64__) #define __NR__mmap2 __NR_mmap2 LSS_INLINE _syscall6(void*, _mmap2, void*, s, size_t, l, int, p, int, f, int, d, off_t, o) #endif LSS_INLINE _syscall3(int, _sigaction, int, s, const struct kernel_old_sigaction*, a, struct kernel_old_sigaction*, o) LSS_INLINE _syscall3(int, _sigprocmask, int, h, const unsigned long*, s, unsigned long*, o) LSS_INLINE _syscall2(int, stat64, const char *, p, struct kernel_stat64 *, b) LSS_INLINE int LSS_NAME(sigaction)(int signum, const struct kernel_sigaction *act, struct kernel_sigaction *oldact) { int old_errno = LSS_ERRNO; int rc; struct kernel_sigaction a; if (act != NULL) { a = *act; #ifdef __i386__ /* On i386, the kernel requires us to always set our own * SA_RESTORER when using realtime signals. Otherwise, it does not * know how to return from a signal handler. This function must have * a "magic" signature that the "gdb" (and maybe the kernel?) can * recognize. * Apparently, a SA_RESTORER is implicitly set by the kernel, when * using non-realtime signals. * * TODO: Test whether ARM needs a restorer */ if (!(a.sa_flags & SA_RESTORER)) { a.sa_flags |= SA_RESTORER; a.sa_restorer = (a.sa_flags & SA_SIGINFO) ? LSS_NAME(restore_rt)() : LSS_NAME(restore)(); } #endif } rc = LSS_NAME(rt_sigaction)(signum, act ? 
&a : act, oldact, (KERNEL_NSIG+7)/8); if (rc < 0 && LSS_ERRNO == ENOSYS) { struct kernel_old_sigaction oa, ooa, *ptr_a = &oa, *ptr_oa = &ooa; if (!act) { ptr_a = NULL; } else { oa.sa_handler_ = act->sa_handler_; memcpy(&oa.sa_mask, &act->sa_mask, sizeof(oa.sa_mask)); #ifndef __mips__ oa.sa_restorer = act->sa_restorer; #endif oa.sa_flags = act->sa_flags; } if (!oldact) { ptr_oa = NULL; } LSS_ERRNO = old_errno; rc = LSS_NAME(_sigaction)(signum, ptr_a, ptr_oa); if (rc == 0 && oldact) { if (act) { memcpy(oldact, act, sizeof(*act)); } else { memset(oldact, 0, sizeof(*oldact)); } oldact->sa_handler_ = ptr_oa->sa_handler_; oldact->sa_flags = ptr_oa->sa_flags; memcpy(&oldact->sa_mask, &ptr_oa->sa_mask, sizeof(ptr_oa->sa_mask)); #ifndef __mips__ oldact->sa_restorer = ptr_oa->sa_restorer; #endif } } return rc; } LSS_INLINE int LSS_NAME(sigprocmask)(int how, const struct kernel_sigset_t *set, struct kernel_sigset_t *oldset) { int olderrno = LSS_ERRNO; int rc = LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8); if (rc < 0 && LSS_ERRNO == ENOSYS) { LSS_ERRNO = olderrno; if (oldset) { LSS_NAME(sigemptyset)(oldset); } rc = LSS_NAME(_sigprocmask)(how, set ? &set->sig[0] : NULL, oldset ? &oldset->sig[0] : NULL); } return rc; } #endif #if defined(__i386__) || \ defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ (defined(__PPC__) && !defined(__PPC64__)) || \ (defined(__s390__) && !defined(__s390x__)) /* On these architectures, implement mmap() with mmap2(). */ LSS_INLINE void* LSS_NAME(mmap)(void *s, size_t l, int p, int f, int d, int64_t o) { if (o % 4096) { LSS_ERRNO = EINVAL; return (void *) -1; } return LSS_NAME(_mmap2)(s, l, p, f, d, (o / 4096)); } #elif defined(__s390x__) /* On s390x, mmap() arguments are passed in memory. 
*/ LSS_INLINE void* LSS_NAME(mmap)(void *s, size_t l, int p, int f, int d, int64_t o) { unsigned long buf[6] = { (unsigned long) s, (unsigned long) l, (unsigned long) p, (unsigned long) f, (unsigned long) d, (unsigned long) o }; LSS_REG(2, buf); LSS_BODY(void*, mmap, "0"(__r2)); } #elif defined(__x86_64__) /* Need to make sure __off64_t isn't truncated to 32-bits under x32. */ LSS_INLINE void* LSS_NAME(mmap)(void *s, size_t l, int p, int f, int d, int64_t o) { LSS_BODY(6, void*, mmap, LSS_SYSCALL_ARG(s), LSS_SYSCALL_ARG(l), LSS_SYSCALL_ARG(p), LSS_SYSCALL_ARG(f), LSS_SYSCALL_ARG(d), (uint64_t)(o)); } #elif defined(__aarch64__) && defined (__ILP32__) /* aarch64_ilp32 uses mmap2 for sys_mmap() */ LSS_INLINE _syscall6_long(void*, mmap, mmap2, void*, addr, size_t, length, int, prot, int, flags, int, fd, int64_t, offset) #else /* Remaining 64-bit architectures. */ LSS_INLINE _syscall6(void*, mmap, void*, addr, size_t, length, int, prot, int, flags, int, fd, int64_t, offset) #endif #if defined(__i386__) || \ defined(__PPC__) || \ (defined(__arm__) && !defined(__ARM_EABI__)) || \ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ defined(__s390__) /* See sys_socketcall in net/socket.c in kernel source. * It de-multiplexes on its first arg and unpacks the arglist * array in its second arg. */ LSS_INLINE _syscall2(int, socketcall, int, c, unsigned long*, a) LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) { unsigned long args[3] = { (unsigned long) domain, (unsigned long) type, (unsigned long) protocol }; return LSS_NAME(socketcall)(1, args); } #elif defined(__ARM_EABI__) LSS_INLINE _syscall3(int, socket, int, d, int, t, int, p) #endif #if defined(__mips__) /* sys_pipe() on MIPS has non-standard calling conventions, as it returns * both file handles through CPU registers. 
*/ LSS_INLINE int LSS_NAME(pipe)(int *p) { register unsigned long __v0 __asm__("$2") = __NR_pipe; register unsigned long __v1 __asm__("$3"); register unsigned long __r7 __asm__("$7"); __asm__ __volatile__ ("syscall\n" : "=&r"(__v0), "=&r"(__v1), "+r" (__r7) : "0"(__v0) : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "memory"); if (__r7) { LSS_ERRNO = __v0; return -1; } else { p[0] = __v0; p[1] = __v1; return 0; } } #elif defined(__NR_pipe2) LSS_INLINE _syscall2(int, pipe2, int *, p, int, f ) LSS_INLINE int LSS_NAME(pipe)( int * p) { return LSS_NAME(pipe2)(p, 0); } #else LSS_INLINE _syscall1(int, pipe, int *, p) #endif LSS_INLINE pid_t LSS_NAME(gettid)() { pid_t tid = LSS_NAME(_gettid)(); if (tid != -1) { return tid; } return LSS_NAME(getpid)(); } LSS_INLINE void *LSS_NAME(mremap)(void *old_address, size_t old_size, size_t new_size, int flags, ...) { va_list ap; void *new_address, *rc; va_start(ap, flags); new_address = va_arg(ap, void *); rc = LSS_NAME(_mremap)(old_address, old_size, new_size, flags, new_address); va_end(ap); return rc; } LSS_INLINE int LSS_NAME(ptrace_detach)(pid_t pid) { /* PTRACE_DETACH can sometimes forget to wake up the tracee and it * then sends job control signals to the real parent, rather than to * the tracer. We reduce the risk of this happening by starting a * whole new time slice, and then quickly sending a SIGCONT signal * right after detaching from the tracee. */ int rc, err; LSS_NAME(sched_yield)(); rc = LSS_NAME(ptrace)(PTRACE_DETACH, pid, (void *)0, (void *)0); err = LSS_ERRNO; LSS_NAME(kill)(pid, SIGCONT); LSS_ERRNO = err; return rc; } #endif #if defined(__cplusplus) && !defined(SYS_CPLUSPLUS) } #endif #endif #endif
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/elf_mem_image.cc
.cc
14,642
435
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Paul Pluzhnikov // // Allow dynamic symbol lookup in an in-memory Elf image. // #include "base/elf_mem_image.h" #ifdef HAVE_ELF_MEM_IMAGE // defined in elf_mem_image.h #include <stddef.h> // for size_t, ptrdiff_t #include "base/logging.h" // From binutils/include/elf/common.h (this doesn't appear to be documented // anywhere else). 
// // /* This flag appears in a Versym structure. It means that the symbol // is hidden, and is only visible with an explicit version number. // This is a GNU extension. */ // #define VERSYM_HIDDEN 0x8000 // // /* This is the mask for the rest of the Versym information. */ // #define VERSYM_VERSION 0x7fff #define VERSYM_VERSION 0x7fff namespace base { namespace { template <int N> class ElfClass { public: static const int kElfClass = -1; static int ElfBind(const ElfW(Sym) *) { CHECK(false); // << "Unexpected word size"; return 0; } static int ElfType(const ElfW(Sym) *) { CHECK(false); // << "Unexpected word size"; return 0; } }; template <> class ElfClass<32> { public: static const int kElfClass = ELFCLASS32; static int ElfBind(const ElfW(Sym) *symbol) { return ELF32_ST_BIND(symbol->st_info); } static int ElfType(const ElfW(Sym) *symbol) { return ELF32_ST_TYPE(symbol->st_info); } }; template <> class ElfClass<64> { public: static const int kElfClass = ELFCLASS64; static int ElfBind(const ElfW(Sym) *symbol) { return ELF64_ST_BIND(symbol->st_info); } static int ElfType(const ElfW(Sym) *symbol) { return ELF64_ST_TYPE(symbol->st_info); } }; typedef ElfClass<__WORDSIZE> CurrentElfClass; // Extract an element from one of the ELF tables, cast it to desired type. // This is just a simple arithmetic and a glorified cast. // Callers are responsible for bounds checking. 
template <class T> const T* GetTableElement(const ElfW(Ehdr) *ehdr, ElfW(Off) table_offset, ElfW(Word) element_size, size_t index) { return reinterpret_cast<const T*>(reinterpret_cast<const char *>(ehdr) + table_offset + index * element_size); } } // namespace const void *const ElfMemImage::kInvalidBase = reinterpret_cast<const void *>(~0L); ElfMemImage::ElfMemImage(const void *base) { CHECK(base != kInvalidBase); Init(base); } int ElfMemImage::GetNumSymbols() const { if (!hash_) { return 0; } // See http://www.caldera.com/developers/gabi/latest/ch5.dynamic.html#hash return hash_[1]; } const ElfW(Sym) *ElfMemImage::GetDynsym(int index) const { CHECK_LT(index, GetNumSymbols()); return dynsym_ + index; } const ElfW(Versym) *ElfMemImage::GetVersym(int index) const { CHECK_LT(index, GetNumSymbols()); return versym_ + index; } const ElfW(Phdr) *ElfMemImage::GetPhdr(int index) const { CHECK_LT(index, ehdr_->e_phnum); return GetTableElement<ElfW(Phdr)>(ehdr_, ehdr_->e_phoff, ehdr_->e_phentsize, index); } const char *ElfMemImage::GetDynstr(ElfW(Word) offset) const { CHECK_LT(offset, strsize_); return dynstr_ + offset; } const void *ElfMemImage::GetSymAddr(const ElfW(Sym) *sym) const { if (sym->st_shndx == SHN_UNDEF || sym->st_shndx >= SHN_LORESERVE) { // Symbol corresponds to "special" (e.g. SHN_ABS) section. return reinterpret_cast<const void *>(sym->st_value); } CHECK_LT(link_base_, sym->st_value); return GetTableElement<char>(ehdr_, 0, 1, sym->st_value) - link_base_; } const ElfW(Verdef) *ElfMemImage::GetVerdef(int index) const { CHECK_LE(index, verdefnum_); const ElfW(Verdef) *version_definition = verdef_; while (version_definition->vd_ndx < index && version_definition->vd_next) { const char *const version_definition_as_char = reinterpret_cast<const char *>(version_definition); version_definition = reinterpret_cast<const ElfW(Verdef) *>(version_definition_as_char + version_definition->vd_next); } return version_definition->vd_ndx == index ? 
version_definition : NULL; } const ElfW(Verdaux) *ElfMemImage::GetVerdefAux( const ElfW(Verdef) *verdef) const { return reinterpret_cast<const ElfW(Verdaux) *>(verdef+1); } const char *ElfMemImage::GetVerstr(ElfW(Word) offset) const { CHECK_LT(offset, strsize_); return dynstr_ + offset; } void ElfMemImage::Init(const void *base) { ehdr_ = NULL; dynsym_ = NULL; dynstr_ = NULL; versym_ = NULL; verdef_ = NULL; hash_ = NULL; strsize_ = 0; verdefnum_ = 0; link_base_ = ~0L; // Sentinel: PT_LOAD .p_vaddr can't possibly be this. if (!base) { return; } const intptr_t base_as_uintptr_t = reinterpret_cast<uintptr_t>(base); // Fake VDSO has low bit set. const bool fake_vdso = ((base_as_uintptr_t & 1) != 0); base = reinterpret_cast<const void *>(base_as_uintptr_t & ~1); const char *const base_as_char = reinterpret_cast<const char *>(base); if (base_as_char[EI_MAG0] != ELFMAG0 || base_as_char[EI_MAG1] != ELFMAG1 || base_as_char[EI_MAG2] != ELFMAG2 || base_as_char[EI_MAG3] != ELFMAG3) { RAW_DCHECK(false, "no ELF magic"); // at %p", base); return; } int elf_class = base_as_char[EI_CLASS]; if (elf_class != CurrentElfClass::kElfClass) { DCHECK_EQ(elf_class, CurrentElfClass::kElfClass); return; } switch (base_as_char[EI_DATA]) { case ELFDATA2LSB: { if (__LITTLE_ENDIAN != __BYTE_ORDER) { DCHECK_EQ(__LITTLE_ENDIAN, __BYTE_ORDER); // << ": wrong byte order"; return; } break; } case ELFDATA2MSB: { if (__BIG_ENDIAN != __BYTE_ORDER) { DCHECK_EQ(__BIG_ENDIAN, __BYTE_ORDER); // << ": wrong byte order"; return; } break; } default: { RAW_DCHECK(false, "unexpected data encoding"); // << base_as_char[EI_DATA]; return; } } ehdr_ = reinterpret_cast<const ElfW(Ehdr) *>(base); const ElfW(Phdr) *dynamic_program_header = NULL; for (int i = 0; i < ehdr_->e_phnum; ++i) { const ElfW(Phdr) *const program_header = GetPhdr(i); switch (program_header->p_type) { case PT_LOAD: if (link_base_ == ~0L) { link_base_ = program_header->p_vaddr; } break; case PT_DYNAMIC: dynamic_program_header = program_header; 
break; } } if (link_base_ == ~0L || !dynamic_program_header) { RAW_DCHECK(~0L != link_base_, "no PT_LOADs in VDSO"); RAW_DCHECK(dynamic_program_header, "no PT_DYNAMIC in VDSO"); // Mark this image as not present. Can not recur infinitely. Init(0); return; } ptrdiff_t relocation = base_as_char - reinterpret_cast<const char *>(link_base_); ElfW(Dyn) *dynamic_entry = reinterpret_cast<ElfW(Dyn) *>(dynamic_program_header->p_vaddr + relocation); for (; dynamic_entry->d_tag != DT_NULL; ++dynamic_entry) { ElfW(Xword) value = dynamic_entry->d_un.d_val; if (fake_vdso) { // A complication: in the real VDSO, dynamic entries are not relocated // (it wasn't loaded by a dynamic loader). But when testing with a // "fake" dlopen()ed vdso library, the loader relocates some (but // not all!) of them before we get here. if (dynamic_entry->d_tag == DT_VERDEF) { // The only dynamic entry (of the ones we care about) libc-2.3.6 // loader doesn't relocate. value += relocation; } } else { // Real VDSO. Everything needs to be relocated. value += relocation; } switch (dynamic_entry->d_tag) { case DT_HASH: hash_ = reinterpret_cast<ElfW(Word) *>(value); break; case DT_SYMTAB: dynsym_ = reinterpret_cast<ElfW(Sym) *>(value); break; case DT_STRTAB: dynstr_ = reinterpret_cast<const char *>(value); break; case DT_VERSYM: versym_ = reinterpret_cast<ElfW(Versym) *>(value); break; case DT_VERDEF: verdef_ = reinterpret_cast<ElfW(Verdef) *>(value); break; case DT_VERDEFNUM: verdefnum_ = dynamic_entry->d_un.d_val; break; case DT_STRSZ: strsize_ = dynamic_entry->d_un.d_val; break; default: // Unrecognized entries explicitly ignored. 
break; } } if (!hash_ || !dynsym_ || !dynstr_ || !versym_ || !verdef_ || !verdefnum_ || !strsize_) { RAW_DCHECK(hash_, "invalid VDSO (no DT_HASH)"); RAW_DCHECK(dynsym_, "invalid VDSO (no DT_SYMTAB)"); RAW_DCHECK(dynstr_, "invalid VDSO (no DT_STRTAB)"); RAW_DCHECK(versym_, "invalid VDSO (no DT_VERSYM)"); RAW_DCHECK(verdef_, "invalid VDSO (no DT_VERDEF)"); RAW_DCHECK(verdefnum_, "invalid VDSO (no DT_VERDEFNUM)"); RAW_DCHECK(strsize_, "invalid VDSO (no DT_STRSZ)"); // Mark this image as not present. Can not recur infinitely. Init(0); return; } } bool ElfMemImage::LookupSymbol(const char *name, const char *version, int type, SymbolInfo *info) const { for (SymbolIterator it = begin(); it != end(); ++it) { if (strcmp(it->name, name) == 0 && strcmp(it->version, version) == 0 && CurrentElfClass::ElfType(it->symbol) == type) { if (info) { *info = *it; } return true; } } return false; } bool ElfMemImage::LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const { for (SymbolIterator it = begin(); it != end(); ++it) { const char *const symbol_start = reinterpret_cast<const char *>(it->address); const char *const symbol_end = symbol_start + it->symbol->st_size; if (symbol_start <= address && address < symbol_end) { if (info_out) { // Client wants to know details for that symbol (the usual case). if (CurrentElfClass::ElfBind(it->symbol) == STB_GLOBAL) { // Strong symbol; just return it. *info_out = *it; return true; } else { // Weak or local. Record it, but keep looking for a strong one. *info_out = *it; } } else { // Client only cares if there is an overlapping symbol. 
return true; } } } return false; } ElfMemImage::SymbolIterator::SymbolIterator(const void *const image, int index) : index_(index), image_(image) { } const ElfMemImage::SymbolInfo *ElfMemImage::SymbolIterator::operator->() const { return &info_; } const ElfMemImage::SymbolInfo& ElfMemImage::SymbolIterator::operator*() const { return info_; } bool ElfMemImage::SymbolIterator::operator==(const SymbolIterator &rhs) const { return this->image_ == rhs.image_ && this->index_ == rhs.index_; } bool ElfMemImage::SymbolIterator::operator!=(const SymbolIterator &rhs) const { return !(*this == rhs); } ElfMemImage::SymbolIterator &ElfMemImage::SymbolIterator::operator++() { this->Update(1); return *this; } ElfMemImage::SymbolIterator ElfMemImage::begin() const { SymbolIterator it(this, 0); it.Update(0); return it; } ElfMemImage::SymbolIterator ElfMemImage::end() const { return SymbolIterator(this, GetNumSymbols()); } void ElfMemImage::SymbolIterator::Update(int increment) { const ElfMemImage *image = reinterpret_cast<const ElfMemImage *>(image_); CHECK(image->IsPresent() || increment == 0); if (!image->IsPresent()) { return; } index_ += increment; if (index_ >= image->GetNumSymbols()) { index_ = image->GetNumSymbols(); return; } const ElfW(Sym) *symbol = image->GetDynsym(index_); const ElfW(Versym) *version_symbol = image->GetVersym(index_); CHECK(symbol && version_symbol); const char *const symbol_name = image->GetDynstr(symbol->st_name); const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION; const ElfW(Verdef) *version_definition = NULL; const char *version_name = ""; if (symbol->st_shndx == SHN_UNDEF) { // Undefined symbols reference DT_VERNEED, not DT_VERDEF, and // version_index could well be greater than verdefnum_, so calling // GetVerdef(version_index) may trigger assertion. 
} else { version_definition = image->GetVerdef(version_index); } if (version_definition) { // I am expecting 1 or 2 auxiliary entries: 1 for the version itself, // optional 2nd if the version has a parent. CHECK_LE(1, version_definition->vd_cnt); CHECK_LE(version_definition->vd_cnt, 2); const ElfW(Verdaux) *version_aux = image->GetVerdefAux(version_definition); version_name = image->GetVerstr(version_aux->vda_name); } info_.name = symbol_name; info_.version = version_name; info_.address = image->GetSymAddr(symbol); info_.symbol = symbol; } } // namespace base #endif // HAVE_ELF_MEM_IMAGE
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/thread_lister.h
.h
3,748
84
/* -*- Mode: c; c-basic-offset: 2; indent-tabs-mode: nil -*- */ /* Copyright (c) 2005-2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Markus Gutschke */ #ifndef _THREAD_LISTER_H #define _THREAD_LISTER_H #include <stdarg.h> #include <sys/types.h> #ifdef __cplusplus extern "C" { #endif typedef int (*ListAllProcessThreadsCallBack)(void *parameter, int num_threads, pid_t *thread_pids, va_list ap); /* This function gets the list of all linux threads of the current process * passes them to the 'callback' along with the 'parameter' pointer; at the * call back call time all the threads are paused via * PTRACE_ATTACH. * The callback is executed from a separate thread which shares only the * address space, the filesystem, and the filehandles with the caller. Most * notably, it does not share the same pid and ppid; and if it terminates, * the rest of the application is still there. 'callback' is supposed to do * or arrange for TCMalloc_ResumeAllProcessThreads. This happens automatically, if * the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous * signals are blocked. If the 'callback' decides to unblock them, it must * ensure that they cannot terminate the application, or that * TCMalloc_ResumeAllProcessThreads will get called. * It is an error for the 'callback' to make any library calls that could * acquire locks. Most notably, this means that most system calls have to * avoid going through libc. Also, this means that it is not legal to call * exit() or abort(). * We return -1 on error and the return value of 'callback' on success. */ int TCMalloc_ListAllProcessThreads(void *parameter, ListAllProcessThreadsCallBack callback, ...); /* This function resumes the list of all linux threads that * TCMalloc_ListAllProcessThreads pauses before giving to its * callback. The function returns non-zero if at least one thread was * suspended and has now been resumed. */ int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids); #ifdef __cplusplus } #endif #endif /* _THREAD_LISTER_H */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/elf_mem_image.h
.h
5,179
136
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Paul Pluzhnikov // // Allow dynamic symbol lookup for in-memory Elf images. #ifndef BASE_ELF_MEM_IMAGE_H_ #define BASE_ELF_MEM_IMAGE_H_ #include <config.h> #ifdef HAVE_FEATURES_H #include <features.h> // for __GLIBC__ #endif // Maybe one day we can rewrite this file not to require the elf // symbol extensions in glibc, but for right now we need them. 
#if defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) #define HAVE_ELF_MEM_IMAGE 1 #include <stdlib.h> #include <link.h> // for ElfW namespace base { // An in-memory ELF image (may not exist on disk). class ElfMemImage { public: // Sentinel: there could never be an elf image at this address. static const void *const kInvalidBase; // Information about a single vdso symbol. // All pointers are into .dynsym, .dynstr, or .text of the VDSO. // Do not free() them or modify through them. struct SymbolInfo { const char *name; // E.g. "__vdso_getcpu" const char *version; // E.g. "LINUX_2.6", could be "" // for unversioned symbol. const void *address; // Relocated symbol address. const ElfW(Sym) *symbol; // Symbol in the dynamic symbol table. }; // Supports iteration over all dynamic symbols. class SymbolIterator { public: friend class ElfMemImage; const SymbolInfo *operator->() const; const SymbolInfo &operator*() const; SymbolIterator& operator++(); bool operator!=(const SymbolIterator &rhs) const; bool operator==(const SymbolIterator &rhs) const; private: SymbolIterator(const void *const image, int index); void Update(int incr); SymbolInfo info_; int index_; const void *const image_; }; explicit ElfMemImage(const void *base); void Init(const void *base); bool IsPresent() const { return ehdr_ != NULL; } const ElfW(Phdr)* GetPhdr(int index) const; const ElfW(Sym)* GetDynsym(int index) const; const ElfW(Versym)* GetVersym(int index) const; const ElfW(Verdef)* GetVerdef(int index) const; const ElfW(Verdaux)* GetVerdefAux(const ElfW(Verdef) *verdef) const; const char* GetDynstr(ElfW(Word) offset) const; const void* GetSymAddr(const ElfW(Sym) *sym) const; const char* GetVerstr(ElfW(Word) offset) const; int GetNumSymbols() const; SymbolIterator begin() const; SymbolIterator end() const; // Look up versioned dynamic symbol in the image. // Returns false if image is not present, or doesn't contain given // symbol/version/type combination. 
// If info_out != NULL, additional details are filled in. bool LookupSymbol(const char *name, const char *version, int symbol_type, SymbolInfo *info_out) const; // Find info about symbol (if any) which overlaps given address. // Returns true if symbol was found; false if image isn't present // or doesn't have a symbol overlapping given address. // If info_out != NULL, additional details are filled in. bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const; private: const ElfW(Ehdr) *ehdr_; const ElfW(Sym) *dynsym_; const ElfW(Versym) *versym_; const ElfW(Verdef) *verdef_; const ElfW(Word) *hash_; const char *dynstr_; size_t strsize_; size_t verdefnum_; ElfW(Addr) link_base_; // Link-time base (p_vaddr of first PT_LOAD). }; } // namespace base #endif // __ELF__ and __GLIBC__ and !__native_client__ #endif // BASE_ELF_MEM_IMAGE_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-gcc.h
.h
6,875
204
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2014, Linaro // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // // Author: Riku Voipio, riku.voipio@linaro.org // // atomic primitives implemented with gcc atomic intrinsics: // http://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html // #ifndef BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_ #define BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_ #include <stdio.h> #include <stdlib.h> #include "base/basictypes.h" typedef int32_t Atomic32; namespace base { namespace subtle { typedef int64_t Atomic64; inline void MemoryBarrier() { __sync_synchronize(); } inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value = old_value; __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return prev_value; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELAXED); } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_ACQUIRE); } inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { return __atomic_exchange_n(const_cast<Atomic32*>(ptr), new_value, __ATOMIC_RELEASE); } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value = old_value; __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); return prev_value; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value = old_value; __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return prev_value; } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } inline void 
Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } // 64-bit versions inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value = old_value; __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); return prev_value; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELAXED); } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_ACQUIRE); } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { return __atomic_exchange_n(const_cast<Atomic64*>(ptr), new_value, __ATOMIC_RELEASE); } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value = old_value; __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); return prev_value; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value = old_value; __atomic_compare_exchange_n(ptr, &prev_value, new_value, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED); return prev_value; } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; } inline Atomic64 
NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; MemoryBarrier(); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } } // namespace base::subtle } // namespace base #endif // BASE_ATOMICOPS_INTERNALS_GCC_GENERIC_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/vdso_support.cc
.cc
5,217
143
// Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Paul Pluzhnikov // // Allow dynamic symbol lookup in the kernel VDSO page. // // VDSOSupport -- a class representing kernel VDSO (if present). 
// #include "base/vdso_support.h" #ifdef HAVE_VDSO_SUPPORT // defined in vdso_support.h #include <fcntl.h> #include <stddef.h> // for ptrdiff_t #include "base/atomicops.h" // for MemoryBarrier #include "base/logging.h" #include "base/dynamic_annotations.h" #include "base/basictypes.h" // for COMPILE_ASSERT using base::subtle::MemoryBarrier; #ifndef AT_SYSINFO_EHDR #define AT_SYSINFO_EHDR 33 #endif namespace base { const void *VDSOSupport::vdso_base_ = ElfMemImage::kInvalidBase; VDSOSupport::VDSOSupport() // If vdso_base_ is still set to kInvalidBase, we got here // before VDSOSupport::Init has been called. Call it now. : image_(vdso_base_ == ElfMemImage::kInvalidBase ? Init() : vdso_base_) { } // NOTE: we can't use GoogleOnceInit() below, because we can be // called by tcmalloc, and none of the *once* stuff may be functional yet. // // In addition, we hope that the VDSOSupportHelper constructor // causes this code to run before there are any threads, and before // InitGoogle() has executed any chroot or setuid calls. // // Finally, even if there is a race here, it is harmless, because // the operation should be idempotent. const void *VDSOSupport::Init() { if (vdso_base_ == ElfMemImage::kInvalidBase) { // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[] // on stack, and so glibc works as if VDSO was not present. // But going directly to kernel via /proc/self/auxv below bypasses // Valgrind zapping. So we check for Valgrind separately. if (RunningOnValgrind()) { vdso_base_ = NULL; return NULL; } int fd = open("/proc/self/auxv", O_RDONLY); if (fd == -1) { // Kernel too old to have a VDSO. 
vdso_base_ = NULL; return NULL; } ElfW(auxv_t) aux; while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) { if (aux.a_type == AT_SYSINFO_EHDR) { COMPILE_ASSERT(sizeof(vdso_base_) == sizeof(aux.a_un.a_val), unexpected_sizeof_pointer_NE_sizeof_a_val); vdso_base_ = reinterpret_cast<void *>(aux.a_un.a_val); break; } } close(fd); if (vdso_base_ == ElfMemImage::kInvalidBase) { // Didn't find AT_SYSINFO_EHDR in auxv[]. vdso_base_ = NULL; } } return vdso_base_; } const void *VDSOSupport::SetBase(const void *base) { CHECK(base != ElfMemImage::kInvalidBase); const void *old_base = vdso_base_; vdso_base_ = base; image_.Init(base); return old_base; } bool VDSOSupport::LookupSymbol(const char *name, const char *version, int type, SymbolInfo *info) const { return image_.LookupSymbol(name, version, type, info); } bool VDSOSupport::LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const { return image_.LookupSymbolByAddress(address, info_out); } // We need to make sure VDSOSupport::Init() is called before // the main() runs, since it might do something like setuid or // chroot. If VDSOSupport // is used in any global constructor, this will happen, since // VDSOSupport's constructor calls Init. But if not, we need to // ensure it here, with a global constructor of our own. This // is an allowed exception to the normal rule against non-trivial // global constructors. static class VDSOInitHelper { public: VDSOInitHelper() { VDSOSupport::Init(); } } vdso_init_helper; } #endif // HAVE_VDSO_SUPPORT
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/arm_instruction_set_select.h
.h
2,799
85
// Copyright (c) 2011, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // Author: Alexander Levitskiy // // Generalizes the plethora of ARM flavors available to an easier to manage set // Defs reference is at https://wiki.edubuntu.org/ARM/Thumb2PortingHowto #ifndef ARM_INSTRUCTION_SET_SELECT_H_ #define ARM_INSTRUCTION_SET_SELECT_H_ #if defined(__ARM_ARCH_8A__) # define ARMV8 1 #endif #if defined(ARMV8) || \ defined(__ARM_ARCH_7__) || \ defined(__ARM_ARCH_7R__) || \ defined(__ARM_ARCH_7A__) # define ARMV7 1 #endif #if defined(ARMV7) || \ defined(__ARM_ARCH_6__) || \ defined(__ARM_ARCH_6J__) || \ defined(__ARM_ARCH_6K__) || \ defined(__ARM_ARCH_6Z__) || \ defined(__ARM_ARCH_6T2__) || \ defined(__ARM_ARCH_6ZK__) # define ARMV6 1 #endif #if defined(ARMV6) || \ defined(__ARM_ARCH_5T__) || \ defined(__ARM_ARCH_5E__) || \ defined(__ARM_ARCH_5TE__) || \ defined(__ARM_ARCH_5TEJ__) # define ARMV5 1 #endif #if defined(ARMV5) || \ defined(__ARM_ARCH_4__) || \ defined(__ARM_ARCH_4T__) # define ARMV4 1 #endif #if defined(ARMV4) || \ defined(__ARM_ARCH_3__) || \ defined(__ARM_ARCH_3M__) # define ARMV3 1 #endif #if defined(ARMV3) || \ defined(__ARM_ARCH_2__) # define ARMV2 1 #endif #endif // ARM_INSTRUCTION_SET_SELECT_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/googleinit.h
.h
2,941
75
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Jacob Hoffman-Andrews #ifndef _GOOGLEINIT_H #define _GOOGLEINIT_H #include "base/logging.h" class GoogleInitializer { public: typedef void (*VoidFunction)(void); GoogleInitializer(const char* name, VoidFunction ctor, VoidFunction dtor) : name_(name), destructor_(dtor) { RAW_VLOG(10, "<GoogleModuleObject> constructing: %s\n", name_); if (ctor) ctor(); } ~GoogleInitializer() { RAW_VLOG(10, "<GoogleModuleObject> destroying: %s\n", name_); if (destructor_) destructor_(); } private: const char* const name_; const VoidFunction destructor_; }; #define REGISTER_MODULE_INITIALIZER(name, body) \ namespace { \ static void google_init_module_##name () { body; } \ GoogleInitializer google_initializer_module_##name(#name, \ google_init_module_##name, NULL); \ } #define REGISTER_MODULE_DESTRUCTOR(name, body) \ namespace { \ static void google_destruct_module_##name () { body; } \ GoogleInitializer google_destructor_module_##name(#name, \ NULL, google_destruct_module_##name); \ } #endif /* _GOOGLEINIT_H */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-macosx.h
.h
11,908
371
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Implementation of atomic operations for Mac OS X. This file should not // be included directly. Clients should instead include // "base/atomicops.h". #ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_ #define BASE_ATOMICOPS_INTERNALS_MACOSX_H_ typedef int32_t Atomic32; // MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different // on the Mac, even when they are the same size. 
Similarly, on __ppc64__, // AtomicWord and Atomic64 are always different. Thus, we need explicit // casting. #ifdef __LP64__ #define AtomicWordCastType base::subtle::Atomic64 #else #define AtomicWordCastType Atomic32 #endif #if defined(__LP64__) || defined(__i386__) #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* #endif #include <libkern/OSAtomic.h> namespace base { namespace subtle { #if !defined(__LP64__) && defined(__ppc__) // The Mac 64-bit OSAtomic implementations are not available for 32-bit PowerPC, // while the underlying assembly instructions are available only some // implementations of PowerPC. // The following inline functions will fail with the error message at compile // time ONLY IF they are called. So it is safe to use this header if user // code only calls AtomicWord and Atomic32 operations. // // NOTE(vchen): Implementation notes to implement the atomic ops below may // be found in "PowerPC Virtual Environment Architecture, Book II, // Version 2.02", January 28, 2005, Appendix B, page 46. Unfortunately, // extra care must be taken to ensure data are properly 8-byte aligned, and // that data are returned correctly according to Mac OS X ABI specs. 
inline int64_t OSAtomicCompareAndSwap64( int64_t oldValue, int64_t newValue, int64_t *theValue) { __asm__ __volatile__( "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t"); return 0; } inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) { __asm__ __volatile__( "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t"); return 0; } inline int64_t OSAtomicCompareAndSwap64Barrier( int64_t oldValue, int64_t newValue, int64_t *theValue) { int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue); OSMemoryBarrier(); return prev; } inline int64_t OSAtomicAdd64Barrier( int64_t theAmount, int64_t *theValue) { int64_t new_val = OSAtomicAdd64(theAmount, theValue); OSMemoryBarrier(); return new_val; } #endif typedef int64_t Atomic64; inline void MemoryBarrier() { OSMemoryBarrier(); } // 32-bit Versions. inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; do { if (OSAtomicCompareAndSwap32(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap32(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value) { return Acquire_AtomicExchange(ptr, new_value); } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; do { if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } 
prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value) { return Acquire_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { MemoryBarrier(); return *ptr; } // 64-bit version inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; do { if (OSAtomicCompareAndSwap64(old_value, new_value, const_cast<Atomic64*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, Atomic64 new_value) { Atomic64 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap64(old_value, new_value, const_cast<Atomic64*>(ptr))); return old_value; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr, Atomic64 new_value) { Atomic64 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value, const_cast<Atomic64*>(ptr))); return old_value; } inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr, Atomic64 new_value) { return Acquire_AtomicExchange(ptr, new_value); } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; do { if (OSAtomicCompareAndSwap64Barrier(old_value, new_value, const_cast<Atomic64*>(ptr))) { return old_value; } 
prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value) { // The lib kern interface does not distinguish between // Acquire and Release memory barriers; they are equivalent. return Acquire_CompareAndSwap(ptr, old_value, new_value); } #ifdef __LP64__ // 64-bit implementation on 64-bit platform inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { Atomic64 value = *ptr; MemoryBarrier(); return value; } inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { MemoryBarrier(); return *ptr; } #else // 64-bit implementation on 32-bit platform #if defined(__ppc__) inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { __asm__ __volatile__( "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t"); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { __asm__ __volatile__( "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t"); return 0; } #elif defined(__i386__) inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic "movq %%mm0, %0\n\t" // moves (ptr could be read-only) "emms\n\t" // Reset FP registers : "=m" (*ptr) : "m" (value) : // mark the FP stack and mmx registers as clobbered "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { Atomic64 value; __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic "movq %%mm0, 
%0\n\t" // moves (ptr could be read-only) "emms\n\t" // Reset FP registers : "=m" (value) : "m" (*ptr) : // mark the FP stack and mmx registers as clobbered "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"); return value; } #endif inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { NoBarrier_Store(ptr, value); MemoryBarrier(); } inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { MemoryBarrier(); NoBarrier_Store(ptr, value); } inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { Atomic64 value = NoBarrier_Load(ptr); MemoryBarrier(); return value; } inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { MemoryBarrier(); return NoBarrier_Load(ptr); } #endif // __LP64__ } // namespace base::subtle } // namespace base #endif // BASE_ATOMICOPS_INTERNALS_MACOSX_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-x86.h
.h
13,991
392
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Sanjay Ghemawat */ // Implementation of atomic operations for x86. This file should not // be included directly. Clients should instead include // "base/atomicops.h". 
#ifndef BASE_ATOMICOPS_INTERNALS_X86_H_ #define BASE_ATOMICOPS_INTERNALS_X86_H_ #include "base/basictypes.h" typedef int32_t Atomic32; #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* // NOTE(vchen): x86 does not need to define AtomicWordCastType, because it // already matches Atomic32 or Atomic64, depending on the platform. // This struct is not part of the public API of this module; clients may not // use it. // Features of this x86. Values may not be correct before main() is run, // but are set conservatively. struct AtomicOps_x86CPUFeatureStruct { bool has_sse2; // Processor has SSE2. bool has_cmpxchg16b; // Processor supports cmpxchg16b instruction. }; ATTRIBUTE_VISIBILITY_HIDDEN extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") namespace base { namespace subtle { typedef int64_t Atomic64; // 32-bit low-level operations on any platform. inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev; __asm__ __volatile__("lock; cmpxchgl %1,%2" : "=a" (prev) : "q" (new_value), "m" (*ptr), "0" (old_value) : "memory"); return prev; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg. : "=r" (new_value) : "m" (*ptr), "0" (new_value) : "memory"); return new_value; // Now it's the previous value. } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_val = NoBarrier_AtomicExchange(ptr, new_value); return old_val; } inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { // xchgl already has release memory barrier semantics. 
return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); return x; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } #if defined(__x86_64__) // 64-bit implementations of memory barrier can be simpler, because it // "mfence" is guaranteed to exist. inline void MemoryBarrier() { __asm__ __volatile__("mfence" : : : "memory"); } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } #else inline void MemoryBarrier() { if (AtomicOps_Internalx86CPUFeatures.has_sse2) { __asm__ __volatile__("mfence" : : : "memory"); } else { // mfence is faster but not present on PIII Atomic32 x = 0; Acquire_AtomicExchange(&x, 0); } } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { if (AtomicOps_Internalx86CPUFeatures.has_sse2) { *ptr = value; __asm__ __volatile__("mfence" : : : "memory"); } else { Acquire_AtomicExchange(ptr, value); } } #endif inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { ATOMICOPS_COMPILER_BARRIER(); *ptr = value; // An x86 store acts as a release barrier. // See comments in Atomic64 version of Release_Store(), below. } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; // An x86 load acts as a acquire barrier. // See comments in Atomic64 version of Release_Store(), below. ATOMICOPS_COMPILER_BARRIER(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } #if defined(__x86_64__) // 64-bit low-level operations on 64-bit platform. 
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev; __asm__ __volatile__("lock; cmpxchgq %1,%2" : "=a" (prev) : "q" (new_value), "m" (*ptr), "0" (old_value) : "memory"); return prev; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg. : "=r" (new_value) : "m" (*ptr), "0" (new_value) : "memory"); return new_value; // Now it's the previous value. } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 old_val = NoBarrier_AtomicExchange(ptr, new_value); return old_val; } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { // xchgq already has release memory barrier semantics. return NoBarrier_AtomicExchange(ptr, new_value); } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { ATOMICOPS_COMPILER_BARRIER(); *ptr = value; // An x86 store acts as a release barrier // for current AMD/Intel chips as of Jan 2008. // See also Acquire_Load(), below. // When new chips come out, check: // IA-32 Intel Architecture Software Developer's Manual, Volume 3: // System Programming Guide, Chatper 7: Multiple-processor management, // Section 7.2, Memory Ordering. // Last seen at: // http://developer.intel.com/design/pentium4/manuals/index_new.htm // // x86 stores/loads fail to act as barriers for a few instructions (clflush // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are // not generated by the compiler, and are rare. Users of these instructions // need to know about cache behaviour in any case since all of these involve // either flushing cache lines or non-temporal cache hints. 
} inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; // An x86 load acts as a acquire barrier, // for current AMD/Intel chips as of Jan 2008. // See also Release_Store(), above. ATOMICOPS_COMPILER_BARRIER(); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } #else // defined(__x86_64__) // 64-bit low-level operations on 32-bit platform. #if !((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) // For compilers older than gcc 4.1, we use inline asm. // // Potential pitfalls: // // 1. %ebx points to Global offset table (GOT) with -fPIC. // We need to preserve this register. // 2. When explicit registers are used in inline asm, the // compiler may not be aware of it and might try to reuse // the same register for another argument which has constraints // that allow it ("r" for example). inline Atomic64 __sync_val_compare_and_swap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev; __asm__ __volatile__("push %%ebx\n\t" "movl (%3), %%ebx\n\t" // Move 64-bit new_value into "movl 4(%3), %%ecx\n\t" // ecx:ebx "lock; cmpxchg8b (%1)\n\t"// If edx:eax (old_value) same "pop %%ebx\n\t" : "=A" (prev) // as contents of ptr: : "D" (ptr), // ecx:ebx => ptr "0" (old_value), // else: "S" (&new_value) // old *ptr => edx:eax : "memory", "%ecx"); return prev; } #endif // Compiler < gcc-4.1 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_val, Atomic64 new_val) { return __sync_val_compare_and_swap(ptr, old_val, new_val); } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_val) { Atomic64 old_val; do { old_val = *ptr; } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val); return old_val; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_val) { Atomic64 old_val = 
NoBarrier_AtomicExchange(ptr, new_val); return old_val; } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_val) { return NoBarrier_AtomicExchange(ptr, new_val); } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic "movq %%mm0, %0\n\t" // moves (ptr could be read-only) "emms\n\t" // Empty mmx state/Reset FP regs : "=m" (*ptr) : "m" (value) : // mark the FP stack and mmx registers as clobbered "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"); } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { NoBarrier_Store(ptr, value); MemoryBarrier(); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { ATOMICOPS_COMPILER_BARRIER(); NoBarrier_Store(ptr, value); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { Atomic64 value; __asm__ __volatile__("movq %1, %%mm0\n\t" // Use mmx reg for 64-bit atomic "movq %%mm0, %0\n\t" // moves (ptr could be read-only) "emms\n\t" // Empty mmx state/Reset FP regs : "=m" (value) : "m" (*ptr) : // mark the FP stack and mmx registers as clobbered "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)", "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"); return value; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = NoBarrier_Load(ptr); ATOMICOPS_COMPILER_BARRIER(); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return NoBarrier_Load(ptr); } #endif // defined(__x86_64__) inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); return x; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } 
} // namespace base::subtle } // namespace base #undef ATOMICOPS_COMPILER_BARRIER #endif // BASE_ATOMICOPS_INTERNALS_X86_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-arm-v6plus.h
.h
10,294
331
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2011, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // // Author: Sasha Levitskiy // based on atomicops-internals by Sanjay Ghemawat // // This file is an internal atomic implementation, use base/atomicops.h instead. // // This code implements ARM atomics for architectures V6 and newer. 
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ #define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ #include <stdio.h> #include <stdlib.h> #include "base/basictypes.h" // For COMPILE_ASSERT // The LDREXD and STREXD instructions in ARM all v7 variants or above. In v6, // only some variants support it. For simplicity, we only use exclusive // 64-bit load/store in V7 or above. #if defined(ARMV7) # define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD #endif typedef int32_t Atomic32; namespace base { namespace subtle { typedef int64_t Atomic64; // 32-bit low-level ops inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 oldval, res; do { __asm__ __volatile__( "ldrex %1, [%3]\n" "mov %0, #0\n" "teq %1, %4\n" // The following IT (if-then) instruction is needed for the subsequent // conditional instruction STREXEQ when compiling in THUMB mode. // In ARM mode, the compiler/assembler will not generate any code for it. "it eq\n" "strexeq %0, %5, [%3]\n" : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) : "r" (ptr), "Ir" (old_value), "r" (new_value) : "cc"); } while (res); return oldval; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 tmp, old; __asm__ __volatile__( "1:\n" "ldrex %1, [%2]\n" "strex %0, %3, [%2]\n" "teq %0, #0\n" "bne 1b" : "=&r" (tmp), "=&r" (old) : "r" (ptr), "r" (new_value) : "cc", "memory"); return old; } inline void MemoryBarrier() { #if !defined(ARMV7) uint32_t dest = 0; __asm__ __volatile__("mcr p15,0,%0,c7,c10,5" :"=&r"(dest) : : "memory"); #else __asm__ __volatile__("dmb" : : : "memory"); #endif } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value); MemoryBarrier(); return old_value; } inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { MemoryBarrier(); return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic32 
Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value); MemoryBarrier(); return value; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { MemoryBarrier(); return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } // 64-bit versions are only available if LDREXD and STREXD instructions // are available. #ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD #define BASE_HAS_ATOMIC64 1 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 oldval, res; do { __asm__ __volatile__( "ldrexd %1, [%3]\n" "mov %0, #0\n" "teq %Q1, %Q4\n" // The following IT (if-then) instructions are needed for the subsequent // conditional instructions when compiling in THUMB mode. // In ARM mode, the compiler/assembler will not generate any code for it. 
"it eq\n" "teqeq %R1, %R4\n" "it eq\n" "strexdeq %0, %5, [%3]\n" : "=&r" (res), "=&r" (oldval), "+Q" (*ptr) : "r" (ptr), "Ir" (old_value), "r" (new_value) : "cc"); } while (res); return oldval; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { int store_failed; Atomic64 old; __asm__ __volatile__( "1:\n" "ldrexd %1, [%2]\n" "strexd %0, %3, [%2]\n" "teq %0, #0\n" "bne 1b" : "=&r" (store_failed), "=&r" (old) : "r" (ptr), "r" (new_value) : "cc", "memory"); return old; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value); MemoryBarrier(); return old_value; } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { MemoryBarrier(); return NoBarrier_AtomicExchange(ptr, new_value); } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { int store_failed; Atomic64 dummy; __asm__ __volatile__( "1:\n" // Dummy load to lock cache line. 
"ldrexd %1, [%3]\n" "strexd %0, %2, [%3]\n" "teq %0, #0\n" "bne 1b" : "=&r" (store_failed), "=&r"(dummy) : "r"(value), "r" (ptr) : "cc", "memory"); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { Atomic64 res; __asm__ __volatile__( "ldrexd %0, [%1]\n" "clrex\n" : "=r" (res) : "r"(ptr), "Q"(*ptr)); return res; } #else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD inline void NotImplementedFatalError(const char *function_name) { fprintf(stderr, "64-bit %s() not implemented on this platform\n", function_name); abort(); } inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { NotImplementedFatalError("NoBarrier_CompareAndSwap"); return 0; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { NotImplementedFatalError("NoBarrier_AtomicExchange"); return 0; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { NotImplementedFatalError("Acquire_AtomicExchange"); return 0; } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { NotImplementedFatalError("Release_AtomicExchange"); return 0; } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { NotImplementedFatalError("NoBarrier_Store"); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { NotImplementedFatalError("NoBarrier_Load"); return 0; } #endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { NoBarrier_Store(ptr, value); MemoryBarrier(); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); NoBarrier_Store(ptr, value); } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = NoBarrier_Load(ptr); MemoryBarrier(); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return NoBarrier_Load(ptr); } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, 
Atomic64 new_value) { Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value); MemoryBarrier(); return value; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { MemoryBarrier(); return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } } // namespace subtle ends } // namespace base ends #endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops.h
.h
16,995
400
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Sanjay Ghemawat */ // For atomic operations on statistics counters, see atomic_stats_counter.h. // For atomic operations on sequence numbers, see atomic_sequence_num.h. // For atomic operations on reference counts, see atomic_refcount.h. // Some fast atomic operations -- typically with machine-dependent // implementations. 
This file may need editing as Google code is // ported to different architectures. // The routines exported by this module are subtle. If you use them, even if // you get the code right, it will depend on careful reasoning about atomicity // and memory ordering; it will be less readable, and harder to maintain. If // you plan to use these routines, you should have a good reason, such as solid // evidence that performance would otherwise suffer, or there being no // alternative. You should assume only properties explicitly guaranteed by the // specifications in this file. You are almost certainly _not_ writing code // just for the x86; if you assume x86 semantics, x86 hardware bugs and // implementations on other archtectures will cause your code to break. If you // do not know what you are doing, avoid these routines, and use a Mutex. // // These following lower-level operations are typically useful only to people // implementing higher-level synchronization operations like spinlocks, // mutexes, and condition-variables. They combine CompareAndSwap(), a load, or // a store with appropriate memory-ordering instructions. "Acquire" operations // ensure that no later memory access can be reordered ahead of the operation. // "Release" operations ensure that no previous memory access can be reordered // after the operation. "Barrier" operations have both "Acquire" and "Release" // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory // access. // // It is incorrect to make direct assignments to/from an atomic variable. // You should use one of the Load or Store routines. The NoBarrier // versions are provided when no barriers are needed: // NoBarrier_Store() // NoBarrier_Load() // Although there are currently no compiler enforcement, you are encouraged // to use these. Moreover, if you choose to use base::subtle::Atomic64 type, // you MUST use one of the Load or Store routines to get correct behavior // on 32-bit platforms. 
// // The intent is eventually to put all of these routines in namespace // base::subtle #ifndef THREAD_ATOMICOPS_H_ #define THREAD_ATOMICOPS_H_ #include <config.h> #ifdef HAVE_STDINT_H #include <stdint.h> #endif // ------------------------------------------------------------------------ // Include the platform specific implementations of the types // and operations listed below. Implementations are to provide Atomic32 // and Atomic64 operations. If there is a mismatch between intptr_t and // the Atomic32 or Atomic64 types for a platform, the platform-specific header // should define the macro, AtomicWordCastType in a clause similar to the // following: // #if ...pointers are 64 bits... // # define AtomicWordCastType base::subtle::Atomic64 // #else // # define AtomicWordCastType Atomic32 // #endif // TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?) // ------------------------------------------------------------------------ #include "base/arm_instruction_set_select.h" #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #define CLANG_VERSION (__clang_major__ * 10000 \ + __clang_minor__ * 100 \ + __clang_patchlevel__) #if defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__GNUC__) && GCC_VERSION >= 40700 #include "base/atomicops-internals-gcc.h" #elif defined(TCMALLOC_PREFER_GCC_ATOMICS) && defined(__clang__) && CLANG_VERSION >= 30400 #include "base/atomicops-internals-gcc.h" #elif defined(__MACH__) && defined(__APPLE__) #include "base/atomicops-internals-macosx.h" #elif defined(__GNUC__) && defined(ARMV6) #include "base/atomicops-internals-arm-v6plus.h" #elif defined(ARMV3) #include "base/atomicops-internals-arm-generic.h" #elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__)) #include "base/atomicops-internals-x86.h" #elif defined(_WIN32) #include "base/atomicops-internals-windows.h" #elif defined(__linux__) && defined(__PPC__) #include "base/atomicops-internals-linuxppc.h" #elif defined(__GNUC__) 
&& defined(__mips__) #include "base/atomicops-internals-mips.h" #elif defined(__GNUC__) && GCC_VERSION >= 40700 #include "base/atomicops-internals-gcc.h" #elif defined(__clang__) && CLANG_VERSION >= 30400 #include "base/atomicops-internals-gcc.h" #else #error You need to implement atomic operations for this architecture #endif // Signed type that can hold a pointer and supports the atomic ops below, as // well as atomic loads and stores. Instances must be naturally-aligned. typedef intptr_t AtomicWord; #ifdef AtomicWordCastType // ------------------------------------------------------------------------ // This section is needed only when explicit type casting is required to // cast AtomicWord to one of the basic atomic types (Atomic64 or Atomic32). // It also serves to document the AtomicWord interface. // ------------------------------------------------------------------------ namespace base { namespace subtle { // Atomically execute: // result = *ptr; // if (*ptr == old_value) // *ptr = new_value; // return result; // // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". // Always return the old value of "*ptr" // // This routine implies no memory barriers. inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, AtomicWord new_value) { return NoBarrier_CompareAndSwap( reinterpret_cast<volatile AtomicWordCastType*>(ptr), old_value, new_value); } // Atomically store new_value into *ptr, returning the previous value held in // *ptr. This routine implies no memory barriers. 
inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, AtomicWord new_value) { return NoBarrier_AtomicExchange( reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); } inline AtomicWord Acquire_AtomicExchange(volatile AtomicWord* ptr, AtomicWord new_value) { return Acquire_AtomicExchange( reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); } inline AtomicWord Release_AtomicExchange(volatile AtomicWord* ptr, AtomicWord new_value) { return Release_AtomicExchange( reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); } inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, AtomicWord new_value) { return base::subtle::Acquire_CompareAndSwap( reinterpret_cast<volatile AtomicWordCastType*>(ptr), old_value, new_value); } inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, AtomicWord new_value) { return base::subtle::Release_CompareAndSwap( reinterpret_cast<volatile AtomicWordCastType*>(ptr), old_value, new_value); } inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) { NoBarrier_Store( reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); } inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { return base::subtle::Acquire_Store( reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); } inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { return base::subtle::Release_Store( reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); } inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) { return NoBarrier_Load( reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); } inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { return base::subtle::Acquire_Load( reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); } inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { return base::subtle::Release_Load( reinterpret_cast<volatile const 
AtomicWordCastType*>(ptr)); } } // namespace base::subtle } // namespace base #endif // AtomicWordCastType // ------------------------------------------------------------------------ // Commented out type definitions and method declarations for documentation // of the interface provided by this module. // ------------------------------------------------------------------------ #if 0 // Signed 32-bit type that supports the atomic ops below, as well as atomic // loads and stores. Instances must be naturally aligned. This type differs // from AtomicWord in 64-bit binaries where AtomicWord is 64-bits. typedef int32_t Atomic32; // Corresponding operations on Atomic32 namespace base { namespace subtle { // Signed 64-bit type that supports the atomic ops below, as well as atomic // loads and stores. Instances must be naturally aligned. This type differs // from AtomicWord in 32-bit binaries where AtomicWord is 32-bits. typedef int64_t Atomic64; Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value); Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value); Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value); void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value); void Acquire_Store(volatile Atomic32* ptr, Atomic32 value); void Release_Store(volatile Atomic32* ptr, Atomic32 value); Atomic32 NoBarrier_Load(volatile const Atomic32* ptr); Atomic32 Acquire_Load(volatile const Atomic32* ptr); Atomic32 Release_Load(volatile const Atomic32* ptr); // Corresponding operations on Atomic64 Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value); Atomic64 
NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value); Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value); void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value); void Acquire_Store(volatile Atomic64* ptr, Atomic64 value); void Release_Store(volatile Atomic64* ptr, Atomic64 value); Atomic64 NoBarrier_Load(volatile const Atomic64* ptr); Atomic64 Acquire_Load(volatile const Atomic64* ptr); Atomic64 Release_Load(volatile const Atomic64* ptr); } // namespace base::subtle } // namespace base void MemoryBarrier(); #endif // 0 // ------------------------------------------------------------------------ // The following are to be deprecated when all uses have been changed to // use the base::subtle namespace. 
// ------------------------------------------------------------------------ #ifdef AtomicWordCastType // AtomicWord versions to be deprecated inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, AtomicWord new_value) { return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value); } inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, AtomicWord old_value, AtomicWord new_value) { return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); } inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { return base::subtle::Acquire_Store(ptr, value); } inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { return base::subtle::Release_Store(ptr, value); } inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { return base::subtle::Acquire_Load(ptr); } inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { return base::subtle::Release_Load(ptr); } #endif // AtomicWordCastType // 32-bit Acquire/Release operations to be deprecated. inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value); } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { base::subtle::Acquire_Store(ptr, value); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { return base::subtle::Release_Store(ptr, value); } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { return base::subtle::Acquire_Load(ptr); } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { return base::subtle::Release_Load(ptr); } #ifdef BASE_HAS_ATOMIC64 // 64-bit Acquire/Release operations to be deprecated. 
inline base::subtle::Atomic64 Acquire_CompareAndSwap( volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) { return base::subtle::Acquire_CompareAndSwap(ptr, old_value, new_value); } inline base::subtle::Atomic64 Release_CompareAndSwap( volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) { return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value); } inline void Acquire_Store( volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) { base::subtle::Acquire_Store(ptr, value); } inline void Release_Store( volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) { return base::subtle::Release_Store(ptr, value); } inline base::subtle::Atomic64 Acquire_Load( volatile const base::subtle::Atomic64* ptr) { return base::subtle::Acquire_Load(ptr); } inline base::subtle::Atomic64 Release_Load( volatile const base::subtle::Atomic64* ptr) { return base::subtle::Release_Load(ptr); } #endif // BASE_HAS_ATOMIC64 #endif // THREAD_ATOMICOPS_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock_win32-inl.h
.h
2,068
55
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2009, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * This file is a Win32-specific part of spinlock_internal.cc */ #include <windows.h> namespace base { namespace internal { void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) { if (loop == 0) { } else if (loop == 1) { Sleep(0); } else { Sleep(base::internal::SuggestedDelayNS(loop) / 1000000); } } void SpinLockWake(volatile Atomic32 *w, bool all) { } } // namespace internal } // namespace base
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock_linux-inl.h
.h
3,663
103
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2009, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * This file is a Linux-specific part of spinlock_internal.cc */ #include <errno.h> #include <sched.h> #include <time.h> #include <limits.h> #include "base/linux_syscall_support.h" #define FUTEX_WAIT 0 #define FUTEX_WAKE 1 #define FUTEX_PRIVATE_FLAG 128 // Note: Instead of making direct system calls that are inlined, we rely // on the syscall() function in glibc to do the right thing. 
static bool have_futex; static int futex_private_flag = FUTEX_PRIVATE_FLAG; namespace { static struct InitModule { InitModule() { int x = 0; // futexes are ints, so we can use them only when // that's the same size as the lockword_ in SpinLock. have_futex = (sizeof(Atomic32) == sizeof(int) && syscall(__NR_futex, &x, FUTEX_WAKE, 1, NULL, NULL, 0) >= 0); if (have_futex && syscall(__NR_futex, &x, FUTEX_WAKE | futex_private_flag, 1, NULL, NULL, 0) < 0) { futex_private_flag = 0; } } } init_module; } // anonymous namespace namespace base { namespace internal { void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) { if (loop != 0) { int save_errno = errno; struct timespec tm; tm.tv_sec = 0; if (have_futex) { tm.tv_nsec = base::internal::SuggestedDelayNS(loop); } else { tm.tv_nsec = 2000001; // above 2ms so linux 2.4 doesn't spin } if (have_futex) { tm.tv_nsec *= 16; // increase the delay; we expect explicit wakeups syscall(__NR_futex, reinterpret_cast<int*>(const_cast<Atomic32*>(w)), FUTEX_WAIT | futex_private_flag, value, reinterpret_cast<struct kernel_timespec*>(&tm), NULL, 0); } else { nanosleep(&tm, NULL); } errno = save_errno; } } void SpinLockWake(volatile Atomic32 *w, bool all) { if (have_futex) { syscall(__NR_futex, reinterpret_cast<int*>(const_cast<Atomic32*>(w)), FUTEX_WAKE | futex_private_flag, all ? INT_MAX : 1, NULL, NULL, 0); } } } // namespace internal } // namespace base
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/thread_annotations.h
.h
5,982
135
// Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Le-Chun Wu // // This header file contains the macro definitions for thread safety // annotations that allow the developers to document the locking policies // of their multi-threaded code. The annotations can also help program // analysis tools to identify potential thread safety issues. // // The annotations are implemented using GCC's "attributes" extension. 
// Using the macros defined here instead of the raw GCC attributes allows // for portability and future compatibility. // // This functionality is not yet fully implemented in perftools, // but may be one day. #ifndef BASE_THREAD_ANNOTATIONS_H_ #define BASE_THREAD_ANNOTATIONS_H_ #if defined(__GNUC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) \ && defined(__SUPPORT_TS_ANNOTATION__) && (!defined(SWIG)) #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else #define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op #endif // Document if a shared variable/field needs to be protected by a lock. // GUARDED_BY allows the user to specify a particular lock that should be // held when accessing the annotated variable, while GUARDED_VAR only // indicates a shared variable should be guarded (by any lock). GUARDED_VAR // is primarily used when the client cannot express the name of the lock. #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) #define GUARDED_VAR THREAD_ANNOTATION_ATTRIBUTE__(guarded) // Document if the memory location pointed to by a pointer should be guarded // by a lock when dereferencing the pointer. Similar to GUARDED_VAR, // PT_GUARDED_VAR is primarily used when the client cannot express the name // of the lock. Note that a pointer variable to a shared memory location // could itself be a shared variable. For example, if a shared global pointer // q, which is guarded by mu1, points to a shared memory location that is // guarded by mu2, q should be annotated as follows: // int *q GUARDED_BY(mu1) PT_GUARDED_BY(mu2); #define PT_GUARDED_BY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x)) #define PT_GUARDED_VAR \ THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded) // Document the acquisition order between locks that can be held // simultaneously by a thread. For any two locks that need to be annotated // to establish an acquisition order, only one of them needs the annotation. // (i.e. 
You don't have to annotate both locks with both ACQUIRED_AFTER // and ACQUIRED_BEFORE.) #define ACQUIRED_AFTER(x) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(x)) #define ACQUIRED_BEFORE(x) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(x)) // The following three annotations document the lock requirements for // functions/methods. // Document if a function expects certain locks to be held before it is called #define EXCLUSIVE_LOCKS_REQUIRED(x) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x)) #define SHARED_LOCKS_REQUIRED(x) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(x)) // Document the locks acquired in the body of the function. These locks // cannot be held when calling this function (as google3's Mutex locks are // non-reentrant). #define LOCKS_EXCLUDED(x) \ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(x)) // Document the lock the annotated function returns without acquiring it. #define LOCK_RETURNED(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) // Document if a class/type is a lockable type (such as the Mutex class). #define LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(lockable) // Document if a class is a scoped lockable type (such as the MutexLock class). #define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) // The following annotations specify lock and unlock primitives. #define EXCLUSIVE_LOCK_FUNCTION(x) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock(x)) #define SHARED_LOCK_FUNCTION(x) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock(x)) #define EXCLUSIVE_TRYLOCK_FUNCTION(x) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock(x)) #define SHARED_TRYLOCK_FUNCTION(x) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock(x)) #define UNLOCK_FUNCTION(x) \ THREAD_ANNOTATION_ATTRIBUTE__(unlock(x)) // An escape hatch for thread safety analysis to ignore the annotated function. #define NO_THREAD_SAFETY_ANALYSIS \ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) #endif // BASE_THREAD_ANNOTATIONS_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock.h
.h
5,502
144
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Sanjay Ghemawat */ // SpinLock is async signal safe. // If used within a signal handler, all lock holders // should block the signal even outside the signal handler. 
#ifndef BASE_SPINLOCK_H_ #define BASE_SPINLOCK_H_ #include <config.h> #include "base/atomicops.h" #include "base/basictypes.h" #include "base/dynamic_annotations.h" #include "base/thread_annotations.h" class LOCKABLE SpinLock { public: SpinLock() : lockword_(kSpinLockFree) { } // Special constructor for use with static SpinLock objects. E.g., // // static SpinLock lock(base::LINKER_INITIALIZED); // // When intialized using this constructor, we depend on the fact // that the linker has already initialized the memory appropriately. // A SpinLock constructed like this can be freely used from global // initializers without worrying about the order in which global // initializers run. explicit SpinLock(base::LinkerInitialized /*x*/) { // Does nothing; lockword_ is already initialized } // Acquire this SpinLock. // TODO(csilvers): uncomment the annotation when we figure out how to // support this macro with 0 args (see thread_annotations.h) inline void Lock() /*EXCLUSIVE_LOCK_FUNCTION()*/ { if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, kSpinLockHeld) != kSpinLockFree) { SlowLock(); } ANNOTATE_RWLOCK_ACQUIRED(this, 1); } // Try to acquire this SpinLock without blocking and return true if the // acquisition was successful. If the lock was not acquired, false is // returned. If this SpinLock is free at the time of the call, TryLock // will return true with high probability. inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) { bool res = (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, kSpinLockHeld) == kSpinLockFree); if (res) { ANNOTATE_RWLOCK_ACQUIRED(this, 1); } return res; } // Release this SpinLock, which must be held by the calling thread. 
// TODO(csilvers): uncomment the annotation when we figure out how to // support this macro with 0 args (see thread_annotations.h) inline void Unlock() /*UNLOCK_FUNCTION()*/ { ANNOTATE_RWLOCK_RELEASED(this, 1); uint64 prev_value = static_cast<uint64>( base::subtle::Release_AtomicExchange(&lockword_, kSpinLockFree)); if (prev_value != kSpinLockHeld) { // Speed the wakeup of any waiter. SlowUnlock(); } } // Determine if the lock is held. When the lock is held by the invoking // thread, true will always be returned. Intended to be used as // CHECK(lock.IsHeld()). inline bool IsHeld() const { return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree; } static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat private: enum { kSpinLockFree = 0 }; enum { kSpinLockHeld = 1 }; enum { kSpinLockSleeper = 2 }; volatile Atomic32 lockword_; void SlowLock(); void SlowUnlock(); Atomic32 SpinLoop(); DISALLOW_COPY_AND_ASSIGN(SpinLock); }; // Corresponding locker object that arranges to acquire a spinlock for // the duration of a C++ scope. class SCOPED_LOCKABLE SpinLockHolder { private: SpinLock* lock_; public: inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l) : lock_(l) { l->Lock(); } // TODO(csilvers): uncomment the annotation when we figure out how to // support this macro with 0 args (see thread_annotations.h) inline ~SpinLockHolder() /*UNLOCK_FUNCTION()*/ { lock_->Unlock(); } }; // Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock); #define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name) #endif // BASE_SPINLOCK_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/linuxthreads.cc
.cc
25,731
708
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2005-2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Markus Gutschke */ #include "base/linuxthreads.h" #ifdef THREADS #ifdef __cplusplus extern "C" { #endif #include <sched.h> #include <signal.h> #include <stdlib.h> #include <string.h> #include <fcntl.h> #include <sys/socket.h> #include <sys/wait.h> #include <sys/prctl.h> #include <semaphore.h> #include "base/linux_syscall_support.h" #include "base/thread_lister.h" #ifndef CLONE_UNTRACED #define CLONE_UNTRACED 0x00800000 #endif /* Synchronous signals that should not be blocked while in the lister thread. */ static const int sync_signals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGXCPU, SIGXFSZ }; /* itoa() is not a standard function, and we cannot safely call printf() * after suspending threads. So, we just implement our own copy. A * recursive approach is the easiest here. */ static char *local_itoa(char *buf, int i) { if (i < 0) { *buf++ = '-'; return local_itoa(buf, -i); } else { if (i >= 10) buf = local_itoa(buf, i/10); *buf++ = (i%10) + '0'; *buf = '\000'; return buf; } } /* Wrapper around clone() that runs "fn" on the same stack as the * caller! Unlike fork(), the cloned thread shares the same address space. * The caller must be careful to use only minimal amounts of stack until * the cloned thread has returned. * There is a good chance that the cloned thread and the caller will share * the same copy of errno! */ #ifdef __GNUC__ #if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3 /* Try to force this function into a separate stack frame, and make sure * that arguments are passed on the stack. */ static int local_clone (int (*fn)(void *), void *arg, ...) __attribute__ ((noinline)); #endif #endif /* To avoid the gap cross page boundaries, increase by the large parge * size mostly PowerPC system uses. */ #ifdef __PPC64__ #define CLONE_STACK_SIZE 65536 #else #define CLONE_STACK_SIZE 4096 #endif static int local_clone (int (*fn)(void *), void *arg, ...) { /* Leave 4kB of gap between the callers stack and the new clone. 
This * should be more than sufficient for the caller to call waitpid() until * the cloned thread terminates. * * It is important that we set the CLONE_UNTRACED flag, because newer * versions of "gdb" otherwise attempt to attach to our thread, and will * attempt to reap its status codes. This subsequently results in the * caller hanging indefinitely in waitpid(), waiting for a change in * status that will never happen. By setting the CLONE_UNTRACED flag, we * prevent "gdb" from stealing events, but we still expect the thread * lister to fail, because it cannot PTRACE_ATTACH to the process that * is being debugged. This is OK and the error code will be reported * correctly. */ return sys_clone(fn, (char *)&arg - CLONE_STACK_SIZE, CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0); } /* Local substitute for the atoi() function, which is not necessarily safe * to call once threads are suspended (depending on whether libc looks up * locale information, when executing atoi()). */ static int local_atoi(const char *s) { int n = 0; int neg = *s == '-'; if (neg) s++; while (*s >= '0' && *s <= '9') n = 10*n + (*s++ - '0'); return neg ? -n : n; } /* Re-runs fn until it doesn't cause EINTR */ #define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR) /* Wrap a class around system calls, in order to give us access to * a private copy of errno. This only works in C++, but it has the * advantage of not needing nested functions, which are a non-standard * language extension. */ #ifdef __cplusplus namespace { class SysCalls { public: #define SYS_CPLUSPLUS #define SYS_ERRNO my_errno #define SYS_INLINE inline #define SYS_PREFIX -1 #undef SYS_LINUX_SYSCALL_SUPPORT_H #include "linux_syscall_support.h" SysCalls() : my_errno(0) { } int my_errno; }; } #define ERRNO sys.my_errno #else #define ERRNO my_errno #endif /* Wrapper for open() which is guaranteed to never return EINTR. 
*/ static int c_open(const char *fname, int flags, int mode) { ssize_t rc; NO_INTR(rc = sys_open(fname, flags, mode)); return rc; } /* abort() is not safely reentrant, and changes it's behavior each time * it is called. This means, if the main application ever called abort() * we cannot safely call it again. This would happen if we were called * from a SIGABRT signal handler in the main application. So, document * that calling SIGABRT from the thread lister makes it not signal safe * (and vice-versa). * Also, since we share address space with the main application, we * cannot call abort() from the callback and expect the main application * to behave correctly afterwards. In fact, the only thing we can do, is * to terminate the main application with extreme prejudice (aka * PTRACE_KILL). * We set up our own SIGABRT handler to do this. * In order to find the main application from the signal handler, we * need to store information about it in global variables. This is * safe, because the main application should be suspended at this * time. If the callback ever called TCMalloc_ResumeAllProcessThreads(), then * we are running a higher risk, though. So, try to avoid calling * abort() after calling TCMalloc_ResumeAllProcessThreads. */ static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker; /* Signal handler to help us recover from dying while we are attached to * other threads. */ static void SignalHandler(int signum, siginfo_t *si, void *data) { if (sig_pids != NULL) { if (signum == SIGABRT) { while (sig_num_threads-- > 0) { /* Not sure if sched_yield is really necessary here, but it does not */ /* hurt, and it might be necessary for the same reasons that we have */ /* to do so in sys_ptrace_detach(). 
*/ sys_sched_yield(); sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0); } } else if (sig_num_threads > 0) { TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids); } } sig_pids = NULL; if (sig_marker >= 0) NO_INTR(sys_close(sig_marker)); sig_marker = -1; if (sig_proc >= 0) NO_INTR(sys_close(sig_proc)); sig_proc = -1; sys__exit(signum == SIGABRT ? 1 : 2); } /* Try to dirty the stack, and hope that the compiler is not smart enough * to optimize this function away. Or worse, the compiler could inline the * function and permanently allocate the data on the stack. */ static void DirtyStack(size_t amount) { char buf[amount]; memset(buf, 0, amount); sys_read(-1, buf, amount); } /* Data structure for passing arguments to the lister thread. */ #define ALT_STACKSIZE (MINSIGSTKSZ + 4096) struct ListerParams { int result, err; char *altstack_mem; ListAllProcessThreadsCallBack callback; void *parameter; va_list ap; sem_t *lock; }; static void ListerThread(struct ListerParams *args) { int found_parent = 0; pid_t clone_pid = sys_gettid(), ppid = sys_getppid(); char proc_self_task[80], marker_name[48], *marker_path; const char *proc_paths[3]; const char *const *proc_path = proc_paths; int proc = -1, marker = -1, num_threads = 0; int max_threads = 0, sig; struct kernel_stat marker_sb, proc_sb; stack_t altstack; /* Wait for parent thread to set appropriate permissions * to allow ptrace activity */ if (sem_wait(args->lock) < 0) { goto failure; } /* Create "marker" that we can use to detect threads sharing the same * address space and the same file handles. By setting the FD_CLOEXEC flag * we minimize the risk of misidentifying child processes as threads; * and since there is still a race condition, we will filter those out * later, anyway. 
*/ if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 || sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) { failure: args->result = -1; args->err = errno; if (marker >= 0) NO_INTR(sys_close(marker)); sig_marker = marker = -1; if (proc >= 0) NO_INTR(sys_close(proc)); sig_proc = proc = -1; sys__exit(1); } /* Compute search paths for finding thread directories in /proc */ local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid); strcpy(marker_name, proc_self_task); marker_path = marker_name + strlen(marker_name); strcat(proc_self_task, "/task/"); proc_paths[0] = proc_self_task; /* /proc/$$/task/ */ proc_paths[1] = "/proc/"; /* /proc/ */ proc_paths[2] = NULL; /* Compute path for marker socket in /proc */ local_itoa(strcpy(marker_path, "/fd/") + 4, marker); if (sys_stat(marker_name, &marker_sb) < 0) { goto failure; } /* Catch signals on an alternate pre-allocated stack. This way, we can * safely execute the signal handler even if we ran out of memory. */ memset(&altstack, 0, sizeof(altstack)); altstack.ss_sp = args->altstack_mem; altstack.ss_flags = 0; altstack.ss_size = ALT_STACKSIZE; sys_sigaltstack(&altstack, (const stack_t *)NULL); /* Some kernels forget to wake up traced processes, when the * tracer dies. So, intercept synchronous signals and make sure * that we wake up our tracees before dying. It is the caller's * responsibility to ensure that asynchronous signals do not * interfere with this function. */ sig_marker = marker; sig_proc = -1; for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) { struct kernel_sigaction sa; memset(&sa, 0, sizeof(sa)); sa.sa_sigaction_ = SignalHandler; sys_sigfillset(&sa.sa_mask); sa.sa_flags = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND; sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL); } /* Read process directories in /proc/... */ for (;;) { /* Some kernels know about threads, and hide them in "/proc" * (although they are still there, if you know the process * id). 
Threads are moved into a separate "task" directory. We * check there first, and then fall back on the older naming * convention if necessary. */ if ((sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0)) < 0) { if (*++proc_path != NULL) continue; goto failure; } if (sys_fstat(proc, &proc_sb) < 0) goto failure; /* Since we are suspending threads, we cannot call any libc * functions that might acquire locks. Most notably, we cannot * call malloc(). So, we have to allocate memory on the stack, * instead. Since we do not know how much memory we need, we * make a best guess. And if we guessed incorrectly we retry on * a second iteration (by jumping to "detach_threads"). * * Unless the number of threads is increasing very rapidly, we * should never need to do so, though, as our guestimate is very * conservative. */ if (max_threads < proc_sb.st_nlink + 100) max_threads = proc_sb.st_nlink + 100; /* scope */ { pid_t pids[max_threads]; int added_entries = 0; sig_num_threads = num_threads; sig_pids = pids; for (;;) { struct KERNEL_DIRENT *entry; char buf[4096]; ssize_t nbytes = GETDENTS(proc, (struct KERNEL_DIRENT *)buf, sizeof(buf)); if (nbytes < 0) goto failure; else if (nbytes == 0) { if (added_entries) { /* Need to keep iterating over "/proc" in multiple * passes until we no longer find any more threads. This * algorithm eventually completes, when all threads have * been suspended. */ added_entries = 0; sys_lseek(proc, 0, SEEK_SET); continue; } break; } for (entry = (struct KERNEL_DIRENT *)buf; entry < (struct KERNEL_DIRENT *)&buf[nbytes]; entry = (struct KERNEL_DIRENT *)((char *)entry+entry->d_reclen)) { if (entry->d_ino != 0) { const char *ptr = entry->d_name; pid_t pid; /* Some kernels hide threads by preceding the pid with a '.' 
*/ if (*ptr == '.') ptr++; /* If the directory is not numeric, it cannot be a * process/thread */ if (*ptr < '0' || *ptr > '9') continue; pid = local_atoi(ptr); /* Attach (and suspend) all threads */ if (pid && pid != clone_pid) { struct kernel_stat tmp_sb; char fname[entry->d_reclen + 48]; strcat(strcat(strcpy(fname, "/proc/"), entry->d_name), marker_path); /* Check if the marker is identical to the one we created */ if (sys_stat(fname, &tmp_sb) >= 0 && marker_sb.st_ino == tmp_sb.st_ino) { long i, j; /* Found one of our threads, make sure it is no duplicate */ for (i = 0; i < num_threads; i++) { /* Linear search is slow, but should not matter much for * the typically small number of threads. */ if (pids[i] == pid) { /* Found a duplicate; most likely on second pass */ goto next_entry; } } /* Check whether data structure needs growing */ if (num_threads >= max_threads) { /* Back to square one, this time with more memory */ NO_INTR(sys_close(proc)); goto detach_threads; } /* Attaching to thread suspends it */ pids[num_threads++] = pid; sig_num_threads = num_threads; if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0, (void *)0) < 0) { /* If operation failed, ignore thread. Maybe it * just died? There might also be a race * condition with a concurrent core dumper or * with a debugger. In that case, we will just * make a best effort, rather than failing * entirely. */ num_threads--; sig_num_threads = num_threads; goto next_entry; } while (sys_waitpid(pid, (int *)0, __WALL) < 0) { if (errno != EINTR) { sys_ptrace_detach(pid); num_threads--; sig_num_threads = num_threads; goto next_entry; } } if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j || sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i != j) { /* Address spaces are distinct, even though both * processes show the "marker". This is probably * a forked child process rather than a thread. 
*/ sys_ptrace_detach(pid); num_threads--; sig_num_threads = num_threads; } else { found_parent |= pid == ppid; added_entries++; } } } } next_entry:; } } NO_INTR(sys_close(proc)); sig_proc = proc = -1; /* If we failed to find any threads, try looking somewhere else in * /proc. Maybe, threads are reported differently on this system. */ if (num_threads > 1 || !*++proc_path) { NO_INTR(sys_close(marker)); sig_marker = marker = -1; /* If we never found the parent process, something is very wrong. * Most likely, we are running in debugger. Any attempt to operate * on the threads would be very incomplete. Let's just report an * error to the caller. */ if (!found_parent) { TCMalloc_ResumeAllProcessThreads(num_threads, pids); sys__exit(3); } /* Now we are ready to call the callback, * which takes care of resuming the threads for us. */ args->result = args->callback(args->parameter, num_threads, pids, args->ap); args->err = errno; /* Callback should have resumed threads, but better safe than sorry */ if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) { /* Callback forgot to resume at least one thread, report error */ args->err = EINVAL; args->result = -1; } sys__exit(0); } detach_threads: /* Resume all threads prior to retrying the operation */ TCMalloc_ResumeAllProcessThreads(num_threads, pids); sig_pids = NULL; num_threads = 0; sig_num_threads = num_threads; max_threads += 100; } } } /* This function gets the list of all linux threads of the current process * passes them to the 'callback' along with the 'parameter' pointer; at the * call back call time all the threads are paused via * PTRACE_ATTACH. * The callback is executed from a separate thread which shares only the * address space, the filesystem, and the filehandles with the caller. Most * notably, it does not share the same pid and ppid; and if it terminates, * the rest of the application is still there. 'callback' is supposed to do * or arrange for TCMalloc_ResumeAllProcessThreads. 
This happens automatically, if * the thread raises a synchronous signal (e.g. SIGSEGV); asynchronous * signals are blocked. If the 'callback' decides to unblock them, it must * ensure that they cannot terminate the application, or that * TCMalloc_ResumeAllProcessThreads will get called. * It is an error for the 'callback' to make any library calls that could * acquire locks. Most notably, this means that most system calls have to * avoid going through libc. Also, this means that it is not legal to call * exit() or abort(). * We return -1 on error and the return value of 'callback' on success. */ int TCMalloc_ListAllProcessThreads(void *parameter, ListAllProcessThreadsCallBack callback, ...) { char altstack_mem[ALT_STACKSIZE]; struct ListerParams args; pid_t clone_pid; int dumpable = 1, sig; struct kernel_sigset_t sig_blocked, sig_old; sem_t lock; va_start(args.ap, callback); /* If we are short on virtual memory, initializing the alternate stack * might trigger a SIGSEGV. Let's do this early, before it could get us * into more trouble (i.e. before signal handlers try to use the alternate * stack, and before we attach to other threads). */ memset(altstack_mem, 0, sizeof(altstack_mem)); /* Some of our cleanup functions could conceivable use more stack space. * Try to touch the stack right now. This could be defeated by the compiler * being too smart for it's own good, so try really hard. */ DirtyStack(32768); /* Make this process "dumpable". This is necessary in order to ptrace() * after having called setuid(). */ dumpable = sys_prctl(PR_GET_DUMPABLE, 0); if (!dumpable) sys_prctl(PR_SET_DUMPABLE, 1); /* Fill in argument block for dumper thread */ args.result = -1; args.err = 0; args.altstack_mem = altstack_mem; args.parameter = parameter; args.callback = callback; args.lock = &lock; /* Before cloning the thread lister, block all asynchronous signals, as we */ /* are not prepared to handle them. 
*/ sys_sigfillset(&sig_blocked); for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) { sys_sigdelset(&sig_blocked, sync_signals[sig]); } if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) { args.err = errno; args.result = -1; goto failed; } /* scope */ { /* After cloning, both the parent and the child share the same instance * of errno. We must make sure that at least one of these processes * (in our case, the parent) uses modified syscall macros that update * a local copy of errno, instead. */ #ifdef __cplusplus #define sys0_sigprocmask sys.sigprocmask #define sys0_waitpid sys.waitpid SysCalls sys; #else int my_errno; #define SYS_ERRNO my_errno #define SYS_INLINE inline #define SYS_PREFIX 0 #undef SYS_LINUX_SYSCALL_SUPPORT_H #include "linux_syscall_support.h" #endif /* Lock before clone so that parent can set * ptrace permissions (if necessary) prior * to ListerThread actually executing */ if (sem_init(&lock, 0, 0) == 0) { int clone_errno; clone_pid = local_clone((int (*)(void *))ListerThread, &args); clone_errno = errno; sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old); if (clone_pid >= 0) { #ifdef PR_SET_PTRACER /* In newer versions of glibc permission must explicitly * be given to allow for ptrace. */ prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0); #endif /* Releasing the lock here allows the * ListerThread to execute and ptrace us. */ sem_post(&lock); int status, rc; while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 && ERRNO == EINTR) { /* Keep waiting */ } if (rc < 0) { args.err = ERRNO; args.result = -1; } else if (WIFEXITED(status)) { switch (WEXITSTATUS(status)) { case 0: break; /* Normal process termination */ case 2: args.err = EFAULT; /* Some fault (e.g. 
SIGSEGV) detected */ args.result = -1; break; case 3: args.err = EPERM; /* Process is already being traced */ args.result = -1; break; default:args.err = ECHILD; /* Child died unexpectedly */ args.result = -1; break; } } else if (!WIFEXITED(status)) { args.err = EFAULT; /* Terminated due to an unhandled signal*/ args.result = -1; } sem_destroy(&lock); } else { args.result = -1; args.err = clone_errno; } } else { args.result = -1; args.err = errno; } } /* Restore the "dumpable" state of the process */ failed: if (!dumpable) sys_prctl(PR_SET_DUMPABLE, dumpable); va_end(args.ap); errno = args.err; return args.result; } /* This function resumes the list of all linux threads that * TCMalloc_ListAllProcessThreads pauses before giving to its callback. * The function returns non-zero if at least one thread was * suspended and has now been resumed. */ int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) { int detached_at_least_one = 0; while (num_threads-- > 0) { detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0; } return detached_at_least_one; } #ifdef __cplusplus } #endif #endif
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/logging.h
.h
11,678
260
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // This file contains #include information about logging-related stuff. // Pretty much everybody needs to #include this file so that they can // log various happenings. 
// #ifndef _LOGGING_H_ #define _LOGGING_H_ #include <config.h> #include <stdarg.h> #include <stdlib.h> #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> // for write() #endif #include <string.h> // for strlen(), strcmp() #include <assert.h> #include <errno.h> // for errno #include "base/commandlineflags.h" // On some systems (like freebsd), we can't call write() at all in a // global constructor, perhaps because errno hasn't been set up. // (In windows, we can't call it because it might call malloc.) // Calling the write syscall is safer (it doesn't set errno), so we // prefer that. Note we don't care about errno for logging: we just // do logging on a best-effort basis. #if defined(_MSC_VER) #define WRITE_TO_STDERR(buf, len) WriteToStderr(buf, len); // in port.cc #elif defined(HAVE_SYS_SYSCALL_H) #include <sys/syscall.h> #define WRITE_TO_STDERR(buf, len) syscall(SYS_write, STDERR_FILENO, buf, len) #else #define WRITE_TO_STDERR(buf, len) write(STDERR_FILENO, buf, len) #endif // MSVC and mingw define their own, safe version of vnsprintf (the // windows one in broken) in port.cc. Everyone else can use the // version here. We had to give it a unique name for windows. #ifndef _WIN32 # define perftools_vsnprintf vsnprintf #endif // We log all messages at this log-level and below. // INFO == -1, WARNING == -2, ERROR == -3, FATAL == -4 DECLARE_int32(verbose); // CHECK dies with a fatal error if condition is not true. It is *not* // controlled by NDEBUG, so the check will be executed regardless of // compilation mode. Therefore, it is safe to do things like: // CHECK(fp->Write(x) == 4) // Note we use write instead of printf/puts to avoid the risk we'll // call malloc(). #define CHECK(condition) \ do { \ if (!(condition)) { \ WRITE_TO_STDERR("Check failed: " #condition "\n", \ sizeof("Check failed: " #condition "\n")-1); \ abort(); \ } \ } while (0) // This takes a message to print. The name is historical. 
#define RAW_CHECK(condition, message) \ do { \ if (!(condition)) { \ WRITE_TO_STDERR("Check failed: " #condition ": " message "\n", \ sizeof("Check failed: " #condition ": " message "\n")-1);\ abort(); \ } \ } while (0) // This is like RAW_CHECK, but only in debug-mode #ifdef NDEBUG enum { DEBUG_MODE = 0 }; #define RAW_DCHECK(condition, message) #else enum { DEBUG_MODE = 1 }; #define RAW_DCHECK(condition, message) RAW_CHECK(condition, message) #endif // This prints errno as well. Note we use write instead of printf/puts to // avoid the risk we'll call malloc(). #define PCHECK(condition) \ do { \ if (!(condition)) { \ const int err_no = errno; \ WRITE_TO_STDERR("Check failed: " #condition ": ", \ sizeof("Check failed: " #condition ": ")-1); \ WRITE_TO_STDERR(strerror(err_no), strlen(strerror(err_no))); \ WRITE_TO_STDERR("\n", sizeof("\n")-1); \ abort(); \ } \ } while (0) // Helper macro for binary operators; prints the two values on error // Don't use this macro directly in your code, use CHECK_EQ et al below // WARNING: These don't compile correctly if one of the arguments is a pointer // and the other is NULL. To work around this, simply static_cast NULL to the // type of the desired pointer. // TODO(jandrews): Also print the values in case of failure. Requires some // sort of type-sensitive ToString() function. #define CHECK_OP(op, val1, val2) \ do { \ if (!((val1) op (val2))) { \ fprintf(stderr, "Check failed: %s %s %s\n", #val1, #op, #val2); \ abort(); \ } \ } while (0) #define CHECK_EQ(val1, val2) CHECK_OP(==, val1, val2) #define CHECK_NE(val1, val2) CHECK_OP(!=, val1, val2) #define CHECK_LE(val1, val2) CHECK_OP(<=, val1, val2) #define CHECK_LT(val1, val2) CHECK_OP(< , val1, val2) #define CHECK_GE(val1, val2) CHECK_OP(>=, val1, val2) #define CHECK_GT(val1, val2) CHECK_OP(> , val1, val2) // Synonyms for CHECK_* that are used in some unittests. 
#define EXPECT_EQ(val1, val2) CHECK_EQ(val1, val2) #define EXPECT_NE(val1, val2) CHECK_NE(val1, val2) #define EXPECT_LE(val1, val2) CHECK_LE(val1, val2) #define EXPECT_LT(val1, val2) CHECK_LT(val1, val2) #define EXPECT_GE(val1, val2) CHECK_GE(val1, val2) #define EXPECT_GT(val1, val2) CHECK_GT(val1, val2) #define ASSERT_EQ(val1, val2) EXPECT_EQ(val1, val2) #define ASSERT_NE(val1, val2) EXPECT_NE(val1, val2) #define ASSERT_LE(val1, val2) EXPECT_LE(val1, val2) #define ASSERT_LT(val1, val2) EXPECT_LT(val1, val2) #define ASSERT_GE(val1, val2) EXPECT_GE(val1, val2) #define ASSERT_GT(val1, val2) EXPECT_GT(val1, val2) // As are these variants. #define EXPECT_TRUE(cond) CHECK(cond) #define EXPECT_FALSE(cond) CHECK(!(cond)) #define EXPECT_STREQ(a, b) CHECK(strcmp(a, b) == 0) #define ASSERT_TRUE(cond) EXPECT_TRUE(cond) #define ASSERT_FALSE(cond) EXPECT_FALSE(cond) #define ASSERT_STREQ(a, b) EXPECT_STREQ(a, b) // Used for (libc) functions that return -1 and set errno #define CHECK_ERR(invocation) PCHECK((invocation) != -1) // A few more checks that only happen in debug mode #ifdef NDEBUG #define DCHECK_EQ(val1, val2) #define DCHECK_NE(val1, val2) #define DCHECK_LE(val1, val2) #define DCHECK_LT(val1, val2) #define DCHECK_GE(val1, val2) #define DCHECK_GT(val1, val2) #else #define DCHECK_EQ(val1, val2) CHECK_EQ(val1, val2) #define DCHECK_NE(val1, val2) CHECK_NE(val1, val2) #define DCHECK_LE(val1, val2) CHECK_LE(val1, val2) #define DCHECK_LT(val1, val2) CHECK_LT(val1, val2) #define DCHECK_GE(val1, val2) CHECK_GE(val1, val2) #define DCHECK_GT(val1, val2) CHECK_GT(val1, val2) #endif #ifdef ERROR #undef ERROR // may conflict with ERROR macro on windows #endif enum LogSeverity {INFO = -1, WARNING = -2, ERROR = -3, FATAL = -4}; // NOTE: we add a newline to the end of the output if it's not there already inline void LogPrintf(int severity, const char* pat, va_list ap) { // We write directly to the stderr file descriptor and avoid FILE // buffering because that may invoke malloc() char 
buf[600]; perftools_vsnprintf(buf, sizeof(buf)-1, pat, ap); if (buf[0] != '\0' && buf[strlen(buf)-1] != '\n') { assert(strlen(buf)+1 < sizeof(buf)); strcat(buf, "\n"); } WRITE_TO_STDERR(buf, strlen(buf)); if ((severity) == FATAL) abort(); // LOG(FATAL) indicates a big problem, so don't run atexit() calls } // Note that since the order of global constructors is unspecified, // global code that calls RAW_LOG may execute before FLAGS_verbose is set. // Such code will run with verbosity == 0 no matter what. #define VLOG_IS_ON(severity) (FLAGS_verbose >= severity) // In a better world, we'd use __VA_ARGS__, but VC++ 7 doesn't support it. #define LOG_PRINTF(severity, pat) do { \ if (VLOG_IS_ON(severity)) { \ va_list ap; \ va_start(ap, pat); \ LogPrintf(severity, pat, ap); \ va_end(ap); \ } \ } while (0) // RAW_LOG is the main function; some synonyms are used in unittests. inline void RAW_LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); } inline void RAW_VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); } inline void LOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); } inline void VLOG(int lvl, const char* pat, ...) { LOG_PRINTF(lvl, pat); } inline void LOG_IF(int lvl, bool cond, const char* pat, ...) { if (cond) LOG_PRINTF(lvl, pat); } // This isn't technically logging, but it's also IO and also is an // attempt to be "raw" -- that is, to not use any higher-level libc // routines that might allocate memory or (ideally) try to allocate // locks. We use an opaque file handle (not necessarily an int) // to allow even more low-level stuff in the future. // Like other "raw" routines, these functions are best effort, and // thus don't return error codes (except RawOpenForWriting()). 
#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) #ifndef NOMINMAX #define NOMINMAX // @#!$& windows #endif #include <windows.h> typedef HANDLE RawFD; const RawFD kIllegalRawFD = INVALID_HANDLE_VALUE; #else typedef int RawFD; const RawFD kIllegalRawFD = -1; // what open returns if it fails #endif // defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) RawFD RawOpenForWriting(const char* filename); // uses default permissions void RawWrite(RawFD fd, const char* buf, size_t len); void RawClose(RawFD fd); #endif // _LOGGING_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-linuxppc.h
.h
13,891
438
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- */ // Implementation of atomic operations for ppc-linux. This file should not // be included directly. Clients should instead include // "base/atomicops.h". 
#ifndef BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_ #define BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_ typedef int32_t Atomic32; #ifdef __PPC64__ #define BASE_HAS_ATOMIC64 1 #endif namespace base { namespace subtle { static inline void _sync(void) { __asm__ __volatile__("sync": : : "memory"); } static inline void _lwsync(void) { // gcc defines __NO_LWSYNC__ when appropriate; see // http://gcc.gnu.org/ml/gcc-patches/2006-11/msg01238.html #ifdef __NO_LWSYNC__ __asm__ __volatile__("msync": : : "memory"); #else __asm__ __volatile__("lwsync": : : "memory"); #endif } static inline void _isync(void) { __asm__ __volatile__("isync": : : "memory"); } static inline Atomic32 OSAtomicAdd32(Atomic32 amount, Atomic32 *value) { Atomic32 t; __asm__ __volatile__( "1: lwarx %0,0,%3\n\ add %0,%2,%0\n\ stwcx. %0,0,%3 \n\ bne- 1b" : "=&r" (t), "+m" (*value) : "r" (amount), "r" (value) : "cc"); return t; } static inline Atomic32 OSAtomicAdd32Barrier(Atomic32 amount, Atomic32 *value) { Atomic32 t; _lwsync(); t = OSAtomicAdd32(amount, value); // This is based on the code snippet in the architecture manual (Vol // 2, Appendix B). It's a little tricky: correctness depends on the // fact that the code right before this (in OSAtomicAdd32) has a // conditional branch with a data dependency on the update. // Otherwise, we'd have to use sync. _isync(); return t; } static inline bool OSAtomicCompareAndSwap32(Atomic32 old_value, Atomic32 new_value, Atomic32 *value) { Atomic32 prev; __asm__ __volatile__( "1: lwarx %0,0,%2\n\ cmpw 0,%0,%3\n\ bne- 2f\n\ stwcx. %4,0,%2\n\ bne- 1b\n\ 2:" : "=&r" (prev), "+m" (*value) : "r" (value), "r" (old_value), "r" (new_value) : "cc"); return prev == old_value; } static inline Atomic32 OSAtomicCompareAndSwap32Acquire(Atomic32 old_value, Atomic32 new_value, Atomic32 *value) { Atomic32 t; t = OSAtomicCompareAndSwap32(old_value, new_value, value); // This is based on the code snippet in the architecture manual (Vol // 2, Appendix B). 
It's a little tricky: correctness depends on the // fact that the code right before this (in // OSAtomicCompareAndSwap32) has a conditional branch with a data // dependency on the update. Otherwise, we'd have to use sync. _isync(); return t; } static inline Atomic32 OSAtomicCompareAndSwap32Release(Atomic32 old_value, Atomic32 new_value, Atomic32 *value) { _lwsync(); return OSAtomicCompareAndSwap32(old_value, new_value, value); } typedef int64_t Atomic64; inline void MemoryBarrier() { // This can't be _lwsync(); we need to order the immediately // preceding stores against any load that may follow, but lwsync // doesn't guarantee that. _sync(); } // 32-bit Versions. inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; do { if (OSAtomicCompareAndSwap32(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap32(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap32Acquire(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap32Release(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; do { if (OSAtomicCompareAndSwap32Acquire(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); 
return prev_value; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value; do { if (OSAtomicCompareAndSwap32Release(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } #ifdef __PPC64__ // 64-bit Versions. static inline Atomic64 OSAtomicAdd64(Atomic64 amount, Atomic64 *value) { Atomic64 t; __asm__ __volatile__( "1: ldarx %0,0,%3\n\ add %0,%2,%0\n\ stdcx. %0,0,%3 \n\ bne- 1b" : "=&r" (t), "+m" (*value) : "r" (amount), "r" (value) : "cc"); return t; } static inline Atomic64 OSAtomicAdd64Barrier(Atomic64 amount, Atomic64 *value) { Atomic64 t; _lwsync(); t = OSAtomicAdd64(amount, value); // This is based on the code snippet in the architecture manual (Vol // 2, Appendix B). It's a little tricky: correctness depends on the // fact that the code right before this (in OSAtomicAdd64) has a // conditional branch with a data dependency on the update. // Otherwise, we'd have to use sync. _isync(); return t; } static inline bool OSAtomicCompareAndSwap64(Atomic64 old_value, Atomic64 new_value, Atomic64 *value) { Atomic64 prev; __asm__ __volatile__( "1: ldarx %0,0,%2\n\ cmpd 0,%0,%3\n\ bne- 2f\n\ stdcx. %4,0,%2\n\ bne- 1b\n\ 2:" : "=&r" (prev), "+m" (*value) : "r" (value), "r" (old_value), "r" (new_value) : "cc"); return prev == old_value; } static inline Atomic64 OSAtomicCompareAndSwap64Acquire(Atomic64 old_value, Atomic64 new_value, Atomic64 *value) { Atomic64 t; t = OSAtomicCompareAndSwap64(old_value, new_value, value); // This is based on the code snippet in the architecture manual (Vol // 2, Appendix B). It's a little tricky: correctness depends on the // fact that the code right before this (in // OSAtomicCompareAndSwap64) has a conditional branch with a data // dependency on the update. Otherwise, we'd have to use sync. 
_isync(); return t; } static inline Atomic64 OSAtomicCompareAndSwap64Release(Atomic64 old_value, Atomic64 new_value, Atomic64 *value) { _lwsync(); return OSAtomicCompareAndSwap64(old_value, new_value, value); } inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; do { if (OSAtomicCompareAndSwap64(old_value, new_value, const_cast<Atomic64*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, Atomic64 new_value) { Atomic64 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap64(old_value, new_value, const_cast<Atomic64*>(ptr))); return old_value; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr, Atomic64 new_value) { Atomic64 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap64Acquire(old_value, new_value, const_cast<Atomic64*>(ptr))); return old_value; } inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr, Atomic64 new_value) { Atomic64 old_value; do { old_value = *ptr; } while (!OSAtomicCompareAndSwap64Release(old_value, new_value, const_cast<Atomic64*>(ptr))); return old_value; } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; do { if (OSAtomicCompareAndSwap64Acquire(old_value, new_value, const_cast<Atomic64*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev_value; do { if (OSAtomicCompareAndSwap64Release(old_value, new_value, const_cast<Atomic64*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } #endif inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) { *ptr = value; } inline void Acquire_Store(volatile 
Atomic32 *ptr, Atomic32 value) { *ptr = value; // This can't be _lwsync(); we need to order the immediately // preceding stores against any load that may follow, but lwsync // doesn't guarantee that. _sync(); } inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { _lwsync(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { Atomic32 value = *ptr; _lwsync(); return value; } inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { // This can't be _lwsync(); we need to order the immediately // preceding stores against any load that may follow, but lwsync // doesn't guarantee that. _sync(); return *ptr; } #ifdef __PPC64__ // 64-bit Versions. inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { *ptr = value; // This can't be _lwsync(); we need to order the immediately // preceding stores against any load that may follow, but lwsync // doesn't guarantee that. _sync(); } inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { _lwsync(); *ptr = value; } inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { Atomic64 value = *ptr; _lwsync(); return value; } inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { // This can't be _lwsync(); we need to order the immediately // preceding stores against any load that may follow, but lwsync // doesn't guarantee that. _sync(); return *ptr; } #endif } // namespace base::subtle } // namespace base #endif // BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-arm-generic.h
.h
7,910
229
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2003, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // // Author: Lei Zhang, Sasha Levitskiy // // This file is an internal atomic implementation, use base/atomicops.h instead. // // LinuxKernelCmpxchg is from Google Gears. 
#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_ #define BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_ #include <stdio.h> #include <stdlib.h> #include "base/basictypes.h" typedef int32_t Atomic32; namespace base { namespace subtle { typedef int64_t Atomic64; // 0xffff0fc0 is the hard coded address of a function provided by // the kernel which implements an atomic compare-exchange. On older // ARM architecture revisions (pre-v6) this may be implemented using // a syscall. This address is stable, and in active use (hard coded) // by at least glibc-2.7 and the Android C library. // pLinuxKernelCmpxchg has both acquire and release barrier sematincs. typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, Atomic32 new_value, volatile Atomic32* ptr); LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg ATTRIBUTE_WEAK = (LinuxKernelCmpxchgFunc) 0xffff0fc0; typedef void (*LinuxKernelMemoryBarrierFunc)(void); LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier ATTRIBUTE_WEAK = (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev_value = *ptr; do { if (!pLinuxKernelCmpxchg(old_value, new_value, const_cast<Atomic32*>(ptr))) { return old_value; } prev_value = *ptr; } while (prev_value == old_value); return prev_value; } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_value; do { old_value = *ptr; } while (pLinuxKernelCmpxchg(old_value, new_value, const_cast<Atomic32*>(ptr))); return old_value; } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { // pLinuxKernelCmpxchg already has acquire and release barrier semantics. return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { // pLinuxKernelCmpxchg already has acquire and release barrier semantics. 
return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void MemoryBarrier() { pLinuxKernelMemoryBarrier(); } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } // 64-bit versions are not implemented yet. inline void NotImplementedFatalError(const char *function_name) { fprintf(stderr, "64-bit %s() not implemented on this platform\n", function_name); abort(); } inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { NotImplementedFatalError("NoBarrier_CompareAndSwap"); return 0; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { NotImplementedFatalError("NoBarrier_AtomicExchange"); return 0; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { // pLinuxKernelCmpxchg already has acquire and release barrier semantics. return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { // pLinuxKernelCmpxchg already has acquire and release barrier semantics. 
return NoBarrier_AtomicExchange(ptr, new_value); } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { NotImplementedFatalError("NoBarrier_Store"); } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { NotImplementedFatalError("Acquire_Store64"); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { NotImplementedFatalError("Release_Store"); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { NotImplementedFatalError("NoBarrier_Load"); return 0; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { NotImplementedFatalError("Atomic64 Acquire_Load"); return 0; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { NotImplementedFatalError("Atomic64 Release_Load"); return 0; } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap"); return 0; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { NotImplementedFatalError("Atomic64 Release_CompareAndSwap"); return 0; } } // namespace base::subtle } // namespace base #endif // BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/low_level_alloc.h
.h
5,115
121
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if !defined(_BASE_LOW_LEVEL_ALLOC_H_) #define _BASE_LOW_LEVEL_ALLOC_H_ // A simple thread-safe memory allocator that does not depend on // mutexes or thread-specific data. It is intended to be used // sparingly, and only when malloc() would introduce an unwanted // dependency, such as inside the heap-checker. 
#include <config.h> #include <stddef.h> // for size_t #include "base/basictypes.h" class LowLevelAlloc { public: class PagesAllocator { public: virtual ~PagesAllocator(); virtual void *MapPages(int32 flags, size_t size) = 0; virtual void UnMapPages(int32 flags, void *addr, size_t size) = 0; }; static PagesAllocator *GetDefaultPagesAllocator(void); struct Arena; // an arena from which memory may be allocated // Returns a pointer to a block of at least "request" bytes // that have been newly allocated from the specific arena. // for Alloc() call the DefaultArena() is used. // Returns 0 if passed request==0. // Does not return 0 under other circumstances; it crashes if memory // is not available. static void *Alloc(size_t request) ATTRIBUTE_SECTION(malloc_hook); static void *AllocWithArena(size_t request, Arena *arena) ATTRIBUTE_SECTION(malloc_hook); // Deallocates a region of memory that was previously allocated with // Alloc(). Does nothing if passed 0. "s" must be either 0, // or must have been returned from a call to Alloc() and not yet passed to // Free() since that call to Alloc(). The space is returned to the arena // from which it was allocated. static void Free(void *s) ATTRIBUTE_SECTION(malloc_hook); // ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free // are to put all callers of MallocHook::Invoke* in this module // into special section, // so that MallocHook::GetCallerStackTrace can function accurately. // Create a new arena. // The root metadata for the new arena is allocated in the // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. // These values may be ored into flags: enum { // Report calls to Alloc() and Free() via the MallocHook interface. // Set in the DefaultArena. kCallMallocHook = 0x0001, // Make calls to Alloc(), Free() be async-signal-safe. Not set in // DefaultArena(). 
kAsyncSignalSafe = 0x0002, // When used with DefaultArena(), the NewArena() and DeleteArena() calls // obey the flags given explicitly in the NewArena() call, even if those // flags differ from the settings in DefaultArena(). So the call // NewArena(kAsyncSignalSafe, DefaultArena()) is itself async-signal-safe, // as well as generatating an arena that provides async-signal-safe // Alloc/Free. }; static Arena *NewArena(int32 flags, Arena *meta_data_arena); // note: pages allocator will never be destroyed and allocated pages will never be freed // When allocator is NULL, it's same as NewArena static Arena *NewArenaWithCustomAlloc(int32 flags, Arena *meta_data_arena, PagesAllocator *allocator); // Destroys an arena allocated by NewArena and returns true, // provided no allocated blocks remain in the arena. // If allocated blocks remain in the arena, does nothing and // returns false. // It is illegal to attempt to destroy the DefaultArena(). static bool DeleteArena(Arena *arena); // The default arena that always exists. static Arena *DefaultArena(); private: LowLevelAlloc(); // no instances }; #endif
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/thread_lister.c
.c
2,742
84
/* Copyright (c) 2005-2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Markus Gutschke */ #include "config.h" #include "base/thread_lister.h" #include <stdio.h> /* needed for NULL on some powerpc platforms (?!) */ #include <sys/types.h> #include <unistd.h> /* for getpid */ #ifdef HAVE_SYS_PRCTL # include <sys/prctl.h> #endif #include "base/linuxthreads.h" /* Include other thread listers here that define THREADS macro * only when they can provide a good implementation. 
*/ #ifndef THREADS /* Default trivial thread lister for single-threaded applications, * or if the multi-threading code has not been ported, yet. */ int TCMalloc_ListAllProcessThreads(void *parameter, ListAllProcessThreadsCallBack callback, ...) { int rc; va_list ap; pid_t pid; #ifdef HAVE_SYS_PRCTL int dumpable = prctl(PR_GET_DUMPABLE, 0); if (!dumpable) prctl(PR_SET_DUMPABLE, 1); #endif va_start(ap, callback); pid = getpid(); rc = callback(parameter, 1, &pid, ap); va_end(ap); #ifdef HAVE_SYS_PRCTL if (!dumpable) prctl(PR_SET_DUMPABLE, 0); #endif return rc; } int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) { return 1; } #endif /* ifndef THREADS */
C
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock_internal.h
.h
2,107
52
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2010, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * This file is an internal part spinlock.cc and once.cc * It may not be used directly by code outside of //base. 
*/ #ifndef BASE_SPINLOCK_INTERNAL_H_ #define BASE_SPINLOCK_INTERNAL_H_ #include <config.h> #include "base/basictypes.h" #include "base/atomicops.h" namespace base { namespace internal { void SpinLockWake(volatile Atomic32 *w, bool all); void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop); } // namespace internal } // namespace base #endif
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock_internal.cc
.cc
4,311
103
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2010, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // The OS-specific header included below must provide two calls: // base::internal::SpinLockDelay() and base::internal::SpinLockWake(). // See spinlock_internal.h for the spec of SpinLockWake(). 
// void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) // SpinLockDelay() generates an apprproate spin delay on iteration "loop" of a // spin loop on location *w, whose previously observed value was "value". // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, // or may wait for a delay that can be truncated by a call to SpinlockWake(w). // In all cases, it must return in bounded time even if SpinlockWake() is not // called. #include "base/spinlock_internal.h" // forward declaration for use by spinlock_*-inl.h namespace base { namespace internal { static int SuggestedDelayNS(int loop); }} #if defined(_WIN32) #include "base/spinlock_win32-inl.h" #elif defined(__linux__) #include "base/spinlock_linux-inl.h" #else #include "base/spinlock_posix-inl.h" #endif namespace base { namespace internal { // Return a suggested delay in nanoseconds for iteration number "loop" static int SuggestedDelayNS(int loop) { // Weak pseudo-random number generator to get some spread between threads // when many are spinning. #ifdef BASE_HAS_ATOMIC64 static base::subtle::Atomic64 rand; uint64 r = base::subtle::NoBarrier_Load(&rand); r = 0x5deece66dLL * r + 0xb; // numbers from nrand48() base::subtle::NoBarrier_Store(&rand, r); r <<= 16; // 48-bit random number now in top 48-bits. if (loop < 0 || loop > 32) { // limit loop to 0..32 loop = 32; } // loop>>3 cannot exceed 4 because loop cannot exceed 32. // Select top 20..24 bits of lower 48 bits, // giving approximately 0ms to 16ms. // Mean is exponential in loop for first 32 iterations, then 8ms. // The futex path multiplies this by 16, since we expect explicit wakeups // almost always on that path. return r >> (44 - (loop >> 3)); #else static Atomic32 rand; uint32 r = base::subtle::NoBarrier_Load(&rand); r = 0x343fd * r + 0x269ec3; // numbers from MSVC++ base::subtle::NoBarrier_Store(&rand, r); r <<= 1; // 31-bit random number now in top 31-bits. 
if (loop < 0 || loop > 32) { // limit loop to 0..32 loop = 32; } // loop>>3 cannot exceed 4 because loop cannot exceed 32. // Select top 20..24 bits of lower 31 bits, // giving approximately 0ms to 16ms. // Mean is exponential in loop for first 32 iterations, then 8ms. // The futex path multiplies this by 16, since we expect explicit wakeups // almost always on that path. return r >> (12 - (loop >> 3)); #endif } } // namespace internal } // namespace base
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock.cc
.cc
5,213
130
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Sanjay Ghemawat */ #include <config.h> #include "base/spinlock.h" #include "base/spinlock_internal.h" #include "base/sysinfo.h" /* for GetSystemCPUsCount() */ // NOTE on the Lock-state values: // // kSpinLockFree represents the unlocked state // kSpinLockHeld represents the locked state with no waiters // kSpinLockSleeper represents the locked state with waiters static int adaptive_spin_count = 0; const base::LinkerInitialized SpinLock::LINKER_INITIALIZED = base::LINKER_INITIALIZED; namespace { struct SpinLock_InitHelper { SpinLock_InitHelper() { // On multi-cpu machines, spin for longer before yielding // the processor or sleeping. Reduces idle time significantly. if (GetSystemCPUsCount() > 1) { adaptive_spin_count = 1000; } } }; // Hook into global constructor execution: // We do not do adaptive spinning before that, // but nothing lock-intensive should be going on at that time. static SpinLock_InitHelper init_helper; inline void SpinlockPause(void) { #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) __asm__ __volatile__("rep; nop" : : ); #endif } } // unnamed namespace // Monitor the lock to see if its value changes within some time // period (adaptive_spin_count loop iterations). The last value read // from the lock is returned from the method. Atomic32 SpinLock::SpinLoop() { int c = adaptive_spin_count; while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) { SpinlockPause(); } return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, kSpinLockSleeper); } void SpinLock::SlowLock() { Atomic32 lock_value = SpinLoop(); int lock_wait_call_count = 0; while (lock_value != kSpinLockFree) { // If the lock is currently held, but not marked as having a sleeper, mark // it as having a sleeper. if (lock_value == kSpinLockHeld) { // Here, just "mark" that the thread is going to sleep. 
Don't store the // lock wait time in the lock as that will cause the current lock // owner to think it experienced contention. lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockHeld, kSpinLockSleeper); if (lock_value == kSpinLockHeld) { // Successfully transitioned to kSpinLockSleeper. Pass // kSpinLockSleeper to the SpinLockDelay routine to properly indicate // the last lock_value observed. lock_value = kSpinLockSleeper; } else if (lock_value == kSpinLockFree) { // Lock is free again, so try and acquire it before sleeping. The // new lock state will be the number of cycles this thread waited if // this thread obtains the lock. lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, kSpinLockSleeper); continue; // skip the delay at the end of the loop } } // Wait for an OS specific delay. base::internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count); // Spin again after returning from the wait routine to give this thread // some chance of obtaining the lock. lock_value = SpinLoop(); } } void SpinLock::SlowUnlock() { // wake waiter if necessary base::internal::SpinLockWake(&lockword_, false); }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/elfcore.h
.h
21,143
402
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2005-2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Markus Gutschke, Carl Crous */ #ifndef _ELFCORE_H #define _ELFCORE_H #ifdef __cplusplus extern "C" { #endif /* We currently only support x86-32, x86-64, ARM, MIPS, PPC on Linux. * Porting to other related platforms should not be difficult. 
*/ #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \ defined(__mips__) || defined(__PPC__)) && defined(__linux) #include <stdarg.h> #include <stdint.h> #include <sys/types.h> #include <config.h> /* Define the DUMPER symbol to make sure that there is exactly one * core dumper built into the library. */ #define DUMPER "ELF" /* By the time that we get a chance to read CPU registers in the * calling thread, they are already in a not particularly useful * state. Besides, there will be multiple frames on the stack that are * just making the core file confusing. To fix this problem, we take a * snapshot of the frame pointer, stack pointer, and instruction * pointer at an earlier time, and then insert these values into the * core file. */ #if defined(__i386__) || defined(__x86_64__) typedef struct i386_regs { /* Normal (non-FPU) CPU registers */ #ifdef __x86_64__ #define BP rbp #define SP rsp #define IP rip uint64_t r15,r14,r13,r12,rbp,rbx,r11,r10; uint64_t r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; uint64_t rip,cs,eflags; uint64_t rsp,ss; uint64_t fs_base, gs_base; uint64_t ds,es,fs,gs; #else #define BP ebp #define SP esp #define IP eip uint32_t ebx, ecx, edx, esi, edi, ebp, eax; uint16_t ds, __ds, es, __es; uint16_t fs, __fs, gs, __gs; uint32_t orig_eax, eip; uint16_t cs, __cs; uint32_t eflags, esp; uint16_t ss, __ss; #endif } i386_regs; #elif defined(__arm__) typedef struct arm_regs { /* General purpose registers */ #define BP uregs[11] /* Frame pointer */ #define SP uregs[13] /* Stack pointer */ #define IP uregs[15] /* Program counter */ #define LR uregs[14] /* Link register */ long uregs[18]; } arm_regs; #elif defined(__mips__) typedef struct mips_regs { unsigned long pad[6]; /* Unused padding to match kernel structures */ unsigned long uregs[32]; /* General purpose registers. */ unsigned long hi; /* Used for multiplication and division. */ unsigned long lo; unsigned long cp0_epc; /* Program counter. 
*/ unsigned long cp0_badvaddr; unsigned long cp0_status; unsigned long cp0_cause; unsigned long unused; } mips_regs; #elif defined (__PPC__) typedef struct ppc_regs { #define SP uregs[1] /* Stack pointer */ #define IP rip /* Program counter */ #define LR lr /* Link register */ unsigned long uregs[32]; /* General Purpose Registers - r0-r31. */ double fpr[32]; /* Floating-Point Registers - f0-f31. */ unsigned long rip; /* Program counter. */ unsigned long msr; unsigned long ccr; unsigned long lr; unsigned long ctr; unsigned long xeq; unsigned long mq; } ppc_regs; #endif #if defined(__i386__) && defined(__GNUC__) /* On x86 we provide an optimized version of the FRAME() macro, if the * compiler supports a GCC-style asm() directive. This results in somewhat * more accurate values for CPU registers. */ typedef struct Frame { struct i386_regs uregs; int errno_; pid_t tid; } Frame; #define FRAME(f) Frame f; \ do { \ f.errno_ = errno; \ f.tid = sys_gettid(); \ __asm__ volatile ( \ "push %%ebp\n" \ "push %%ebx\n" \ "mov %%ebx,0(%%eax)\n" \ "mov %%ecx,4(%%eax)\n" \ "mov %%edx,8(%%eax)\n" \ "mov %%esi,12(%%eax)\n" \ "mov %%edi,16(%%eax)\n" \ "mov %%ebp,20(%%eax)\n" \ "mov %%eax,24(%%eax)\n" \ "mov %%ds,%%ebx\n" \ "mov %%ebx,28(%%eax)\n" \ "mov %%es,%%ebx\n" \ "mov %%ebx,32(%%eax)\n" \ "mov %%fs,%%ebx\n" \ "mov %%ebx,36(%%eax)\n" \ "mov %%gs,%%ebx\n" \ "mov %%ebx, 40(%%eax)\n" \ "call 0f\n" \ "0:pop %%ebx\n" \ "add $1f-0b,%%ebx\n" \ "mov %%ebx,48(%%eax)\n" \ "mov %%cs,%%ebx\n" \ "mov %%ebx,52(%%eax)\n" \ "pushf\n" \ "pop %%ebx\n" \ "mov %%ebx,56(%%eax)\n" \ "mov %%esp,%%ebx\n" \ "add $8,%%ebx\n" \ "mov %%ebx,60(%%eax)\n" \ "mov %%ss,%%ebx\n" \ "mov %%ebx,64(%%eax)\n" \ "pop %%ebx\n" \ "pop %%ebp\n" \ "1:" \ : : "a" (&f) : "memory"); \ } while (0) #define SET_FRAME(f,r) \ do { \ errno = (f).errno_; \ (r) = (f).uregs; \ } while (0) #elif defined(__x86_64__) && defined(__GNUC__) /* The FRAME and SET_FRAME macros for x86_64. 
*/ typedef struct Frame { struct i386_regs uregs; int errno_; pid_t tid; } Frame; #define FRAME(f) Frame f; \ do { \ f.errno_ = errno; \ f.tid = sys_gettid(); \ __asm__ volatile ( \ "push %%rbp\n" \ "push %%rbx\n" \ "mov %%r15,0(%%rax)\n" \ "mov %%r14,8(%%rax)\n" \ "mov %%r13,16(%%rax)\n" \ "mov %%r12,24(%%rax)\n" \ "mov %%rbp,32(%%rax)\n" \ "mov %%rbx,40(%%rax)\n" \ "mov %%r11,48(%%rax)\n" \ "mov %%r10,56(%%rax)\n" \ "mov %%r9,64(%%rax)\n" \ "mov %%r8,72(%%rax)\n" \ "mov %%rax,80(%%rax)\n" \ "mov %%rcx,88(%%rax)\n" \ "mov %%rdx,96(%%rax)\n" \ "mov %%rsi,104(%%rax)\n" \ "mov %%rdi,112(%%rax)\n" \ "mov %%ds,%%rbx\n" \ "mov %%rbx,184(%%rax)\n" \ "mov %%es,%%rbx\n" \ "mov %%rbx,192(%%rax)\n" \ "mov %%fs,%%rbx\n" \ "mov %%rbx,200(%%rax)\n" \ "mov %%gs,%%rbx\n" \ "mov %%rbx,208(%%rax)\n" \ "call 0f\n" \ "0:pop %%rbx\n" \ "add $1f-0b,%%rbx\n" \ "mov %%rbx,128(%%rax)\n" \ "mov %%cs,%%rbx\n" \ "mov %%rbx,136(%%rax)\n" \ "pushf\n" \ "pop %%rbx\n" \ "mov %%rbx,144(%%rax)\n" \ "mov %%rsp,%%rbx\n" \ "add $16,%%ebx\n" \ "mov %%rbx,152(%%rax)\n" \ "mov %%ss,%%rbx\n" \ "mov %%rbx,160(%%rax)\n" \ "pop %%rbx\n" \ "pop %%rbp\n" \ "1:" \ : : "a" (&f) : "memory"); \ } while (0) #define SET_FRAME(f,r) \ do { \ errno = (f).errno_; \ (f).uregs.fs_base = (r).fs_base; \ (f).uregs.gs_base = (r).gs_base; \ (r) = (f).uregs; \ } while (0) #elif defined(__arm__) && defined(__GNUC__) /* ARM calling conventions are a little more tricky. A little assembly * helps in obtaining an accurate snapshot of all registers. */ typedef struct Frame { struct arm_regs arm; int errno_; pid_t tid; } Frame; #define FRAME(f) Frame f; \ do { \ long cpsr; \ f.errno_ = errno; \ f.tid = sys_gettid(); \ __asm__ volatile( \ "stmia %0, {r0-r15}\n" /* All integer regs */\ : : "r"(&f.arm) : "memory"); \ f.arm.uregs[16] = 0; \ __asm__ volatile( \ "mrs %0, cpsr\n" /* Condition code reg */\ : "=r"(cpsr)); \ f.arm.uregs[17] = cpsr; \ } while (0) #define SET_FRAME(f,r) \ do { \ /* Don't override the FPU status register. 
*/\ /* Use the value obtained from ptrace(). This*/\ /* works, because our code does not perform */\ /* any FPU operations, itself. */\ long fps = (f).arm.uregs[16]; \ errno = (f).errno_; \ (r) = (f).arm; \ (r).uregs[16] = fps; \ } while (0) #elif defined(__mips__) && defined(__GNUC__) typedef struct Frame { struct mips_regs mips_regs; int errno_; pid_t tid; } Frame; #define MIPSREG(n) ({ register unsigned long r __asm__("$"#n); r; }) #define FRAME(f) Frame f = { 0 }; \ do { \ unsigned long hi, lo; \ register unsigned long pc __asm__("$31"); \ f.mips_regs.uregs[ 0] = MIPSREG( 0); \ f.mips_regs.uregs[ 1] = MIPSREG( 1); \ f.mips_regs.uregs[ 2] = MIPSREG( 2); \ f.mips_regs.uregs[ 3] = MIPSREG( 3); \ f.mips_regs.uregs[ 4] = MIPSREG( 4); \ f.mips_regs.uregs[ 5] = MIPSREG( 5); \ f.mips_regs.uregs[ 6] = MIPSREG( 6); \ f.mips_regs.uregs[ 7] = MIPSREG( 7); \ f.mips_regs.uregs[ 8] = MIPSREG( 8); \ f.mips_regs.uregs[ 9] = MIPSREG( 9); \ f.mips_regs.uregs[10] = MIPSREG(10); \ f.mips_regs.uregs[11] = MIPSREG(11); \ f.mips_regs.uregs[12] = MIPSREG(12); \ f.mips_regs.uregs[13] = MIPSREG(13); \ f.mips_regs.uregs[14] = MIPSREG(14); \ f.mips_regs.uregs[15] = MIPSREG(15); \ f.mips_regs.uregs[16] = MIPSREG(16); \ f.mips_regs.uregs[17] = MIPSREG(17); \ f.mips_regs.uregs[18] = MIPSREG(18); \ f.mips_regs.uregs[19] = MIPSREG(19); \ f.mips_regs.uregs[20] = MIPSREG(20); \ f.mips_regs.uregs[21] = MIPSREG(21); \ f.mips_regs.uregs[22] = MIPSREG(22); \ f.mips_regs.uregs[23] = MIPSREG(23); \ f.mips_regs.uregs[24] = MIPSREG(24); \ f.mips_regs.uregs[25] = MIPSREG(25); \ f.mips_regs.uregs[26] = MIPSREG(26); \ f.mips_regs.uregs[27] = MIPSREG(27); \ f.mips_regs.uregs[28] = MIPSREG(28); \ f.mips_regs.uregs[29] = MIPSREG(29); \ f.mips_regs.uregs[30] = MIPSREG(30); \ f.mips_regs.uregs[31] = MIPSREG(31); \ __asm__ volatile ("mfhi %0" : "=r"(hi)); \ __asm__ volatile ("mflo %0" : "=r"(lo)); \ __asm__ volatile ("jal 1f; 1:nop" : "=r"(pc)); \ f.mips_regs.hi = hi; \ f.mips_regs.lo = lo; \ f.mips_regs.cp0_epc 
= pc; \ f.errno_ = errno; \ f.tid = sys_gettid(); \ } while (0) #define SET_FRAME(f,r) \ do { \ errno = (f).errno_; \ memcpy((r).uregs, (f).mips_regs.uregs, \ 32*sizeof(unsigned long)); \ (r).hi = (f).mips_regs.hi; \ (r).lo = (f).mips_regs.lo; \ (r).cp0_epc = (f).mips_regs.cp0_epc; \ } while (0) #else /* If we do not have a hand-optimized assembly version of the FRAME() * macro, we cannot reliably unroll the stack. So, we show a few additional * stack frames for the coredumper. */ typedef struct Frame { pid_t tid; } Frame; #define FRAME(f) Frame f; do { f.tid = sys_gettid(); } while (0) #define SET_FRAME(f,r) do { } while (0) #endif /* Internal function for generating a core file. This API can change without * notice and is only supposed to be used internally by the core dumper. * * This function works for both single- and multi-threaded core * dumps. If called as * * FRAME(frame); * InternalGetCoreDump(&frame, 0, NULL, ap); * * it creates a core file that only contains information about the * calling thread. * * Optionally, the caller can provide information about other threads * by passing their process ids in "thread_pids". The process id of * the caller should not be included in this array. All of the threads * must have been attached to with ptrace(), prior to calling this * function. They will be detached when "InternalGetCoreDump()" returns. * * This function either returns a file handle that can be read for obtaining * a core dump, or "-1" in case of an error. In the latter case, "errno" * will be set appropriately. * * While "InternalGetCoreDump()" is not technically async signal safe, you * might be tempted to invoke it from a signal handler. The code goes to * great lengths to make a best effort that this will actually work. But in * any case, you must make sure that you preserve the value of "errno" * yourself. It is guaranteed to be clobbered otherwise. * * Also, "InternalGetCoreDump" is not strictly speaking re-entrant. 
Again, * it makes a best effort to behave reasonably when called in a multi- * threaded environment, but it is ultimately the caller's responsibility * to provide locking. */ int InternalGetCoreDump(void *frame, int num_threads, pid_t *thread_pids, va_list ap /* const struct CoreDumpParameters *params, const char *file_name, const char *PATH */); #endif #ifdef __cplusplus } #endif #endif /* _ELFCORE_H */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/simple_mutex.h
.h
13,985
333
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // --- // Author: Craig Silverstein. // // A simple mutex wrapper, supporting locks and read-write locks. // You should assume the locks are *not* re-entrant. // // To use: you should define the following macros in your configure.ac: // ACX_PTHREAD // AC_RWLOCK // The latter is defined in ../autoconf. 
// // This class is meant to be internal-only and should be wrapped by an // internal namespace. Before you use this module, please give the // name of your internal namespace for this module. Or, if you want // to expose it, you'll want to move it to the Google namespace. We // cannot put this class in global namespace because there can be some // problems when we have multiple versions of Mutex in each shared object. // // NOTE: TryLock() is broken for NO_THREADS mode, at least in NDEBUG // mode. // // CYGWIN NOTE: Cygwin support for rwlock seems to be buggy: // http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html // Because of that, we might as well use windows locks for // cygwin. They seem to be more reliable than the cygwin pthreads layer. // // TRICKY IMPLEMENTATION NOTE: // This class is designed to be safe to use during // dynamic-initialization -- that is, by global constructors that are // run before main() starts. The issue in this case is that // dynamic-initialization happens in an unpredictable order, and it // could be that someone else's dynamic initializer could call a // function that tries to acquire this mutex -- but that all happens // before this mutex's constructor has run. (This can happen even if // the mutex and the function that uses the mutex are in the same .cc // file.) Basically, because Mutex does non-trivial work in its // constructor, it's not, in the naive implementation, safe to use // before dynamic initialization has run on it. // // The solution used here is to pair the actual mutex primitive with a // bool that is set to true when the mutex is dynamically initialized. // (Before that it's false.) Then we modify all mutex routines to // look at the bool, and not try to lock/unlock until the bool makes // it to true (which happens after the Mutex constructor has run.) 
// // This works because before main() starts -- particularly, during // dynamic initialization -- there are no threads, so a) it's ok that // the mutex operations are a no-op, since we don't need locking then // anyway; and b) we can be quite confident our bool won't change // state between a call to Lock() and a call to Unlock() (that would // require a global constructor in one translation unit to call Lock() // and another global constructor in another translation unit to call // Unlock() later, which is pretty perverse). // // That said, it's tricky, and can conceivably fail; it's safest to // avoid trying to acquire a mutex in a global constructor, if you // can. One way it can fail is that a really smart compiler might // initialize the bool to true at static-initialization time (too // early) rather than at dynamic-initialization time. To discourage // that, we set is_safe_ to true in code (not the constructor // colon-initializer) and set it to true via a function that always // evaluates to true, but that the compiler can't know always // evaluates to true. This should be good enough. // // A related issue is code that could try to access the mutex // after it's been destroyed in the global destructors (because // the Mutex global destructor runs before some other global // destructor, that tries to acquire the mutex). The way we // deal with this is by taking a constructor arg that global // mutexes should pass in, that causes the destructor to do no // work. We still depend on the compiler not doing anything // weird to a Mutex's memory after it is destroyed, but for a // static global variable, that's pretty safe. 
#ifndef GOOGLE_MUTEX_H_ #define GOOGLE_MUTEX_H_ #include <config.h> #if defined(NO_THREADS) typedef int MutexType; // to keep a lock-count #elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN // We only need minimal includes # endif // We need Windows NT or later for TryEnterCriticalSection(). If you // don't need that functionality, you can remove these _WIN32_WINNT // lines, and change TryLock() to assert(0) or something. # ifndef _WIN32_WINNT # define _WIN32_WINNT 0x0400 # endif # include <windows.h> typedef CRITICAL_SECTION MutexType; #elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK) // Needed for pthread_rwlock_*. If it causes problems, you could take it // out, but then you'd have to unset HAVE_RWLOCK (at least on linux -- it // *does* cause problems for FreeBSD, or MacOSX, but isn't needed // for locking there.) # ifdef __linux__ # define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls # endif # include <pthread.h> typedef pthread_rwlock_t MutexType; #elif defined(HAVE_PTHREAD) # include <pthread.h> typedef pthread_mutex_t MutexType; #else # error Need to implement mutex.h for your architecture, or #define NO_THREADS #endif #include <assert.h> #include <stdlib.h> // for abort() #define MUTEX_NAMESPACE perftools_mutex_namespace namespace MUTEX_NAMESPACE { class Mutex { public: // This is used for the single-arg constructor enum LinkerInitialized { LINKER_INITIALIZED }; // Create a Mutex that is not held by anybody. This constructor is // typically used for Mutexes allocated on the heap or the stack. inline Mutex(); // This constructor should be used for global, static Mutex objects. // It inhibits work being done by the destructor, which makes it // safer for code that tries to acqiure this mutex in their global // destructor. 
inline Mutex(LinkerInitialized); // Destructor inline ~Mutex(); inline void Lock(); // Block if needed until free then acquire exclusively inline void Unlock(); // Release a lock acquired via Lock() inline bool TryLock(); // If free, Lock() and return true, else return false // Note that on systems that don't support read-write locks, these may // be implemented as synonyms to Lock() and Unlock(). So you can use // these for efficiency, but don't use them anyplace where being able // to do shared reads is necessary to avoid deadlock. inline void ReaderLock(); // Block until free or shared then acquire a share inline void ReaderUnlock(); // Release a read share of this Mutex inline void WriterLock() { Lock(); } // Acquire an exclusive lock inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock() private: MutexType mutex_; // We want to make sure that the compiler sets is_safe_ to true only // when we tell it to, and never makes assumptions is_safe_ is // always true. volatile is the most reliable way to do that. volatile bool is_safe_; // This indicates which constructor was called. bool destroy_; inline void SetIsSafe() { is_safe_ = true; } // Catch the error of writing Mutex when intending MutexLock. Mutex(Mutex* /*ignored*/) {} // Disallow "evil" constructors Mutex(const Mutex&); void operator=(const Mutex&); }; // Now the implementation of Mutex for various systems #if defined(NO_THREADS) // When we don't have threads, we can be either reading or writing, // but not both. We can have lots of readers at once (in no-threads // mode, that's most likely to happen in recursive function calls), // but only one writer. We represent this by having mutex_ be -1 when // writing and a number > 0 when reading (and 0 when no lock is held). // // In debug mode, we assert these invariants, while in non-debug mode // we do nothing, for efficiency. That's why everything is in an // assert. 
Mutex::Mutex() : mutex_(0) { } Mutex::Mutex(Mutex::LinkerInitialized) : mutex_(0) { } Mutex::~Mutex() { assert(mutex_ == 0); } void Mutex::Lock() { assert(--mutex_ == -1); } void Mutex::Unlock() { assert(mutex_++ == -1); } bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; } void Mutex::ReaderLock() { assert(++mutex_ > 0); } void Mutex::ReaderUnlock() { assert(mutex_-- > 0); } #elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) Mutex::Mutex() : destroy_(true) { InitializeCriticalSection(&mutex_); SetIsSafe(); } Mutex::Mutex(LinkerInitialized) : destroy_(false) { InitializeCriticalSection(&mutex_); SetIsSafe(); } Mutex::~Mutex() { if (destroy_) DeleteCriticalSection(&mutex_); } void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); } void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); } bool Mutex::TryLock() { return is_safe_ ? TryEnterCriticalSection(&mutex_) != 0 : true; } void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks void Mutex::ReaderUnlock() { Unlock(); } #elif defined(HAVE_PTHREAD) && defined(HAVE_RWLOCK) #define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \ if (is_safe_ && fncall(&mutex_) != 0) abort(); \ } while (0) Mutex::Mutex() : destroy_(true) { SetIsSafe(); if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort(); } Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) { SetIsSafe(); if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort(); } Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_rwlock_destroy); } void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); } void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); } bool Mutex::TryLock() { return is_safe_ ? 
pthread_rwlock_trywrlock(&mutex_) == 0 : true; } void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); } void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); } #undef SAFE_PTHREAD #elif defined(HAVE_PTHREAD) #define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \ if (is_safe_ && fncall(&mutex_) != 0) abort(); \ } while (0) Mutex::Mutex() : destroy_(true) { SetIsSafe(); if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort(); } Mutex::Mutex(Mutex::LinkerInitialized) : destroy_(false) { SetIsSafe(); if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort(); } Mutex::~Mutex() { if (destroy_) SAFE_PTHREAD(pthread_mutex_destroy); } void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); } void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); } bool Mutex::TryLock() { return is_safe_ ? pthread_mutex_trylock(&mutex_) == 0 : true; } void Mutex::ReaderLock() { Lock(); } void Mutex::ReaderUnlock() { Unlock(); } #undef SAFE_PTHREAD #endif // -------------------------------------------------------------------------- // Some helper classes // MutexLock(mu) acquires mu when constructed and releases it when destroyed. 
class MutexLock { public: explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); } ~MutexLock() { mu_->Unlock(); } private: Mutex * const mu_; // Disallow "evil" constructors MutexLock(const MutexLock&); void operator=(const MutexLock&); }; // ReaderMutexLock and WriterMutexLock do the same, for rwlocks class ReaderMutexLock { public: explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); } ~ReaderMutexLock() { mu_->ReaderUnlock(); } private: Mutex * const mu_; // Disallow "evil" constructors ReaderMutexLock(const ReaderMutexLock&); void operator=(const ReaderMutexLock&); }; class WriterMutexLock { public: explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); } ~WriterMutexLock() { mu_->WriterUnlock(); } private: Mutex * const mu_; // Disallow "evil" constructors WriterMutexLock(const WriterMutexLock&); void operator=(const WriterMutexLock&); }; // Catch bug where variable name is omitted, e.g. MutexLock (&mu); #define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name) #define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name) #define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name) } // namespace MUTEX_NAMESPACE using namespace MUTEX_NAMESPACE; #undef MUTEX_NAMESPACE #endif /* #define GOOGLE_SIMPLE_MUTEX_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/low_level_alloc.cc
.cc
23,266
583
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // A low-level allocator that can be used by other low-level // modules without introducing dependency cycles. // This allocator is slow and wasteful of memory; // it should not be used when performance is key. 
#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new>                   // for placement-new

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif

// A first-fit allocator with amortized logarithmic free() time.

LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}

// ---------------------------------------------------------------------------
// Maximum number of levels any freelist skiplist node may have.
static const int kMaxLevel = 30;

// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
  // This struct describes one allocated block, or one free block.
  struct AllocList {
    struct Header {
      intptr_t size;  // size of entire region, including this field. Must be
                      // first.  Valid in both allocated and unallocated blocks
      intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
      LowLevelAlloc::Arena *arena; // pointer to parent arena
      void *dummy_for_alignment;   // aligns regions to 0 mod 2*sizeof(void*)
    } header;

    // Next two fields: in unallocated blocks: freelist skiplist data
    //                  in allocated blocks: overlaps with client data
    int levels;           // levels in skiplist used
    AllocList *next[kMaxLevel];   // actually has levels elements.
                                  // The AllocList node may not have room for
                                  // all kMaxLevel entries.  See max_fit in
                                  // LLA_SkiplistLevels()
  };
}
using low_level_alloc_internal::AllocList;

// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.
// An integer approximation of log2(size/base) // Requires size >= base. static int IntLog2(size_t size, size_t base) { int result = 0; for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result) result++; } // floor(size / 2**result) <= base < floor(size / 2**(result-1)) // => log2(size/(base+1)) <= result < 1+log2(size/base) // => result ~= log2(size/base) return result; } // Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1. static int Random() { static uint32 r = 1; // no locking---it's not critical ANNOTATE_BENIGN_RACE(&r, "benign race, not critical."); int result = 1; while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) { result++; } return result; } // Return a number of skiplist levels for a node of size bytes, where // base is the minimum node size. Compute level=log2(size / base)+n // where n is 1 if random is false and otherwise a random number generated with // the standard distribution for a skiplist: See Random() above. // Bigger nodes tend to have more skiplist levels due to the log2(size / base) // term, so first-fit searches touch fewer nodes. "level" is clipped so // level<kMaxLevel and next[level-1] will fit in the node. // 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel static int LLA_SkiplistLevels(size_t size, size_t base, bool random) { // max_fit is the maximum number of levels that will fit in a node for the // given size. We can't return more than max_fit, no matter what the // random number generator says. int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *); int level = IntLog2(size, base) + (random? Random() : 1); if (level > max_fit) level = max_fit; if (level > kMaxLevel-1) level = kMaxLevel - 1; RAW_CHECK(level >= 1, "block not big enough for even one level"); return level; } // Return "atleast", the first element of AllocList *head s.t. *atleast >= *e. 
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater // points to the last element at level i in the AllocList less than *e, or is // head if no such element exists. static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e, AllocList **prev) { AllocList *p = head; for (int level = head->levels - 1; level >= 0; level--) { for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) { } prev[level] = p; } return (head->levels == 0) ? 0 : prev[0]->next[0]; } // Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch. // Requires that e->levels be previously set by the caller (using // LLA_SkiplistLevels()) static void LLA_SkiplistInsert(AllocList *head, AllocList *e, AllocList **prev) { LLA_SkiplistSearch(head, e, prev); for (; head->levels < e->levels; head->levels++) { // extend prev pointers prev[head->levels] = head; // to all *e's levels } for (int i = 0; i != e->levels; i++) { // add element to list e->next[i] = prev[i]->next[i]; prev[i]->next[i] = e; } } // Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch(). 
// Requires that e->levels be previous set by the caller (using // LLA_SkiplistLevels()) static void LLA_SkiplistDelete(AllocList *head, AllocList *e, AllocList **prev) { AllocList *found = LLA_SkiplistSearch(head, e, prev); RAW_CHECK(e == found, "element not in freelist"); for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { prev[i]->next[i] = e->next[i]; } while (head->levels > 0 && head->next[head->levels - 1] == 0) { head->levels--; // reduce head->levels if level unused } } // --------------------------------------------------------------------------- // Arena implementation struct LowLevelAlloc::Arena { Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init explicit Arena(int) : pagesize(0) {} // set pagesize to zero explicitly // for non-static init SpinLock mu; // protects freelist, allocation_count, // pagesize, roundup, min_size AllocList freelist; // head of free list; sorted by addr (under mu) int32 allocation_count; // count of allocated blocks (under mu) int32 flags; // flags passed to NewArena (ro after init) size_t pagesize; // ==getpagesize() (init under mu, then ro) size_t roundup; // lowest power of 2 >= max(16,sizeof (AllocList)) // (init under mu, then ro) size_t min_size; // smallest allocation block size // (init under mu, then ro) PagesAllocator *allocator; }; // The default arena, which is used when 0 is passed instead of an Arena // pointer. static struct LowLevelAlloc::Arena default_arena; // Non-malloc-hooked arenas: used only to allocate metadata for arenas that // do not want malloc hook reporting, so that for them there's no malloc hook // reporting even during arena creation. 
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;

namespace {
  // Page supplier used when the client does not install a custom
  // PagesAllocator: plain mmap()/munmap() (or their unhooked variants).
  class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
  public:
    virtual ~DefaultPagesAllocator() {};
    virtual void *MapPages(int32 flags, size_t size);
    virtual void UnMapPages(int32 flags, void *addr, size_t size);
  };
}

// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
  // Scoped lock on an Arena's spinlock.  Unlike a plain guard, the caller
  // must explicitly call Leave() before destruction (checked by RAW_CHECK),
  // which allows the hooks/paths after unlock to stay outside the lock.
  class SCOPED_LOCKABLE ArenaLock {
   public:
    explicit ArenaLock(LowLevelAlloc::Arena *arena)
        EXCLUSIVE_LOCK_FUNCTION(arena->mu)
        : left_(false), mask_valid_(false), arena_(arena) {
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        // We've decided not to support async-signal-safe arena use until
        // there is a demonstrated need.  Here's how one could do it though
        // (would need to be made more portable).
#if 0
        sigset_t all;
        sigfillset(&all);
        this->mask_valid_ =
            (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
        RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
      }
      this->arena_->mu.Lock();
    }
    ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
    void Leave() /*UNLOCK_FUNCTION()*/ {
      this->arena_->mu.Unlock();
#if 0
      if (this->mask_valid_) {
        pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
      }
#endif
      this->left_ = true;
    }
   private:
    bool left_;       // whether left region
    bool mask_valid_;
#if 0
    sigset_t mask_;   // old mask of blocked signals
#endif
    LowLevelAlloc::Arena *arena_;
    DISALLOW_COPY_AND_ASSIGN(ArenaLock);
  };
} // anonymous namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<intptr_t>(ptr);
}

// Initialize the fields of an Arena.  Idempotent: a pagesize of zero marks
// an uninitialized arena, so repeated calls after the first are no-ops.
static void ArenaInit(LowLevelAlloc::Arena *arena) {
  if (arena->pagesize == 0) {
    arena->pagesize = getpagesize();
    // Round up block sizes to a power of two close to the header size.
    arena->roundup = 16;
    while (arena->roundup < sizeof (arena->freelist.header)) {
      arena->roundup += arena->roundup;
    }
    // Don't allocate blocks less than twice the roundup size to avoid tiny
    // free blocks.
    arena->min_size = 2 * arena->roundup;
    arena->freelist.header.size = 0;
    arena->freelist.header.magic =
        Magic(kMagicUnallocated, &arena->freelist.header);
    arena->freelist.header.arena = arena;
    arena->freelist.levels = 0;
    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
    arena->allocation_count = 0;
    if (arena == &default_arena) {
      // Default arena should be hooked, e.g. for heap-checker to trace
      // pointer chains through objects in the default arena.
      arena->flags = LowLevelAlloc::kCallMallocHook;
    } else if (arena == &unhooked_async_sig_safe_arena) {
      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
    } else {
      arena->flags = 0;   // other arenas' flags may be overridden by client,
                          // but unhooked_arena will have 0 in 'flags'.
    }
    arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
  }
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                              Arena *meta_data_arena) {
  return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}

// L < meta_data_arena->mu
// Creates a new arena whose own metadata lives in meta_data_arena; the
// metadata arena is silently redirected to an unhooked arena so that hook
// reporting is not triggered while building an arena that opted out of it.
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
                                                             Arena *meta_data_arena,
                                                             PagesAllocator *allocator) {
  RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
  if (meta_data_arena == &default_arena) {
    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      meta_data_arena = &unhooked_async_sig_safe_arena;
    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
      meta_data_arena = &unhooked_arena;
    }
  }
  // Arena(0) uses the constructor for non-static contexts
  Arena *result =
    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
  ArenaInit(result);
  result->flags = flags;
  if (allocator) {
    result->allocator = allocator;
  }
  return result;
}

// L < arena->mu, L < arena->arena->mu
// Destroys an empty arena, unmapping every free region; returns false (and
// does nothing) if the arena still has live allocations.
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
            "may not delete default arena");
  ArenaLock section(arena);
  bool empty = (arena->allocation_count == 0);
  section.Leave();
  if (empty) {
    // The freelist of an empty arena holds only whole mmap'd regions;
    // walk level 0 and return each region to the OS after sanity checks.
    while (arena->freelist.next[0] != 0) {
      AllocList *region = arena->freelist.next[0];
      size_t size = region->header.size;
      arena->freelist.next[0] = region->next[0];
      RAW_CHECK(region->header.magic ==
                Magic(kMagicUnallocated, &region->header),
                "bad magic number in DeleteArena()");
      RAW_CHECK(region->header.arena == arena,
                "bad arena pointer in DeleteArena()");
      RAW_CHECK(size % arena->pagesize == 0,
                "empty arena has non-page-aligned block size");
      RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                "empty arena has non-page-aligned block");
      int munmap_result;
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
        munmap_result = munmap(region, size);
      } else {
        munmap_result = MallocHook::UnhookedMUnmap(region, size);
      }
      RAW_CHECK(munmap_result == 0,
                "LowLevelAlloc::DeleteArena: munmap failed address");
    }
    Free(arena);
  }
  return empty;
}

// ---------------------------------------------------------------------------

// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != 0) {
    RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
              "bad magic number in Next()");
    RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      RAW_CHECK(prev < next, "unordered freelist");
      RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                reinterpret_cast<char *>(next), "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) { AllocList *n = a->next[0]; if (n != 0 && reinterpret_cast<char *>(a) + a->header.size == reinterpret_cast<char *>(n)) { LowLevelAlloc::Arena *arena = a->header.arena; a->header.size += n->header.size; n->header.magic = 0; n->header.arena = 0; AllocList *prev[kMaxLevel]; LLA_SkiplistDelete(&arena->freelist, n, prev); LLA_SkiplistDelete(&arena->freelist, a, prev); a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true); LLA_SkiplistInsert(&arena->freelist, a, prev); } } // Adds block at location "v" to the free list // L >= arena->mu static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) { AllocList *f = reinterpret_cast<AllocList *>( reinterpret_cast<char *>(v) - sizeof (f->header)); RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), "bad magic number in AddToFreelist()"); RAW_CHECK(f->header.arena == arena, "bad arena pointer in AddToFreelist()"); f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true); AllocList *prev[kMaxLevel]; LLA_SkiplistInsert(&arena->freelist, f, prev); f->header.magic = Magic(kMagicUnallocated, &f->header); Coalesce(f); // maybe coalesce with successor Coalesce(prev[0]); // maybe coalesce with predecessor } // Frees storage allocated by LowLevelAlloc::Alloc(). 
// L < arena->mu void LowLevelAlloc::Free(void *v) { if (v != 0) { AllocList *f = reinterpret_cast<AllocList *>( reinterpret_cast<char *>(v) - sizeof (f->header)); RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), "bad magic number in Free()"); LowLevelAlloc::Arena *arena = f->header.arena; if ((arena->flags & kCallMallocHook) != 0) { MallocHook::InvokeDeleteHook(v); } ArenaLock section(arena); AddToFreelist(v, arena); RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free"); arena->allocation_count--; section.Leave(); } } // allocates and returns a block of size bytes, to be freed with Free() // L < arena->mu static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { void *result = 0; if (request != 0) { AllocList *s; // will point to region that satisfies request ArenaLock section(arena); ArenaInit(arena); // round up with header size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup); for (;;) { // loop until we find a suitable region // find the minimum levels that a block of this size must have int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1; if (i < arena->freelist.levels) { // potential blocks exist AllocList *before = &arena->freelist; // predecessor of s while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) { before = s; } if (s != 0) { // we found a region break; } } // we unlock before mmap() both because mmap() may call a callback hook, // and because it may be slow. arena->mu.Unlock(); // mmap generous 64K chunks to decrease // the chances/impact of fragmentation: size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size); arena->mu.Lock(); s = reinterpret_cast<AllocList *>(new_pages); s->header.size = new_pages_size; // Pretend the block is allocated; call AddToFreelist() to free it. 
s->header.magic = Magic(kMagicAllocated, &s->header); s->header.arena = arena; AddToFreelist(&s->levels, arena); // insert new region into free list } AllocList *prev[kMaxLevel]; LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list // s points to the first free region that's big enough if (req_rnd + arena->min_size <= s->header.size) { // big enough to split AllocList *n = reinterpret_cast<AllocList *> (req_rnd + reinterpret_cast<char *>(s)); n->header.size = s->header.size - req_rnd; n->header.magic = Magic(kMagicAllocated, &n->header); n->header.arena = arena; s->header.size = req_rnd; AddToFreelist(&n->levels, arena); } s->header.magic = Magic(kMagicAllocated, &s->header); RAW_CHECK(s->header.arena == arena, ""); arena->allocation_count++; section.Leave(); result = &s->levels; } ANNOTATE_NEW_MEMORY(result, request); return result; } void *LowLevelAlloc::Alloc(size_t request) { void *result = DoAllocWithArena(request, &default_arena); if ((default_arena.flags & kCallMallocHook) != 0) { // this call must be directly in the user-called allocator function // for MallocHook::GetCallerStackTrace to work properly MallocHook::InvokeNewHook(result, request); } return result; } void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) { RAW_CHECK(arena != 0, "must pass a valid arena"); void *result = DoAllocWithArena(request, arena); if ((arena->flags & kCallMallocHook) != 0) { // this call must be directly in the user-called allocator function // for MallocHook::GetCallerStackTrace to work properly MallocHook::InvokeNewHook(result, request); } return result; } LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { return &default_arena; } static DefaultPagesAllocator *default_pages_allocator; static union { char chars[sizeof(DefaultPagesAllocator)]; void *ptr; } debug_pages_allocator_space; LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) { if (default_pages_allocator) { return default_pages_allocator; } 
default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator(); return default_pages_allocator; } void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) { void *new_pages; if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { new_pages = MallocHook::UnhookedMMap(0, size, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); } else { new_pages = mmap(0, size, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); } RAW_CHECK(new_pages != MAP_FAILED, "mmap error"); return new_pages; } void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) { int munmap_result; if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) { munmap_result = munmap(region, size); } else { munmap_result = MallocHook::UnhookedMUnmap(region, size); } RAW_CHECK(munmap_result == 0, "LowLevelAlloc::DeleteArena: munmap failed address"); }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/logging.cc
.cc
3,801
109
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// This file just provides storage for FLAGS_verbose.

#include <config.h>
#include "base/logging.h"
#include "base/commandlineflags.h"

DEFINE_int32(verbose, EnvToInt("PERFTOOLS_VERBOSE", 0),
             "Set to numbers >0 for more verbose output, or <0 for less. "
             "--verbose == -4 means we log fatal errors only.");

#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__)

// While windows does have a POSIX-compatible API
// (_open/_write/_close), it acquires memory.  Using this lower-level
// windows API is the closest we can get to being "raw".

// Opens filename for writing, truncating any existing contents.
RawFD RawOpenForWriting(const char* filename) {
  // CreateFile allocates memory if file_name isn't absolute, so if
  // that ever becomes a problem then we ought to compute the absolute
  // path on its behalf (perhaps the ntdll/kernel function isn't aware
  // of the working directory?)
  RawFD fd = CreateFileA(filename, GENERIC_WRITE, 0, NULL,
                         CREATE_ALWAYS, 0, NULL);
  if (fd != kIllegalRawFD && GetLastError() == ERROR_ALREADY_EXISTS)
    SetEndOfFile(fd);    // truncate the existing file
  return fd;
}

// Writes all len bytes of buf, retrying on short writes; stops on error.
void RawWrite(RawFD handle, const char* buf, size_t len) {
  while (len > 0) {
    DWORD wrote;
    BOOL ok = WriteFile(handle, buf, len, &wrote, NULL);
    // We do not use an asynchronous file handle, so ok==false means an error
    if (!ok) break;
    buf += wrote;
    len -= wrote;
  }
}

void RawClose(RawFD handle) {
  CloseHandle(handle);
}

#else  // _WIN32 || __CYGWIN__ || __CYGWIN32__

#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

// Re-run fn until it doesn't cause EINTR.
#define NO_INTR(fn)  do {} while ((fn) < 0 && errno == EINTR)

// Opens filename for writing, truncating any existing contents.
RawFD RawOpenForWriting(const char* filename) {
  return open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0664);
}

// Writes all len bytes of buf, retrying on EINTR and short writes;
// stops on any other error.
void RawWrite(RawFD fd, const char* buf, size_t len) {
  while (len > 0) {
    ssize_t r;
    NO_INTR(r = write(fd, buf, len));
    if (r <= 0) break;
    buf += r;
    len -= r;
  }
}

void RawClose(RawFD fd) {
  NO_INTR(close(fd));
}

#endif  // _WIN32 || __CYGWIN__ || __CYGWIN32__
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/basictypes.h
.h
16,983
437
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef _BASICTYPES_H_
#define _BASICTYPES_H_

#include <config.h>
#include <string.h>       // for memcpy()
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>     // gets us PRId64, etc
#endif

// To use this in an autoconf setting, make sure you run the following
// autoconf macros:
//    AC_HEADER_STDC              /* for stdint_h and inttypes_h */
//    AC_CHECK_TYPES([__int64])   /* defined in some windows platforms */

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>           // uint16_t might be here; PRId64 too.
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>             // to get uint16_t (ISO naming madness)
#endif
#include <sys/types.h>          // our last best hope for uint16_t

// Standard typedefs
// All Google code is compiled with -funsigned-char to make "char"
// unsigned.  Google code therefore doesn't need a "uchar" type.
// TODO(csilvers): how do we make sure unsigned-char works on non-gcc systems?
typedef signed char         schar;
typedef int8_t              int8;
typedef int16_t             int16;
typedef int32_t             int32;
typedef int64_t             int64;

// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
// places.  Use the signed types unless your variable represents a bit
// pattern (eg a hash value) or you really need the extra bit.  Do NOT
// use 'unsigned' to express "this value should always be positive";
// use assertions for this.
typedef uint8_t            uint8;
typedef uint16_t           uint16;
typedef uint32_t           uint32;
typedef uint64_t           uint64;

// Extreme values for the fixed-width integer typedefs above.
const uint16 kuint16max = (   (uint16) 0xFFFF);
const uint32 kuint32max = (   (uint32) 0xFFFFFFFF);
const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );

const  int8  kint8max   = (   (  int8) 0x7F);
const  int16 kint16max  = (   ( int16) 0x7FFF);
const  int32 kint32max  = (   ( int32) 0x7FFFFFFF);
const  int64 kint64max  = ( ((( int64) kint32max) << 32) | kuint32max );

const  int8  kint8min   = (   (  int8) 0x80);
const  int16 kint16min  = (   ( int16) 0x8000);
const  int32 kint32min  = (   ( int32) 0x80000000);
const  int64 kint64min  = ( (((uint64) kint32min) << 32) | 0 );

// Define the "portable" printf and scanf macros, if they're not
// already there (via the inttypes.h we #included above, hopefully).
// Mostly it's old systems that don't support inttypes.h, so we assume
// they're 32 bit.
#ifndef PRIx64
#define PRIx64 "llx"
#endif
#ifndef SCNx64
#define SCNx64 "llx"
#endif
#ifndef PRId64
#define PRId64 "lld"
#endif
#ifndef SCNd64
#define SCNd64 "lld"
#endif
#ifndef PRIu64
#define PRIu64 "llu"
#endif
#ifndef PRIxPTR
#define PRIxPTR "lx"
#endif

// Also allow for printing of a pthread_t.
#define GPRIuPTHREAD "lu"
#define GPRIxPTHREAD "lx"
#if defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__APPLE__) || defined(__FreeBSD__)
#define PRINTABLE_PTHREAD(pthreadt) reinterpret_cast<uintptr_t>(pthreadt)
#else
#define PRINTABLE_PTHREAD(pthreadt) pthreadt
#endif

// Branch-prediction hints for the compiler; no-ops on non-GCC compilers.
#if defined(__GNUC__)
#define PREDICT_TRUE(x) __builtin_expect(!!(x), 1)
#define PREDICT_FALSE(x) __builtin_expect(!!(x), 0)
#else
#define PREDICT_TRUE(x) (x)
#define PREDICT_FALSE(x) (x)
#endif

// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_EVIL_CONSTRUCTORS(TypeName)    \
  TypeName(const TypeName&);                    \
  void operator=(const TypeName&)

// An alternate name that leaves out the moral judgment... :-)
#define DISALLOW_COPY_AND_ASSIGN(TypeName) DISALLOW_EVIL_CONSTRUCTORS(TypeName)

// The COMPILE_ASSERT macro can be used to verify that a compile time
// expression is true. For example, you could use it to verify the
// size of a static array:
//
//   COMPILE_ASSERT(sizeof(num_content_type_names) == sizeof(int),
//                  content_type_names_incorrect_size);
//
// or to make sure a struct is smaller than a certain size:
//
//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
//
// The second argument to the macro is the name of the variable. If
// the expression is false, most compilers will issue a warning/error
// containing the name of the variable.
//
// Implementation details of COMPILE_ASSERT:
//
// - COMPILE_ASSERT works by defining an array type that has -1
//   elements (and thus is invalid) when the expression is false.
//
// - The simpler definition
//
//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
//
//   does not work, as gcc supports variable-length arrays whose sizes
//   are determined at run-time (this is gcc's extension and not part
//   of the C++ standard).  As a result, gcc fails to reject the
//   following code with the simple definition:
//
//     int foo;
//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
//                               // not a compile-time constant.
//
// - By using the type CompileAssert<(bool(expr))>, we ensure that
//   expr is a compile-time constant.  (Template arguments must be
//   determined at compile-time.)
//
// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
//
//     CompileAssert<bool(expr)>
//
//   instead, these compilers will refuse to compile
//
//     COMPILE_ASSERT(5 > 0, some_message);
//
//   (They seem to think the ">" in "5 > 0" marks the end of the
//   template argument list.)
//
// - The array size is (bool(expr) ? 1 : -1), instead of simply
//
//     ((expr) ? 1 : -1).
//
//   This is to avoid running into a bug in MS VC 7.1, which
//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
template <bool>
struct CompileAssert {
};

#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_UNUSED __attribute__((unused))
#else
# define ATTRIBUTE_UNUSED
#endif

#if defined(HAVE___ATTRIBUTE__) && defined(HAVE_TLS)
#define ATTR_INITIAL_EXEC __attribute__ ((tls_model ("initial-exec")))
#else
#define ATTR_INITIAL_EXEC
#endif

#define COMPILE_ASSERT(expr, msg)                               \
  typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] ATTRIBUTE_UNUSED

#define arraysize(a)  (sizeof(a) / sizeof(*(a)))

// Offset of "field" within "strct", computed without dereferencing a real
// object (16 is an arbitrary non-null fake address).
#define OFFSETOF_MEMBER(strct, field)                                   \
  (reinterpret_cast<char*>(&reinterpret_cast<strct*>(16)->field) -      \
   reinterpret_cast<char*>(16))

// bit_cast<Dest,Source> implements the equivalent of
// "*reinterpret_cast<Dest*>(&source)".
//
// The reinterpret_cast method would produce undefined behavior
// according to ISO C++ specification section 3.10 -15 -.
// bit_cast<> calls memcpy() which is blessed by the standard,
// especially by the example in section 3.9.
//
// Fortunately memcpy() is very fast.  In optimized mode, with a
// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
// code with the minimal amount of data movement.  On a 32-bit system,
// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
// compiles to two loads and two stores.

template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
}

// bit_store<Dest,Source> implements the equivalent of
// "dest = *reinterpret_cast<Dest*>(&source)".
//
// This prevents undefined behavior when the dest pointer is unaligned.
template <class Dest, class Source>
inline void bit_store(Dest *dest, const Source *source) {
  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), bitcasting_unequal_sizes);
  // memcpy is safe for any alignment, unlike a direct pointer dereference.
  memcpy(dest, source, sizeof(Dest));
}

#ifdef HAVE___ATTRIBUTE__
# define ATTRIBUTE_WEAK      __attribute__((weak))
# define ATTRIBUTE_NOINLINE  __attribute__((noinline))
#else
# define ATTRIBUTE_WEAK
# define ATTRIBUTE_NOINLINE
#endif

#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__)
# define ATTRIBUTE_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#else
# define ATTRIBUTE_VISIBILITY_HIDDEN
#endif

// Section attributes are supported for both ELF and Mach-O, but in
// very different ways.  Here's the API we provide:
// 1) ATTRIBUTE_SECTION: put this with the declaration of all functions
//    you want to be in the same linker section
// 2) DEFINE_ATTRIBUTE_SECTION_VARS: must be called once per unique
//    name.  You want to make sure this is executed before any
//    DECLARE_ATTRIBUTE_SECTION_VARS; the easiest way is to put them
//    in the same .cc file.  Put this call at the global level.
// 3) INIT_ATTRIBUTE_SECTION_VARS: you can scatter calls to this in
//    multiple places to help ensure execution before any
//    DECLARE_ATTRIBUTE_SECTION_VARS.  You must have at least one
//    DEFINE, but you can have many INITs.  Put each in its own scope.
// 4) DECLARE_ATTRIBUTE_SECTION_VARS: must be called before using
//    ATTRIBUTE_SECTION_START or ATTRIBUTE_SECTION_STOP on a name.
//    Put this call at the global level.
// 5) ATTRIBUTE_SECTION_START/ATTRIBUTE_SECTION_STOP: call this to say
//    where in memory a given section is.  All functions declared with
//    ATTRIBUTE_SECTION are guaranteed to be between START and STOP.
#if defined(HAVE___ATTRIBUTE__) && defined(__ELF__) # define ATTRIBUTE_SECTION(name) __attribute__ ((section (#name))) __attribute__((noinline)) // Weak section declaration to be used as a global declaration // for ATTRIBUTE_SECTION_START|STOP(name) to compile and link // even without functions with ATTRIBUTE_SECTION(name). # define DECLARE_ATTRIBUTE_SECTION_VARS(name) \ extern char __start_##name[] ATTRIBUTE_WEAK; \ extern char __stop_##name[] ATTRIBUTE_WEAK # define INIT_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF # define DEFINE_ATTRIBUTE_SECTION_VARS(name) // no-op for ELF // Return void* pointers to start/end of a section of code with functions // having ATTRIBUTE_SECTION(name), or 0 if no such function exists. // One must DECLARE_ATTRIBUTE_SECTION(name) for this to compile and link. # define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name)) # define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name)) # define HAVE_ATTRIBUTE_SECTION_START 1 #elif defined(HAVE___ATTRIBUTE__) && defined(__MACH__) # define ATTRIBUTE_SECTION(name) __attribute__ ((section ("__TEXT, " #name))) #include <mach-o/getsect.h> #include <mach-o/dyld.h> class AssignAttributeStartEnd { public: AssignAttributeStartEnd(const char* name, char** pstart, char** pend) { // Find out what dynamic library name is defined in if (_dyld_present()) { for (int i = _dyld_image_count() - 1; i >= 0; --i) { const mach_header* hdr = _dyld_get_image_header(i); #ifdef MH_MAGIC_64 if (hdr->magic == MH_MAGIC_64) { uint64_t len; *pstart = getsectdatafromheader_64((mach_header_64*)hdr, "__TEXT", name, &len); if (*pstart) { // NULL if not defined in this dynamic library *pstart += _dyld_get_image_vmaddr_slide(i); // correct for reloc *pend = *pstart + len; return; } } #endif if (hdr->magic == MH_MAGIC) { uint32_t len; *pstart = getsectdatafromheader(hdr, "__TEXT", name, &len); if (*pstart) { // NULL if not defined in this dynamic library *pstart += 
_dyld_get_image_vmaddr_slide(i); // correct for reloc *pend = *pstart + len; return; } } } } // If we get here, not defined in a dll at all. See if defined statically. unsigned long len; // don't ask me why this type isn't uint32_t too... *pstart = getsectdata("__TEXT", name, &len); *pend = *pstart + len; } }; #define DECLARE_ATTRIBUTE_SECTION_VARS(name) \ extern char* __start_##name; \ extern char* __stop_##name #define INIT_ATTRIBUTE_SECTION_VARS(name) \ DECLARE_ATTRIBUTE_SECTION_VARS(name); \ static const AssignAttributeStartEnd __assign_##name( \ #name, &__start_##name, &__stop_##name) #define DEFINE_ATTRIBUTE_SECTION_VARS(name) \ char* __start_##name, *__stop_##name; \ INIT_ATTRIBUTE_SECTION_VARS(name) # define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(__start_##name)) # define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(__stop_##name)) # define HAVE_ATTRIBUTE_SECTION_START 1 #else // not HAVE___ATTRIBUTE__ && __ELF__, nor HAVE___ATTRIBUTE__ && __MACH__ # define ATTRIBUTE_SECTION(name) # define DECLARE_ATTRIBUTE_SECTION_VARS(name) # define INIT_ATTRIBUTE_SECTION_VARS(name) # define DEFINE_ATTRIBUTE_SECTION_VARS(name) # define ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void*>(0)) # define ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void*>(0)) #endif // HAVE___ATTRIBUTE__ and __ELF__ or __MACH__ #if defined(HAVE___ATTRIBUTE__) # if (defined(__i386__) || defined(__x86_64__)) # define CACHELINE_ALIGNED __attribute__((aligned(64))) # elif (defined(__PPC__) || defined(__PPC64__)) # define CACHELINE_ALIGNED __attribute__((aligned(16))) # elif (defined(__arm__)) # define CACHELINE_ALIGNED __attribute__((aligned(64))) // some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned # elif (defined(__mips__)) # define CACHELINE_ALIGNED __attribute__((aligned(128))) # elif (defined(__aarch64__)) # define CACHELINE_ALIGNED __attribute__((aligned(64))) // implementation 
specific, Cortex-A53 and 57 should have 64 bytes # elif (defined(__s390__)) # define CACHELINE_ALIGNED __attribute__((aligned(256))) # else # error Could not determine cache line length - unknown architecture # endif #else # define CACHELINE_ALIGNED #endif // defined(HAVE___ATTRIBUTE__) #if defined(HAVE___ATTRIBUTE__ALIGNED_FN) # define CACHELINE_ALIGNED_FN CACHELINE_ALIGNED #else # define CACHELINE_ALIGNED_FN #endif // Structure for discovering alignment union MemoryAligner { void* p; double d; size_t s; } CACHELINE_ALIGNED; #if defined(HAVE___ATTRIBUTE__) && defined(__ELF__) #define ATTRIBUTE_HIDDEN __attribute__((visibility("hidden"))) #else #define ATTRIBUTE_HIDDEN #endif #if defined(__GNUC__) #define ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) #elif defined(_MSC_VER) #define ATTRIBUTE_ALWAYS_INLINE __forceinline #else #define ATTRIBUTE_ALWAYS_INLINE #endif // The following enum should be used only as a constructor argument to indicate // that the variable has static storage class, and that the constructor should // do nothing to its state. It indicates to the reader that it is legal to // declare a static nistance of the class, provided the constructor is given // the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a // static variable that has a constructor or a destructor because invocation // order is undefined. However, IF the type can be initialized by filling with // zeroes (which the loader does for static variables), AND the destructor also // does nothing to the storage, then a constructor declared as // explicit MyClass(base::LinkerInitialized x) {} // and invoked as // static MyClass my_variable_name(base::LINKER_INITIALIZED); namespace base { enum LinkerInitialized { LINKER_INITIALIZED }; } #endif // _BASICTYPES_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/vdso_support.h
.h
5,147
137
// Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Paul Pluzhnikov // // Allow dynamic symbol lookup in the kernel VDSO page. // // VDSO stands for "Virtual Dynamic Shared Object" -- a page of // executable code, which looks like a shared library, but doesn't // necessarily exist anywhere on disk, and which gets mmap()ed into // every process by kernels which support VDSO, such as 2.6.x for 32-bit // executables, and 2.6.24 and above for 64-bit executables. 
// // More details could be found here: // http://www.trilithium.com/johan/2005/08/linux-gate/ // // VDSOSupport -- a class representing kernel VDSO (if present). // // Example usage: // VDSOSupport vdso; // VDSOSupport::SymbolInfo info; // typedef (*FN)(unsigned *, void *, void *); // FN fn = NULL; // if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) { // fn = reinterpret_cast<FN>(info.address); // } #ifndef BASE_VDSO_SUPPORT_H_ #define BASE_VDSO_SUPPORT_H_ #include <config.h> #include "base/basictypes.h" #include "base/elf_mem_image.h" #ifdef HAVE_ELF_MEM_IMAGE // Enable VDSO support only for the architectures/operating systems that // support it. #if defined(__linux__) && (defined(__i386__) || defined(__PPC__)) #define HAVE_VDSO_SUPPORT 1 #endif #include <stdlib.h> // for NULL namespace base { // NOTE: this class may be used from within tcmalloc, and can not // use any memory allocation routines. class VDSOSupport { public: VDSOSupport(); typedef ElfMemImage::SymbolInfo SymbolInfo; typedef ElfMemImage::SymbolIterator SymbolIterator; // Answers whether we have a vdso at all. bool IsPresent() const { return image_.IsPresent(); } // Allow to iterate over all VDSO symbols. SymbolIterator begin() const { return image_.begin(); } SymbolIterator end() const { return image_.end(); } // Look up versioned dynamic symbol in the kernel VDSO. // Returns false if VDSO is not present, or doesn't contain given // symbol/version/type combination. // If info_out != NULL, additional details are filled in. bool LookupSymbol(const char *name, const char *version, int symbol_type, SymbolInfo *info_out) const; // Find info about symbol (if any) which overlaps given address. // Returns true if symbol was found; false if VDSO isn't present // or doesn't have a symbol overlapping given address. // If info_out != NULL, additional details are filled in. bool LookupSymbolByAddress(const void *address, SymbolInfo *info_out) const; // Used only for testing. 
Replace real VDSO base with a mock. // Returns previous value of vdso_base_. After you are done testing, // you are expected to call SetBase() with previous value, in order to // reset state to the way it was. const void *SetBase(const void *s); // Computes vdso_base_ and returns it. Should be called as early as // possible; before any thread creation, chroot or setuid. static const void *Init(); private: // image_ represents VDSO ELF image in memory. // image_.ehdr_ == NULL implies there is no VDSO. ElfMemImage image_; // Cached value of auxv AT_SYSINFO_EHDR, computed once. // This is a tri-state: // kInvalidBase => value hasn't been determined yet. // 0 => there is no VDSO. // else => vma of VDSO Elf{32,64}_Ehdr. // // When testing with mock VDSO, low bit is set. // The low bit is always available because vdso_base_ is // page-aligned. static const void *vdso_base_; DISALLOW_COPY_AND_ASSIGN(VDSOSupport); }; } // namespace base #endif // HAVE_ELF_MEM_IMAGE #endif // BASE_VDSO_SUPPORT_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-windows.h
.h
17,090
458
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Sanjay Ghemawat */ // Implementation of atomic operations using Windows API // functions. This file should not be included directly. Clients // should instead include "base/atomicops.h". 
#ifndef BASE_ATOMICOPS_INTERNALS_WINDOWS_H_ #define BASE_ATOMICOPS_INTERNALS_WINDOWS_H_ #include <stdio.h> #include <stdlib.h> #include "base/basictypes.h" // For COMPILE_ASSERT typedef int32 Atomic32; #if defined(_WIN64) #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* #endif namespace base { namespace subtle { typedef int64 Atomic64; // 32-bit low-level operations on any platform extern "C" { // We use windows intrinsics when we can (they seem to be supported // well on MSVC 8.0 and above). Unfortunately, in some // environments, <windows.h> and <intrin.h> have conflicting // declarations of some other intrinsics, breaking compilation: // http://connect.microsoft.com/VisualStudio/feedback/details/262047 // Therefore, we simply declare the relevant intrinsics ourself. // MinGW has a bug in the header files where it doesn't indicate the // first argument is volatile -- they're not up to date. See // http://readlist.com/lists/lists.sourceforge.net/mingw-users/0/3861.html // We have to const_cast away the volatile to avoid compiler warnings. // TODO(csilvers): remove this once MinGW has updated MinGW/include/winbase.h #if defined(__MINGW32__) inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval) { return ::InterlockedCompareExchange(const_cast<LONG*>(ptr), newval, oldval); } inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { return ::InterlockedExchange(const_cast<LONG*>(ptr), newval); } inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { return ::InterlockedExchangeAdd(const_cast<LONG*>(ptr), increment); } #elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0 // Unfortunately, in some environments, <windows.h> and <intrin.h> // have conflicting declarations of some intrinsics, breaking // compilation. So we declare the intrinsics we need ourselves. 
See // http://connect.microsoft.com/VisualStudio/feedback/details/262047 LONG _InterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval); #pragma intrinsic(_InterlockedCompareExchange) inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval) { return _InterlockedCompareExchange(ptr, newval, oldval); } LONG _InterlockedExchange(volatile LONG* ptr, LONG newval); #pragma intrinsic(_InterlockedExchange) inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { return _InterlockedExchange(ptr, newval); } LONG _InterlockedExchangeAdd(volatile LONG* ptr, LONG increment); #pragma intrinsic(_InterlockedExchangeAdd) inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { return _InterlockedExchangeAdd(ptr, increment); } #else inline LONG FastInterlockedCompareExchange(volatile LONG* ptr, LONG newval, LONG oldval) { return ::InterlockedCompareExchange(ptr, newval, oldval); } inline LONG FastInterlockedExchange(volatile LONG* ptr, LONG newval) { return ::InterlockedExchange(ptr, newval); } inline LONG FastInterlockedExchangeAdd(volatile LONG* ptr, LONG increment) { return ::InterlockedExchangeAdd(ptr, increment); } #endif // ifdef __MINGW32__ } // extern "C" inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { LONG result = FastInterlockedCompareExchange( reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value), static_cast<LONG>(old_value)); return static_cast<Atomic32>(result); } inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { LONG result = FastInterlockedExchange( reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value)); return static_cast<Atomic32>(result); } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { // FastInterlockedExchange has both acquire and release memory barriers. 
return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { // FastInterlockedExchange has both acquire and release memory barriers. return NoBarrier_AtomicExchange(ptr, new_value); } } // namespace base::subtle } // namespace base // In msvc8/vs2005, winnt.h already contains a definition for // MemoryBarrier in the global namespace. Add it there for earlier // versions and forward to it from within the namespace. #if !(defined(_MSC_VER) && _MSC_VER >= 1400) inline void MemoryBarrier() { Atomic32 value = 0; base::subtle::NoBarrier_AtomicExchange(&value, 0); // actually acts as a barrier in thisd implementation } #endif namespace base { namespace subtle { inline void MemoryBarrier() { ::MemoryBarrier(); } inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { Acquire_AtomicExchange(ptr, value); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; // works w/o barrier for current Intel chips as of June 2005 // See comments in Atomic64 version of Release_Store() below. } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } // 64-bit operations #if defined(_WIN64) || defined(__MINGW64__) // 64-bit low-level operations on 64-bit platform. 
COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic); // These are the intrinsics needed for 64-bit operations. Similar to the // 32-bit case above. extern "C" { #if defined(__MINGW64__) inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, PVOID newval, PVOID oldval) { return ::InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), newval, oldval); } inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { return ::InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); } inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment) { return ::InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); } #elif _MSC_VER >= 1400 // intrinsics didn't work so well before MSVC 8.0 // Like above, we need to declare the intrinsics ourselves. PVOID _InterlockedCompareExchangePointer(volatile PVOID* ptr, PVOID newval, PVOID oldval); #pragma intrinsic(_InterlockedCompareExchangePointer) inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, PVOID newval, PVOID oldval) { return _InterlockedCompareExchangePointer(const_cast<PVOID*>(ptr), newval, oldval); } PVOID _InterlockedExchangePointer(volatile PVOID* ptr, PVOID newval); #pragma intrinsic(_InterlockedExchangePointer) inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { return _InterlockedExchangePointer(const_cast<PVOID*>(ptr), newval); } LONGLONG _InterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment); #pragma intrinsic(_InterlockedExchangeAdd64) inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment) { return _InterlockedExchangeAdd64(const_cast<LONGLONG*>(ptr), increment); } #else inline PVOID FastInterlockedCompareExchangePointer(volatile PVOID* ptr, PVOID newval, PVOID oldval) { return ::InterlockedCompareExchangePointer(ptr, newval, oldval); } inline PVOID FastInterlockedExchangePointer(volatile PVOID* ptr, PVOID newval) { 
return ::InterlockedExchangePointer(ptr, newval); } inline LONGLONG FastInterlockedExchangeAdd64(volatile LONGLONG* ptr, LONGLONG increment) { return ::InterlockedExchangeAdd64(ptr, increment); } #endif // ifdef __MINGW64__ } // extern "C" inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { PVOID result = FastInterlockedCompareExchangePointer( reinterpret_cast<volatile PVOID*>(ptr), reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); return reinterpret_cast<Atomic64>(result); } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { PVOID result = FastInterlockedExchangePointer( reinterpret_cast<volatile PVOID*>(ptr), reinterpret_cast<PVOID>(new_value)); return reinterpret_cast<Atomic64>(result); } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { NoBarrier_AtomicExchange(ptr, value); // acts as a barrier in this implementation } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; // works w/o barrier for current Intel chips as of June 2005 // When new chips come out, check: // IA-32 Intel Architecture Software Developer's Manual, Volume 3: // System Programming Guide, Chatper 7: Multiple-processor management, // Section 7.2, Memory Ordering. // Last seen at: // http://developer.intel.com/design/pentium4/manuals/index_new.htm } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } #else // defined(_WIN64) || defined(__MINGW64__) // 64-bit low-level operations on 32-bit platform // TODO(vchen): The GNU assembly below must be converted to MSVC inline // assembly. 
Then the file should be renamed to ...-x86-msvc.h, probably. inline void NotImplementedFatalError(const char *function_name) { fprintf(stderr, "64-bit %s() not implemented on this platform\n", function_name); abort(); } inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { #if 0 // Not implemented Atomic64 prev; __asm__ __volatile__("movl (%3), %%ebx\n\t" // Move 64-bit new_value into "movl 4(%3), %%ecx\n\t" // ecx:ebx "lock; cmpxchg8b %1\n\t" // If edx:eax (old_value) same : "=A" (prev) // as contents of ptr: : "m" (*ptr), // ecx:ebx => ptr "0" (old_value), // else: "r" (&new_value) // old *ptr => edx:eax : "memory", "%ebx", "%ecx"); return prev; #else NotImplementedFatalError("NoBarrier_CompareAndSwap"); return 0; #endif } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { #if 0 // Not implemented __asm__ __volatile__( "movl (%2), %%ebx\n\t" // Move 64-bit new_value into "movl 4(%2), %%ecx\n\t" // ecx:ebx "0:\n\t" "movl %1, %%eax\n\t" // Read contents of ptr into "movl 4%1, %%edx\n\t" // edx:eax "lock; cmpxchg8b %1\n\t" // Attempt cmpxchg; if *ptr "jnz 0b\n\t" // is no longer edx:eax, loop : "=A" (new_value) : "m" (*ptr), "r" (&new_value) : "memory", "%ebx", "%ecx"); return new_value; // Now it's the previous value. 
#else NotImplementedFatalError("NoBarrier_AtomicExchange"); return 0; #endif } inline void NoBarrier_Store(volatile Atomic64* ptrValue, Atomic64 value) { __asm { movq mm0, value; // Use mmx reg for 64-bit atomic moves mov eax, ptrValue; movq [eax], mm0; emms; // Empty mmx state to enable FP registers } } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { NoBarrier_AtomicExchange(ptr, value); // acts as a barrier in this implementation } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { NoBarrier_Store(ptr, value); } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptrValue) { Atomic64 value; __asm { mov eax, ptrValue; movq mm0, [eax]; // Use mmx reg for 64-bit atomic moves movq value, mm0; emms; // Empty mmx state to enable FP registers } return value; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = NoBarrier_Load(ptr); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return NoBarrier_Load(ptr); } #endif // defined(_WIN64) || defined(__MINGW64__) inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { // FastInterlockedExchange has both acquire and release memory barriers. return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { // FastInterlockedExchange has both acquire and release memory barriers. return NoBarrier_AtomicExchange(ptr, new_value); } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } } // namespace base::subtle } // namespace base #endif // BASE_ATOMICOPS_INTERNALS_WINDOWS_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/sysinfo.h
.h
10,330
233
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2006, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // All functions here are thread-hostile due to file caching unless // commented otherwise. 
#ifndef _SYSINFO_H_ #define _SYSINFO_H_ #include <config.h> #include <time.h> #if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__)) #include <windows.h> // for DWORD #include <tlhelp32.h> // for CreateToolhelp32Snapshot #endif #ifdef HAVE_UNISTD_H #include <unistd.h> // for pid_t #endif #include <stddef.h> // for size_t #include <limits.h> // for PATH_MAX #include "base/basictypes.h" #include "base/logging.h" // for RawFD // This getenv function is safe to call before the C runtime is initialized. // On Windows, it utilizes GetEnvironmentVariable() and on unix it uses // /proc/self/environ instead calling getenv(). It's intended to be used in // routines that run before main(), when the state required for getenv() may // not be set up yet. In particular, errno isn't set up until relatively late // (after the pthreads library has a chance to make it threadsafe), and // getenv() doesn't work until then. // On some platforms, this call will utilize the same, static buffer for // repeated GetenvBeforeMain() calls. Callers should not expect pointers from // this routine to be long lived. // Note that on unix, /proc only has the environment at the time the // application was started, so this routine ignores setenv() calls/etc. Also // note it only reads the first 16K of the environment. extern const char* GetenvBeforeMain(const char* name); // This takes as an argument an environment-variable name (like // CPUPROFILE) whose value is supposed to be a file-path, and sets // path to that path, and returns true. Non-trivial for surprising // reasons, as documented in sysinfo.cc. path must have space PATH_MAX. extern bool GetUniquePathFromEnv(const char* env_name, char* path); extern int GetSystemCPUsCount(); void SleepForMilliseconds(int milliseconds); // Return true if we're running POSIX (e.g., NPTL on Linux) threads, // as opposed to a non-POSIX thread library. 
The thing that we care // about is whether a thread's pid is the same as the thread that // spawned it. If so, this function returns true. // Thread-safe. // Note: We consider false negatives to be OK. bool HasPosixThreads(); #ifndef SWIG // SWIG doesn't like struct Buffer and variable arguments. // A ProcMapsIterator abstracts access to /proc/maps for a given // process. Needs to be stack-allocatable and avoid using stdio/malloc // so it can be used in the google stack dumper, heap-profiler, etc. // // On Windows and Mac OS X, this iterator iterates *only* over DLLs // mapped into this process space. For Linux, FreeBSD, and Solaris, // it iterates over *all* mapped memory regions, including anonymous // mmaps. For other O/Ss, it is unlikely to work at all, and Valid() // will always return false. Also note: this routine only works on // FreeBSD if procfs is mounted: make sure this is in your /etc/fstab: // proc /proc procfs rw 0 0 class ProcMapsIterator { public: struct Buffer { #ifdef __FreeBSD__ // FreeBSD requires us to read all of the maps file at once, so // we have to make a buffer that's "always" big enough static const size_t kBufSize = 102400; #else // a one-line buffer is good enough static const size_t kBufSize = PATH_MAX + 1024; #endif char buf_[kBufSize]; }; // Create a new iterator for the specified pid. pid can be 0 for "self". explicit ProcMapsIterator(pid_t pid); // Create an iterator with specified storage (for use in signal // handler). "buffer" should point to a ProcMapsIterator::Buffer // buffer can be NULL in which case a bufer will be allocated. ProcMapsIterator(pid_t pid, Buffer *buffer); // Iterate through maps_backing instead of maps if use_maps_backing // is true. Otherwise the same as above. buffer can be NULL and // it will allocate a buffer itself. 
ProcMapsIterator(pid_t pid, Buffer *buffer, bool use_maps_backing); // Returns true if the iterator successfully initialized; bool Valid() const; // Returns a pointer to the most recently parsed line. Only valid // after Next() returns true, and until the iterator is destroyed or // Next() is called again. This may give strange results on non-Linux // systems. Prefer FormatLine() if that may be a concern. const char *CurrentLine() const { return stext_; } // Writes the "canonical" form of the /proc/xxx/maps info for a single // line to the passed-in buffer. Returns the number of bytes written, // or 0 if it was not able to write the complete line. (To guarantee // success, buffer should have size at least Buffer::kBufSize.) // Takes as arguments values set via a call to Next(). The // "canonical" form of the line (taken from linux's /proc/xxx/maps): // <start_addr(hex)>-<end_addr(hex)> <perms(rwxp)> <offset(hex)> + // <major_dev(hex)>:<minor_dev(hex)> <inode> <filename> Note: the // eg // 08048000-0804c000 r-xp 00000000 03:01 3793678 /bin/cat // If you don't have the dev_t (dev), feel free to pass in 0. // (Next() doesn't return a dev_t, though NextExt does.) // // Note: if filename and flags were obtained via a call to Next(), // then the output of this function is only valid if Next() returned // true, and only until the iterator is destroyed or Next() is // called again. (Since filename, at least, points into CurrentLine.) static int FormatLine(char* buffer, int bufsize, uint64 start, uint64 end, const char *flags, uint64 offset, int64 inode, const char *filename, dev_t dev); // Find the next entry in /proc/maps; return true if found or false // if at the end of the file. // // Any of the result pointers can be NULL if you're not interested // in those values. // // If "flags" and "filename" are passed, they end up pointing to // storage within the ProcMapsIterator that is valid only until the // iterator is destroyed or Next() is called again. 
The caller may // modify the contents of these strings (up as far as the first NUL, // and only until the subsequent call to Next()) if desired. // The offsets are all uint64 in order to handle the case of a // 32-bit process running on a 64-bit kernel // // IMPORTANT NOTE: see top-of-class notes for details about what // mapped regions Next() iterates over, depending on O/S. // TODO(csilvers): make flags and filename const. bool Next(uint64 *start, uint64 *end, char **flags, uint64 *offset, int64 *inode, char **filename); bool NextExt(uint64 *start, uint64 *end, char **flags, uint64 *offset, int64 *inode, char **filename, uint64 *file_mapping, uint64 *file_pages, uint64 *anon_mapping, uint64 *anon_pages, dev_t *dev); ~ProcMapsIterator(); private: void Init(pid_t pid, Buffer *buffer, bool use_maps_backing); char *ibuf_; // input buffer char *stext_; // start of text char *etext_; // end of text char *nextline_; // start of next line char *ebuf_; // end of buffer (1 char for a nul) #if (defined(_WIN32) || defined(__MINGW32__)) && (!defined(__CYGWIN__) && !defined(__CYGWIN32__)) HANDLE snapshot_; // filehandle on dll info // In a change from the usual W-A pattern, there is no A variant of // MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A. // We want the original A variants, and this #undef is the only // way I see to get them. Redefining it when we're done prevents us // from affecting other .cc files. # ifdef MODULEENTRY32 // Alias of W # undef MODULEENTRY32 MODULEENTRY32 module_; // info about current dll (and dll iterator) # define MODULEENTRY32 MODULEENTRY32W # else // It's the ascii, the one we want. 
MODULEENTRY32 module_; // info about current dll (and dll iterator) # endif #elif defined(__MACH__) int current_image_; // dll's are called "images" in macos parlance int current_load_cmd_; // the segment of this dll we're examining #elif defined(__sun__) // Solaris int fd_; char current_filename_[PATH_MAX]; #else int fd_; // filehandle on /proc/*/maps #endif pid_t pid_; char flags_[10]; Buffer* dynamic_buffer_; // dynamically-allocated Buffer bool using_maps_backing_; // true if we are looking at maps_backing instead of maps. }; #endif /* #ifndef SWIG */ // Helper routines namespace tcmalloc { int FillProcSelfMaps(char buf[], int size, bool* wrote_all); void DumpProcSelfMaps(RawFD fd); } #endif /* #ifndef _SYSINFO_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-mips.h
.h
10,005
324
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2013, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com> // based on atomicops-internals by Sanjay Ghemawat // This file is an internal atomic implementation, use base/atomicops.h instead. // // This code implements MIPS atomics. 
#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_ #define BASE_ATOMICOPS_INTERNALS_MIPS_H_ #if (_MIPS_ISA == _MIPS_ISA_MIPS64) #define BASE_HAS_ATOMIC64 1 #endif typedef int32_t Atomic32; namespace base { namespace subtle { // Atomically execute: // result = *ptr; // if (*ptr == old_value) // *ptr = new_value; // return result; // // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". // Always return the old value of "*ptr" // // This routine implies no memory barriers. inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev, tmp; __asm__ volatile( ".set push \n" ".set noreorder \n" "1: \n" "ll %0, %5 \n" // prev = *ptr "bne %0, %3, 2f \n" // if (prev != old_value) goto 2 " move %2, %4 \n" // tmp = new_value "sc %2, %1 \n" // *ptr = tmp (with atomic check) "beqz %2, 1b \n" // start again on atomic error " nop \n" // delay slot nop "2: \n" ".set pop \n" : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) : "Ir" (old_value), "r" (new_value), "m" (*ptr) : "memory" ); return prev; } // Atomically store new_value into *ptr, returning the previous value held in // *ptr. This routine implies no memory barriers. inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 temp, old; __asm__ volatile( ".set push \n" ".set noreorder \n" "1: \n" "ll %1, %2 \n" // old = *ptr "move %0, %3 \n" // temp = new_value "sc %0, %2 \n" // *ptr = temp (with atomic check) "beqz %0, 1b \n" // start again on atomic error " nop \n" // delay slot nop ".set pop \n" : "=&r" (temp), "=&r" (old), "=m" (*ptr) : "r" (new_value), "m" (*ptr) : "memory" ); return old; } inline void MemoryBarrier() { __asm__ volatile("sync" : : : "memory"); } // "Acquire" operations // ensure that no later memory access can be reordered ahead of the operation. // "Release" operations ensure that no previous memory access can be reordered // after the operation. 
"Barrier" operations have both "Acquire" and "Release" // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory // access. inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); MemoryBarrier(); return res; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { MemoryBarrier(); Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); return res; } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value); MemoryBarrier(); return old_value; } inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { MemoryBarrier(); return NoBarrier_AtomicExchange(ptr, new_value); } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } #if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64) typedef int64_t Atomic64; inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev, tmp; __asm__ volatile( ".set push \n" ".set noreorder \n" "1: \n" "lld %0, %5 \n" // prev = *ptr "bne %0, %3, 2f \n" // if (prev != old_value) goto 2 " move %2, %4 \n" // tmp = new_value "scd %2, %1 \n" // *ptr = tmp (with atomic check) "beqz %2, 1b \n" // start again on atomic error " nop \n" // delay slot nop "2: 
\n" ".set pop \n" : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) : "Ir" (old_value), "r" (new_value), "m" (*ptr) : "memory" ); return prev; } inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 temp, old; __asm__ volatile( ".set push \n" ".set noreorder \n" "1: \n" "lld %1, %2 \n" // old = *ptr "move %0, %3 \n" // temp = new_value "scd %0, %2 \n" // *ptr = temp (with atomic check) "beqz %0, 1b \n" // start again on atomic error " nop \n" // delay slot nop ".set pop \n" : "=&r" (temp), "=&r" (old), "=m" (*ptr) : "r" (new_value), "m" (*ptr) : "memory" ); return old; } inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value); MemoryBarrier(); return old_value; } inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); MemoryBarrier(); return res; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { MemoryBarrier(); Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); return res; } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { MemoryBarrier(); return NoBarrier_AtomicExchange(ptr, new_value); } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; MemoryBarrier(); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } #endif } // namespace base::subtle } // namespace base #endif // 
BASE_ATOMICOPS_INTERNALS_MIPS_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/dynamic_annotations.h
.h
27,439
628
/* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Kostya Serebryany */ /* This file defines dynamic annotations for use with dynamic analysis tool such as valgrind, PIN, etc. Dynamic annotation is a source code annotation that affects the generated code (that is, the annotation is not a comment). Each such annotation is attached to a particular instruction and/or to a particular object (address) in the program. 
The annotations that should be used by users are macros in all upper-case (e.g., ANNOTATE_NEW_MEMORY). Actual implementation of these macros may differ depending on the dynamic analysis tool being used. See http://code.google.com/p/data-race-test/ for more information. This file supports the following dynamic analysis tools: - None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero). Macros are defined empty. - ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1). Macros are defined as calls to non-inlinable empty functions that are intercepted by Valgrind. */ #ifndef BASE_DYNAMIC_ANNOTATIONS_H_ #define BASE_DYNAMIC_ANNOTATIONS_H_ #ifndef DYNAMIC_ANNOTATIONS_ENABLED # define DYNAMIC_ANNOTATIONS_ENABLED 0 #endif #if DYNAMIC_ANNOTATIONS_ENABLED != 0 /* ------------------------------------------------------------- Annotations useful when implementing condition variables such as CondVar, using conditional critical sections (Await/LockWhen) and when constructing user-defined synchronization mechanisms. The annotations ANNOTATE_HAPPENS_BEFORE() and ANNOTATE_HAPPENS_AFTER() can be used to define happens-before arcs in user-defined synchronization mechanisms: the race detector will infer an arc from the former to the latter when they share the same argument pointer. Example 1 (reference counting): void Unref() { ANNOTATE_HAPPENS_BEFORE(&refcount_); if (AtomicDecrementByOne(&refcount_) == 0) { ANNOTATE_HAPPENS_AFTER(&refcount_); delete this; } } Example 2 (message queue): void MyQueue::Put(Type *e) { MutexLock lock(&mu_); ANNOTATE_HAPPENS_BEFORE(e); PutElementIntoMyQueue(e); } Type *MyQueue::Get() { MutexLock lock(&mu_); Type *e = GetElementFromMyQueue(); ANNOTATE_HAPPENS_AFTER(e); return e; } Note: when possible, please use the existing reference counting and message queue implementations instead of inventing new ones. */ /* Report that wait on the condition variable at address "cv" has succeeded and the lock at address "lock" is held. 
*/ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \ AnnotateCondVarWait(__FILE__, __LINE__, cv, lock) /* Report that wait on the condition variable at "cv" has succeeded. Variant w/o lock. */ #define ANNOTATE_CONDVAR_WAIT(cv) \ AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL) /* Report that we are about to signal on the condition variable at address "cv". */ #define ANNOTATE_CONDVAR_SIGNAL(cv) \ AnnotateCondVarSignal(__FILE__, __LINE__, cv) /* Report that we are about to signal_all on the condition variable at "cv". */ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \ AnnotateCondVarSignalAll(__FILE__, __LINE__, cv) /* Annotations for user-defined synchronization mechanisms. */ #define ANNOTATE_HAPPENS_BEFORE(obj) ANNOTATE_CONDVAR_SIGNAL(obj) #define ANNOTATE_HAPPENS_AFTER(obj) ANNOTATE_CONDVAR_WAIT(obj) /* Report that the bytes in the range [pointer, pointer+size) are about to be published safely. The race checker will create a happens-before arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to subsequent accesses to this memory. Note: this annotation may not work properly if the race detector uses sampling, i.e. does not observe all memory accesses. */ #define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \ AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size) /* DEPRECATED. Don't use it. */ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) \ AnnotateUnpublishMemoryRange(__FILE__, __LINE__, pointer, size) /* DEPRECATED. Don't use it. */ #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) \ do { \ ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size); \ ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size); \ } while (0) /* Instruct the tool to create a happens-before arc between mu->Unlock() and mu->Lock(). This annotation may slow down the race detector and hide real races. Normally it is used only when it would be difficult to annotate each of the mutex's critical sections individually using the annotations above. 
This annotation makes sense only for hybrid race detectors. For pure happens-before detectors this is a no-op. For more details see http://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \ AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu) /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \ AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu) /* ------------------------------------------------------------- Annotations useful when defining memory allocators, or when memory that was protected in one way starts to be protected in another. */ /* Report that a new memory at "address" of size "size" has been allocated. This might be used when the memory has been retrieved from a free list and is about to be reused, or when a the locking discipline for a variable changes. */ #define ANNOTATE_NEW_MEMORY(address, size) \ AnnotateNewMemory(__FILE__, __LINE__, address, size) /* ------------------------------------------------------------- Annotations useful when defining FIFO queues that transfer data between threads. */ /* Report that the producer-consumer queue (such as ProducerConsumerQueue) at address "pcq" has been created. The ANNOTATE_PCQ_* annotations should be used only for FIFO queues. For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE (for put) and ANNOTATE_HAPPENS_AFTER (for get). */ #define ANNOTATE_PCQ_CREATE(pcq) \ AnnotatePCQCreate(__FILE__, __LINE__, pcq) /* Report that the queue at address "pcq" is about to be destroyed. */ #define ANNOTATE_PCQ_DESTROY(pcq) \ AnnotatePCQDestroy(__FILE__, __LINE__, pcq) /* Report that we are about to put an element into a FIFO queue at address "pcq". */ #define ANNOTATE_PCQ_PUT(pcq) \ AnnotatePCQPut(__FILE__, __LINE__, pcq) /* Report that we've just got an element from a FIFO queue at address "pcq". 
*/ #define ANNOTATE_PCQ_GET(pcq) \ AnnotatePCQGet(__FILE__, __LINE__, pcq) /* ------------------------------------------------------------- Annotations that suppress errors. It is usually better to express the program's synchronization using the other annotations, but these can be used when all else fails. */ /* Report that we may have a benign race at "pointer", with size "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the point where "pointer" has been allocated, preferably close to the point where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. */ #define ANNOTATE_BENIGN_RACE(pointer, description) \ AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \ sizeof(*(pointer)), description) /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to the memory range [address, address+size). */ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description) /* Request the analysis tool to ignore all reads in the current thread until ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey reads, while still checking other reads and all writes. See also ANNOTATE_UNPROTECTED_READ. */ #define ANNOTATE_IGNORE_READS_BEGIN() \ AnnotateIgnoreReadsBegin(__FILE__, __LINE__) /* Stop ignoring reads. */ #define ANNOTATE_IGNORE_READS_END() \ AnnotateIgnoreReadsEnd(__FILE__, __LINE__) /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */ #define ANNOTATE_IGNORE_WRITES_BEGIN() \ AnnotateIgnoreWritesBegin(__FILE__, __LINE__) /* Stop ignoring writes. */ #define ANNOTATE_IGNORE_WRITES_END() \ AnnotateIgnoreWritesEnd(__FILE__, __LINE__) /* Start ignoring all memory accesses (reads and writes). */ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ do {\ ANNOTATE_IGNORE_READS_BEGIN();\ ANNOTATE_IGNORE_WRITES_BEGIN();\ }while(0)\ /* Stop ignoring all memory accesses. 
*/ #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ do {\ ANNOTATE_IGNORE_WRITES_END();\ ANNOTATE_IGNORE_READS_END();\ }while(0)\ /* Enable (enable!=0) or disable (enable==0) race detection for all threads. This annotation could be useful if you want to skip expensive race analysis during some period of program execution, e.g. during initialization. */ #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ AnnotateEnableRaceDetection(__FILE__, __LINE__, enable) /* ------------------------------------------------------------- Annotations useful for debugging. */ /* Request to trace every access to "address". */ #define ANNOTATE_TRACE_MEMORY(address) \ AnnotateTraceMemory(__FILE__, __LINE__, address) /* Report the current thread name to a race detector. */ #define ANNOTATE_THREAD_NAME(name) \ AnnotateThreadName(__FILE__, __LINE__, name) /* ------------------------------------------------------------- Annotations useful when implementing locks. They are not normally needed by modules that merely use locks. The "lock" argument is a pointer to the lock object. */ /* Report that a lock has been created at address "lock". */ #define ANNOTATE_RWLOCK_CREATE(lock) \ AnnotateRWLockCreate(__FILE__, __LINE__, lock) /* Report that the lock at address "lock" is about to be destroyed. */ #define ANNOTATE_RWLOCK_DESTROY(lock) \ AnnotateRWLockDestroy(__FILE__, __LINE__, lock) /* Report that the lock at address "lock" has been acquired. is_w=1 for writer lock, is_w=0 for reader lock. */ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) /* Report that the lock at address "lock" is about to be released. */ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) /* ------------------------------------------------------------- Annotations useful when implementing barriers. They are not normally needed by modules that merely use barriers. The "barrier" argument is a pointer to the barrier object. 
*/ /* Report that the "barrier" has been initialized with initial "count". If 'reinitialization_allowed' is true, initialization is allowed to happen multiple times w/o calling barrier_destroy() */ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \ AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \ reinitialization_allowed) /* Report that we are about to enter barrier_wait("barrier"). */ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \ AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier) /* Report that we just exited barrier_wait("barrier"). */ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \ AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier) /* Report that the "barrier" has been destroyed. */ #define ANNOTATE_BARRIER_DESTROY(barrier) \ AnnotateBarrierDestroy(__FILE__, __LINE__, barrier) /* ------------------------------------------------------------- Annotations useful for testing race detectors. */ /* Report that we expect a race on the variable at "address". Use only in unit tests for a race detector. */ #define ANNOTATE_EXPECT_RACE(address, description) \ AnnotateExpectRace(__FILE__, __LINE__, address, description) /* A no-op. Insert where you like to test the interceptors. */ #define ANNOTATE_NO_OP(arg) \ AnnotateNoOp(__FILE__, __LINE__, arg) /* Force the race detector to flush its state. The actual effect depends on * the implementation of the detector. 
*/ #define ANNOTATE_FLUSH_STATE() \ AnnotateFlushState(__FILE__, __LINE__) #else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ #define ANNOTATE_RWLOCK_CREATE(lock) /* empty */ #define ANNOTATE_RWLOCK_DESTROY(lock) /* empty */ #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */ #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */ #define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */ #define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */ #define ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */ #define ANNOTATE_BARRIER_DESTROY(barrier) /* empty */ #define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */ #define ANNOTATE_CONDVAR_WAIT(cv) /* empty */ #define ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */ #define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */ #define ANNOTATE_HAPPENS_BEFORE(obj) /* empty */ #define ANNOTATE_HAPPENS_AFTER(obj) /* empty */ #define ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */ #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */ #define ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */ #define ANNOTATE_PCQ_CREATE(pcq) /* empty */ #define ANNOTATE_PCQ_DESTROY(pcq) /* empty */ #define ANNOTATE_PCQ_PUT(pcq) /* empty */ #define ANNOTATE_PCQ_GET(pcq) /* empty */ #define ANNOTATE_NEW_MEMORY(address, size) /* empty */ #define ANNOTATE_EXPECT_RACE(address, description) /* empty */ #define ANNOTATE_BENIGN_RACE(address, description) /* empty */ #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */ #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */ #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */ #define ANNOTATE_TRACE_MEMORY(arg) /* empty */ #define ANNOTATE_THREAD_NAME(name) /* empty */ #define ANNOTATE_IGNORE_READS_BEGIN() /* empty */ #define ANNOTATE_IGNORE_READS_END() /* empty */ #define ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */ #define ANNOTATE_IGNORE_WRITES_END() /* empty */ #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */ #define 
ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */ #define ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */ #define ANNOTATE_NO_OP(arg) /* empty */ #define ANNOTATE_FLUSH_STATE() /* empty */ #endif /* DYNAMIC_ANNOTATIONS_ENABLED */ /* Macro definitions for GCC attributes that allow static thread safety analysis to recognize and use some of the dynamic annotations as escape hatches. TODO(lcwu): remove the check for __SUPPORT_DYN_ANNOTATION__ once the default crosstool/GCC supports these GCC attributes. */ #define ANNOTALYSIS_STATIC_INLINE #define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ; #define ANNOTALYSIS_IGNORE_READS_BEGIN #define ANNOTALYSIS_IGNORE_READS_END #define ANNOTALYSIS_IGNORE_WRITES_BEGIN #define ANNOTALYSIS_IGNORE_WRITES_END #define ANNOTALYSIS_UNPROTECTED_READ #if defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__)) && \ defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__) #if DYNAMIC_ANNOTATIONS_ENABLED == 0 #define ANNOTALYSIS_ONLY 1 #undef ANNOTALYSIS_STATIC_INLINE #define ANNOTALYSIS_STATIC_INLINE static inline #undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY #define ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY { (void)file; (void)line; } #endif /* Only emit attributes when annotalysis is enabled. 
*/ #if defined(__SUPPORT_TS_ANNOTATION__) && defined(__SUPPORT_DYN_ANNOTATION__) #undef ANNOTALYSIS_IGNORE_READS_BEGIN #define ANNOTALYSIS_IGNORE_READS_BEGIN __attribute__ ((ignore_reads_begin)) #undef ANNOTALYSIS_IGNORE_READS_END #define ANNOTALYSIS_IGNORE_READS_END __attribute__ ((ignore_reads_end)) #undef ANNOTALYSIS_IGNORE_WRITES_BEGIN #define ANNOTALYSIS_IGNORE_WRITES_BEGIN __attribute__ ((ignore_writes_begin)) #undef ANNOTALYSIS_IGNORE_WRITES_END #define ANNOTALYSIS_IGNORE_WRITES_END __attribute__ ((ignore_writes_end)) #undef ANNOTALYSIS_UNPROTECTED_READ #define ANNOTALYSIS_UNPROTECTED_READ __attribute__ ((unprotected_read)) #endif #endif // defined(__GNUC__) && (!defined(SWIG)) && (!defined(__clang__)) /* Use the macros above rather than using these functions directly. */ #ifdef __cplusplus extern "C" { #endif void AnnotateRWLockCreate(const char *file, int line, const volatile void *lock); void AnnotateRWLockDestroy(const char *file, int line, const volatile void *lock); void AnnotateRWLockAcquired(const char *file, int line, const volatile void *lock, long is_w); void AnnotateRWLockReleased(const char *file, int line, const volatile void *lock, long is_w); void AnnotateBarrierInit(const char *file, int line, const volatile void *barrier, long count, long reinitialization_allowed); void AnnotateBarrierWaitBefore(const char *file, int line, const volatile void *barrier); void AnnotateBarrierWaitAfter(const char *file, int line, const volatile void *barrier); void AnnotateBarrierDestroy(const char *file, int line, const volatile void *barrier); void AnnotateCondVarWait(const char *file, int line, const volatile void *cv, const volatile void *lock); void AnnotateCondVarSignal(const char *file, int line, const volatile void *cv); void AnnotateCondVarSignalAll(const char *file, int line, const volatile void *cv); void AnnotatePublishMemoryRange(const char *file, int line, const volatile void *address, long size); void AnnotateUnpublishMemoryRange(const char 
*file, int line, const volatile void *address, long size); void AnnotatePCQCreate(const char *file, int line, const volatile void *pcq); void AnnotatePCQDestroy(const char *file, int line, const volatile void *pcq); void AnnotatePCQPut(const char *file, int line, const volatile void *pcq); void AnnotatePCQGet(const char *file, int line, const volatile void *pcq); void AnnotateNewMemory(const char *file, int line, const volatile void *address, long size); void AnnotateExpectRace(const char *file, int line, const volatile void *address, const char *description); void AnnotateBenignRace(const char *file, int line, const volatile void *address, const char *description); void AnnotateBenignRaceSized(const char *file, int line, const volatile void *address, long size, const char *description); void AnnotateMutexIsUsedAsCondVar(const char *file, int line, const volatile void *mu); void AnnotateTraceMemory(const char *file, int line, const volatile void *arg); void AnnotateThreadName(const char *file, int line, const char *name); ANNOTALYSIS_STATIC_INLINE void AnnotateIgnoreReadsBegin(const char *file, int line) ANNOTALYSIS_IGNORE_READS_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ANNOTALYSIS_STATIC_INLINE void AnnotateIgnoreReadsEnd(const char *file, int line) ANNOTALYSIS_IGNORE_READS_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ANNOTALYSIS_STATIC_INLINE void AnnotateIgnoreWritesBegin(const char *file, int line) ANNOTALYSIS_IGNORE_WRITES_BEGIN ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY ANNOTALYSIS_STATIC_INLINE void AnnotateIgnoreWritesEnd(const char *file, int line) ANNOTALYSIS_IGNORE_WRITES_END ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY void AnnotateEnableRaceDetection(const char *file, int line, int enable); void AnnotateNoOp(const char *file, int line, const volatile void *arg); void AnnotateFlushState(const char *file, int line); /* Return non-zero value if running under valgrind. If "valgrind.h" is included into dynamic_annotations.c, the regular valgrind mechanism will be used. 
See http://valgrind.org/docs/manual/manual-core-adv.html about RUNNING_ON_VALGRIND and other valgrind "client requests". The file "valgrind.h" may be obtained by doing svn co svn://svn.valgrind.org/valgrind/trunk/include If for some reason you can't use "valgrind.h" or want to fake valgrind, there are two ways to make this function return non-zero: - Use environment variable: export RUNNING_ON_VALGRIND=1 - Make your tool intercept the function RunningOnValgrind() and change its return value. */ int RunningOnValgrind(void); /* ValgrindSlowdown returns: * 1.0, if (RunningOnValgrind() == 0) * 50.0, if (RunningOnValgrind() != 0 && getenv("VALGRIND_SLOWDOWN") == NULL) * atof(getenv("VALGRIND_SLOWDOWN")) otherwise This function can be used to scale timeout values: EXAMPLE: for (;;) { DoExpensiveBackgroundTask(); SleepForSeconds(5 * ValgrindSlowdown()); } */ double ValgrindSlowdown(void); #ifdef __cplusplus } #endif #if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus) /* ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. Instead of doing ANNOTATE_IGNORE_READS_BEGIN(); ... = x; ANNOTATE_IGNORE_READS_END(); one can use ... = ANNOTATE_UNPROTECTED_READ(x); */ template <class T> inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) ANNOTALYSIS_UNPROTECTED_READ { ANNOTATE_IGNORE_READS_BEGIN(); T res = x; ANNOTATE_IGNORE_READS_END(); return res; } /* Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable. 
*/ #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ namespace { \ class static_var ## _annotator { \ public: \ static_var ## _annotator() { \ ANNOTATE_BENIGN_RACE_SIZED(&static_var, \ sizeof(static_var), \ # static_var ": " description); \ } \ }; \ static static_var ## _annotator the ## static_var ## _annotator;\ } #else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */ #define ANNOTATE_UNPROTECTED_READ(x) (x) #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */ #endif /* DYNAMIC_ANNOTATIONS_ENABLED */ /* Annotalysis, a GCC based static analyzer, is able to understand and use some of the dynamic annotations defined in this file. However, dynamic annotations are usually disabled in the opt mode (to avoid additional runtime overheads) while Annotalysis only works in the opt mode. In order for Annotalysis to use these dynamic annotations when they are disabled, we re-define these annotations here. Note that unlike the original macro definitions above, these macros are expanded to calls to static inline functions so that the compiler will be able to remove the calls after the analysis. */ #ifdef ANNOTALYSIS_ONLY #undef ANNOTALYSIS_ONLY /* Undefine and re-define the macros that the static analyzer understands. 
*/ #undef ANNOTATE_IGNORE_READS_BEGIN #define ANNOTATE_IGNORE_READS_BEGIN() \ AnnotateIgnoreReadsBegin(__FILE__, __LINE__) #undef ANNOTATE_IGNORE_READS_END #define ANNOTATE_IGNORE_READS_END() \ AnnotateIgnoreReadsEnd(__FILE__, __LINE__) #undef ANNOTATE_IGNORE_WRITES_BEGIN #define ANNOTATE_IGNORE_WRITES_BEGIN() \ AnnotateIgnoreWritesBegin(__FILE__, __LINE__) #undef ANNOTATE_IGNORE_WRITES_END #define ANNOTATE_IGNORE_WRITES_END() \ AnnotateIgnoreWritesEnd(__FILE__, __LINE__) #undef ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ do { \ ANNOTATE_IGNORE_READS_BEGIN(); \ ANNOTATE_IGNORE_WRITES_BEGIN(); \ }while(0) \ #undef ANNOTATE_IGNORE_READS_AND_WRITES_END #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ do { \ ANNOTATE_IGNORE_WRITES_END(); \ ANNOTATE_IGNORE_READS_END(); \ }while(0) \ #if defined(__cplusplus) #undef ANNOTATE_UNPROTECTED_READ template <class T> inline T ANNOTATE_UNPROTECTED_READ(const volatile T &x) ANNOTALYSIS_UNPROTECTED_READ { ANNOTATE_IGNORE_READS_BEGIN(); T res = x; ANNOTATE_IGNORE_READS_END(); return res; } #endif /* __cplusplus */ #endif /* ANNOTALYSIS_ONLY */ /* Undefine the macros intended only in this file. */ #undef ANNOTALYSIS_STATIC_INLINE #undef ANNOTALYSIS_SEMICOLON_OR_EMPTY_BODY #endif /* BASE_DYNAMIC_ANNOTATIONS_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/sysinfo.cc
.cc
33,643
892
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2006, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <config.h> #if (defined(_WIN32) || defined(__MINGW32__)) && !defined(__CYGWIN__) && !defined(__CYGWIN32) # define PLATFORM_WINDOWS 1 #endif #include <ctype.h> // for isspace() #include <stdlib.h> // for getenv() #include <stdio.h> // for snprintf(), sscanf() #include <string.h> // for memmove(), memchr(), etc. 
#include <fcntl.h> // for open() #include <errno.h> // for errno #ifdef HAVE_UNISTD_H #include <unistd.h> // for read() #endif #if defined __MACH__ // Mac OS X, almost certainly #include <mach-o/dyld.h> // for iterating over dll's in ProcMapsIter #include <mach-o/loader.h> // for iterating over dll's in ProcMapsIter #include <sys/types.h> #include <sys/sysctl.h> // how we figure out numcpu's on OS X #elif defined __FreeBSD__ #include <sys/sysctl.h> #elif defined __sun__ // Solaris #include <procfs.h> // for, e.g., prmap_t #elif defined(PLATFORM_WINDOWS) #include <process.h> // for getpid() (actually, _getpid()) #include <shlwapi.h> // for SHGetValueA() #include <tlhelp32.h> // for Module32First() #endif #include "base/sysinfo.h" #include "base/commandlineflags.h" #include "base/dynamic_annotations.h" // for RunningOnValgrind #include "base/logging.h" #ifdef PLATFORM_WINDOWS #ifdef MODULEENTRY32 // In a change from the usual W-A pattern, there is no A variant of // MODULEENTRY32. Tlhelp32.h #defines the W variant, but not the A. // In unicode mode, tlhelp32.h #defines MODULEENTRY32 to be // MODULEENTRY32W. These #undefs are the only way I see to get back // access to the original, ascii struct (and related functions). #undef MODULEENTRY32 #undef Module32First #undef Module32Next #undef PMODULEENTRY32 #undef LPMODULEENTRY32 #endif /* MODULEENTRY32 */ // MinGW doesn't seem to define this, perhaps some windowsen don't either. #ifndef TH32CS_SNAPMODULE32 #define TH32CS_SNAPMODULE32 0 #endif /* TH32CS_SNAPMODULE32 */ #endif /* PLATFORM_WINDOWS */ // Re-run fn until it doesn't cause EINTR. #define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR) // open/read/close can set errno, which may be illegal at this // time, so prefer making the syscalls directly if we can. 
#ifdef HAVE_SYS_SYSCALL_H
# include <sys/syscall.h>
#endif
#ifdef SYS_open   // solaris 11, at least sometimes, only defines SYS_openat
# define safeopen(filename, mode) syscall(SYS_open, filename, mode)
#else
# define safeopen(filename, mode) open(filename, mode)
#endif
#ifdef SYS_read
# define saferead(fd, buffer, size) syscall(SYS_read, fd, buffer, size)
#else
# define saferead(fd, buffer, size) read(fd, buffer, size)
#endif
#ifdef SYS_close
# define safeclose(fd) syscall(SYS_close, fd)
#else
# define safeclose(fd) close(fd)
#endif

// ----------------------------------------------------------------------
// GetenvBeforeMain()
// GetUniquePathFromEnv()
//    Some non-trivial getenv-related functions.
// ----------------------------------------------------------------------

// we reimplement memcmp and friends to avoid depending on any glibc
// calls too early in the process lifetime. This allows us to use
// GetenvBeforeMain from inside ifunc handler

// Byte-wise comparison of the first |n| bytes of |_a| and |_b|; returns
// <0, 0 or >0 exactly like memcmp(), but with no libc dependency.
static int slow_memcmp(const void *_a, const void *_b, size_t n) {
  const uint8_t *pa = reinterpret_cast<const uint8_t *>(_a);
  const uint8_t *pb = reinterpret_cast<const uint8_t *>(_b);
  for (size_t i = 0; i < n; i++) {
    if (pa[i] != pb[i]) {
      return (pa[i] < pb[i]) ? -1 : 1;
    }
  }
  return 0;
}

// memchr() replacement: returns a pointer to the first occurrence of
// byte |c| within the first |n| bytes of |s|, or 0 if it is absent.
static const char *slow_memchr(const char *s, int c, size_t n) {
  const uint8_t want = static_cast<uint8_t>(c);
  for (size_t i = 0; i < n; i++) {
    if (s[i] == want) {
      return s + i;
    }
  }
  return 0;
}

// strlen() replacement built on slow_memchr (scan until the NUL byte).
static size_t slow_strlen(const char *s) {
  const char *nul = slow_memchr(s, '\0', static_cast<size_t>(-1));
  return nul - s;
}

// It's not safe to call getenv() in the malloc hooks, because they
// might be called extremely early, before libc is done setting up
// correctly. In particular, the thread library may not be done
// setting up errno.  So instead, we use the built-in __environ array
// if it exists, and otherwise read /proc/self/environ directly, using
// system calls to read the file, and thus avoid setting errno.
// /proc/self/environ has a limit of how much data it exports (around
// 8K), so it's not an ideal solution.
//
// Returns the value of environment variable |name|, or NULL if unset.
// Unlike getenv(), this is safe to call extremely early (e.g. from
// malloc hooks), before libc has finished initializing errno/TLS.
const char* GetenvBeforeMain(const char* name) {
  const int namelen = slow_strlen(name);
#if defined(HAVE___ENVIRON)   // if we have it, it's declared in unistd.h
  if (__environ) {            // can exist but be NULL, if statically linked
    // Scan libc's environment array directly -- no libc calls needed.
    for (char** p = __environ; *p; p++) {
      if (!slow_memcmp(*p, name, namelen) && (*p)[namelen] == '=')
        return *p + namelen+1;
    }
    return NULL;
  }
#endif
#if defined(PLATFORM_WINDOWS)
  // TODO(mbelshe) - repeated calls to this function will overwrite the
  // contents of the static buffer.
  static char envvar_buf[1024];  // enough to hold any envvar we care about
  if (!GetEnvironmentVariableA(name, envvar_buf, sizeof(envvar_buf)-1))
    return NULL;
  return envvar_buf;
#endif
  // static is ok because this function should only be called before
  // main(), when we're single-threaded.
  static char envbuf[16<<10];
  if (*envbuf == '\0') {    // haven't read the environ yet
    // Read /proc/self/environ via raw syscalls so errno is untouched.
    int fd = safeopen("/proc/self/environ", O_RDONLY);
    // The -2 below guarantees the last two bytes of the buffer will be \0\0
    if (fd == -1 ||           // unable to open the file, fall back onto libc
        saferead(fd, envbuf, sizeof(envbuf) - 2) < 0) { // error reading file
      RAW_VLOG(1, "Unable to open /proc/self/environ, falling back "
               "on getenv(\"%s\"), which may not work", name);
      if (fd != -1) safeclose(fd);
      return getenv(name);
    }
    safeclose(fd);
  }
  const char* p = envbuf;
  while (*p != '\0') {    // will happen at the \0\0 that terminates the buffer
    // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
    const char* endp = (char*)slow_memchr(p, '\0',
                                          sizeof(envbuf) - (p - envbuf));
    if (endp == NULL)            // this entry isn't NUL terminated
      return NULL;
    else if (!slow_memcmp(p, name, namelen) && p[namelen] == '=')    // it's a match
      return p + namelen+1;      // point after =
    p = endp + 1;
  }
  return NULL;                   // env var never found
}

extern "C" {
  // C-linkage wrapper so C code (tcmalloc internals) can reach the
  // early-boot-safe getenv above.
  const char* TCMallocGetenvSafe(const char* name) {
    return GetenvBeforeMain(name);
  }
}

// This takes as an argument an environment-variable name (like
// CPUPROFILE) whose value is supposed to be a file-path, and sets
// path to that path, and returns true.  If the env var doesn't exist,
// or is the empty string, leave path unchanged and returns false.
// The reason this is non-trivial is that this function handles munged
// pathnames.  Here's why:
//
// If we're a child process of the 'main' process, we can't just use
// getenv("CPUPROFILE") -- the parent process will be using that path.
// Instead we append our pid to the pathname.  How do we tell if we're a
// child process?  Ideally we'd set an environment variable that all
// our children would inherit.  But -- and this is seemingly a bug in
// gcc -- if you do a setenv() in a shared library in a global
// constructor, the environment setting is lost by the time main() is
// called.  The only safe thing we can do in such a situation is to
// modify the existing envvar.  So we do a hack: in the parent, we set
// the high bit of the 1st char of CPUPROFILE.  In the child, we
// notice the high bit is set and append the pid().  This works
// assuming cpuprofile filenames don't normally have the high bit set
// in their first character!  If that assumption is violated, we'll
// still get a profile, but one with an unexpected name.
// TODO(csilvers): set an envvar instead when we can do it reliably.
bool GetUniquePathFromEnv(const char* env_name, char* path) { char* envval = getenv(env_name); if (envval == NULL || *envval == '\0') return false; if (envval[0] & 128) { // high bit is set snprintf(path, PATH_MAX, "%c%s_%u", // add pid and clear high bit envval[0] & 127, envval+1, (unsigned int)(getpid())); } else { snprintf(path, PATH_MAX, "%s", envval); envval[0] |= 128; // set high bit for kids to see } return true; } void SleepForMilliseconds(int milliseconds) { #ifdef PLATFORM_WINDOWS _sleep(milliseconds); // Windows's _sleep takes milliseconds argument #else // Sleep for a few milliseconds struct timespec sleep_time; sleep_time.tv_sec = milliseconds / 1000; sleep_time.tv_nsec = (milliseconds % 1000) * 1000000; while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) ; // Ignore signals and wait for the full interval to elapse. #endif } int GetSystemCPUsCount() { #if defined(PLATFORM_WINDOWS) // Get the number of processors. SYSTEM_INFO info; GetSystemInfo(&info); return info.dwNumberOfProcessors; #else long rv = sysconf(_SC_NPROCESSORS_ONLN); if (rv < 0) { return 1; } return static_cast<int>(rv); #endif } // ---------------------------------------------------------------------- #if defined __linux__ || defined __FreeBSD__ || defined __sun__ || defined __CYGWIN__ || defined __CYGWIN32__ static void ConstructFilename(const char* spec, pid_t pid, char* buf, int buf_size) { CHECK_LT(snprintf(buf, buf_size, spec, static_cast<int>(pid ? pid : getpid())), buf_size); } #endif // A templatized helper function instantiated for Mach (OS X) only. // It can handle finding info for both 32 bits and 64 bits. // Returns true if it successfully handled the hdr, false else. 
#ifdef __MACH__          // Mac OS X, almost certainly

// Examines load command number |current_load_cmd| of dyld image number
// |current_image|.  If |hdr| matches |kMagic| and that load command is
// a segment command of kind |kLCSegment|, fills in whichever
// out-parameters are non-NULL (slide-adjusted start/end addresses,
// image name, etc.) and returns true; otherwise returns false.
template<uint32_t kMagic, uint32_t kLCSegment,
         typename MachHeader, typename SegmentCommand>
static bool NextExtMachHelper(const mach_header* hdr,
                              int current_image, int current_load_cmd,
                              uint64 *start, uint64 *end, char **flags,
                              uint64 *offset, int64 *inode, char **filename,
                              uint64 *file_mapping, uint64 *file_pages,
                              uint64 *anon_mapping, uint64 *anon_pages,
                              dev_t *dev) {
  // Mach-O has no per-segment permission string here; report "r-xp".
  static char kDefaultPerms[5] = "r-xp";
  if (hdr->magic != kMagic)
    return false;
  const char* lc = (const char *)hdr + sizeof(MachHeader);
  // TODO(csilvers): make this not-quadradic (increment and hold state)
  for (int j = 0; j < current_load_cmd; j++)  // advance to *our* load_cmd
    lc += ((const load_command *)lc)->cmdsize;
  if (((const load_command *)lc)->cmd == kLCSegment) {
    // Adjust the segment's stated vmaddr by the image's actual slide.
    const intptr_t dlloff = _dyld_get_image_vmaddr_slide(current_image);
    const SegmentCommand* sc = (const SegmentCommand *)lc;
    if (start) *start = sc->vmaddr + dlloff;
    if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
    if (flags) *flags = kDefaultPerms;   // can we do better?
    if (offset) *offset = sc->fileoff;
    if (inode) *inode = 0;
    if (filename)
      *filename = const_cast<char*>(_dyld_get_image_name(current_image));
    if (file_mapping) *file_mapping = 0;
    if (file_pages) *file_pages = 0;   // could we use sc->filesize?
    if (anon_mapping) *anon_mapping = 0;
    if (anon_pages) *anon_pages = 0;
    if (dev) *dev = 0;
    return true;
  }
  return false;
}
#endif

// Finds |c| in |text|, and assign '\0' at the found position.
// The original character at the modified position should be |c|.
// A pointer to the modified position is stored in |endptr|.
// |endptr| should not be NULL.
// NOTE: temporarily mutates |text|; callers are expected to restore
// the overwritten delimiter afterwards.
static bool ExtractUntilChar(char *text, int c, char **endptr) {
  CHECK_NE(text, NULL);
  CHECK_NE(endptr, NULL);
  char *found;
  found = strchr(text, c);
  if (found == NULL) {
    *endptr = NULL;
    return false;
  }

  *endptr = found;
  *found = '\0';
  return true;
}

// Increments |*text_pointer| while it points a whitespace character.
// It is to follow sscanf's whilespace handling. static void SkipWhileWhitespace(char **text_pointer, int c) { if (isspace(c)) { while (isspace(**text_pointer) && isspace(*((*text_pointer) + 1))) { ++(*text_pointer); } } } template<class T> static T StringToInteger(char *text, char **endptr, int base) { assert(false); return T(); } template<> int StringToInteger<int>(char *text, char **endptr, int base) { return strtol(text, endptr, base); } template<> int64 StringToInteger<int64>(char *text, char **endptr, int base) { return strtoll(text, endptr, base); } template<> uint64 StringToInteger<uint64>(char *text, char **endptr, int base) { return strtoull(text, endptr, base); } template<typename T> static T StringToIntegerUntilChar( char *text, int base, int c, char **endptr_result) { CHECK_NE(endptr_result, NULL); *endptr_result = NULL; char *endptr_extract; if (!ExtractUntilChar(text, c, &endptr_extract)) return 0; T result; char *endptr_strto; result = StringToInteger<T>(text, &endptr_strto, base); *endptr_extract = c; if (endptr_extract != endptr_strto) return 0; *endptr_result = endptr_extract; SkipWhileWhitespace(endptr_result, c); return result; } static char *CopyStringUntilChar( char *text, unsigned out_len, int c, char *out) { char *endptr; if (!ExtractUntilChar(text, c, &endptr)) return NULL; strncpy(out, text, out_len); out[out_len-1] = '\0'; *endptr = c; SkipWhileWhitespace(&endptr, c); return endptr; } template<typename T> static bool StringToIntegerUntilCharWithCheck( T *outptr, char *text, int base, int c, char **endptr) { *outptr = StringToIntegerUntilChar<T>(*endptr, base, c, endptr); if (*endptr == NULL || **endptr == '\0') return false; ++(*endptr); return true; } static bool ParseProcMapsLine(char *text, uint64 *start, uint64 *end, char *flags, uint64 *offset, int *major, int *minor, int64 *inode, unsigned *filename_offset) { #if defined(__linux__) /* * It's similar to: * sscanf(text, "%"SCNx64"-%"SCNx64" %4s %"SCNx64" %x:%x %"SCNd64" %n", * start, 
end, flags, offset, major, minor, inode, filename_offset) */ char *endptr = text; if (endptr == NULL || *endptr == '\0') return false; if (!StringToIntegerUntilCharWithCheck(start, endptr, 16, '-', &endptr)) return false; if (!StringToIntegerUntilCharWithCheck(end, endptr, 16, ' ', &endptr)) return false; endptr = CopyStringUntilChar(endptr, 5, ' ', flags); if (endptr == NULL || *endptr == '\0') return false; ++endptr; if (!StringToIntegerUntilCharWithCheck(offset, endptr, 16, ' ', &endptr)) return false; if (!StringToIntegerUntilCharWithCheck(major, endptr, 16, ':', &endptr)) return false; if (!StringToIntegerUntilCharWithCheck(minor, endptr, 16, ' ', &endptr)) return false; if (!StringToIntegerUntilCharWithCheck(inode, endptr, 10, ' ', &endptr)) return false; *filename_offset = (endptr - text); return true; #else return false; #endif } ProcMapsIterator::ProcMapsIterator(pid_t pid) { Init(pid, NULL, false); } ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer) { Init(pid, buffer, false); } ProcMapsIterator::ProcMapsIterator(pid_t pid, Buffer *buffer, bool use_maps_backing) { Init(pid, buffer, use_maps_backing); } void ProcMapsIterator::Init(pid_t pid, Buffer *buffer, bool use_maps_backing) { pid_ = pid; using_maps_backing_ = use_maps_backing; dynamic_buffer_ = NULL; if (!buffer) { // If the user didn't pass in any buffer storage, allocate it // now. This is the normal case; the signal handler passes in a // static buffer. buffer = dynamic_buffer_ = new Buffer; } else { dynamic_buffer_ = NULL; } ibuf_ = buffer->buf_; stext_ = etext_ = nextline_ = ibuf_; ebuf_ = ibuf_ + Buffer::kBufSize - 1; nextline_ = ibuf_; #if defined(__linux__) || defined(__CYGWIN__) || defined(__CYGWIN32__) if (use_maps_backing) { // don't bother with clever "self" stuff in this case ConstructFilename("/proc/%d/maps_backing", pid, ibuf_, Buffer::kBufSize); } else if (pid == 0) { // We have to kludge a bit to deal with the args ConstructFilename // expects. 
The 1 is never used -- it's only impt. that it's not 0. ConstructFilename("/proc/self/maps", 1, ibuf_, Buffer::kBufSize); } else { ConstructFilename("/proc/%d/maps", pid, ibuf_, Buffer::kBufSize); } // No error logging since this can be called from the crash dump // handler at awkward moments. Users should call Valid() before // using. NO_INTR(fd_ = open(ibuf_, O_RDONLY)); #elif defined(__FreeBSD__) // We don't support maps_backing on freebsd if (pid == 0) { ConstructFilename("/proc/curproc/map", 1, ibuf_, Buffer::kBufSize); } else { ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize); } NO_INTR(fd_ = open(ibuf_, O_RDONLY)); #elif defined(__sun__) if (pid == 0) { ConstructFilename("/proc/self/map", 1, ibuf_, Buffer::kBufSize); } else { ConstructFilename("/proc/%d/map", pid, ibuf_, Buffer::kBufSize); } NO_INTR(fd_ = open(ibuf_, O_RDONLY)); #elif defined(__MACH__) current_image_ = _dyld_image_count(); // count down from the top current_load_cmd_ = -1; #elif defined(PLATFORM_WINDOWS) snapshot_ = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE | TH32CS_SNAPMODULE32, GetCurrentProcessId()); memset(&module_, 0, sizeof(module_)); #else fd_ = -1; // so Valid() is always false #endif } ProcMapsIterator::~ProcMapsIterator() { #if defined(PLATFORM_WINDOWS) if (snapshot_ != INVALID_HANDLE_VALUE) CloseHandle(snapshot_); #elif defined(__MACH__) // no cleanup necessary! #else if (fd_ >= 0) NO_INTR(close(fd_)); #endif delete dynamic_buffer_; } bool ProcMapsIterator::Valid() const { #if defined(PLATFORM_WINDOWS) return snapshot_ != INVALID_HANDLE_VALUE; #elif defined(__MACH__) return 1; #else return fd_ != -1; #endif } bool ProcMapsIterator::Next(uint64 *start, uint64 *end, char **flags, uint64 *offset, int64 *inode, char **filename) { return NextExt(start, end, flags, offset, inode, filename, NULL, NULL, NULL, NULL, NULL); } // This has too many arguments. It should really be building // a map object and returning it. 
The problem is that this is called // when the memory allocator state is undefined, hence the arguments. bool ProcMapsIterator::NextExt(uint64 *start, uint64 *end, char **flags, uint64 *offset, int64 *inode, char **filename, uint64 *file_mapping, uint64 *file_pages, uint64 *anon_mapping, uint64 *anon_pages, dev_t *dev) { #if defined(__linux__) || defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__CYGWIN32__) do { // Advance to the start of the next line stext_ = nextline_; // See if we have a complete line in the buffer already nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ - stext_)); if (!nextline_) { // Shift/fill the buffer so we do have a line int count = etext_ - stext_; // Move the current text to the start of the buffer memmove(ibuf_, stext_, count); stext_ = ibuf_; etext_ = ibuf_ + count; int nread = 0; // fill up buffer with text while (etext_ < ebuf_) { NO_INTR(nread = read(fd_, etext_, ebuf_ - etext_)); if (nread > 0) etext_ += nread; else break; } // Zero out remaining characters in buffer at EOF to avoid returning // garbage from subsequent calls. if (etext_ != ebuf_ && nread == 0) { memset(etext_, 0, ebuf_ - etext_); } *etext_ = '\n'; // sentinel; safe because ibuf extends 1 char beyond ebuf nextline_ = static_cast<char *>(memchr (stext_, '\n', etext_ + 1 - stext_)); } *nextline_ = 0; // turn newline into nul nextline_ += ((nextline_ < etext_)? 1 : 0); // skip nul if not end of text // stext_ now points at a nul-terminated line uint64 tmpstart, tmpend, tmpoffset; int64 tmpinode; int major, minor; unsigned filename_offset = 0; #if defined(__linux__) // for now, assume all linuxes have the same format if (!ParseProcMapsLine( stext_, start ? start : &tmpstart, end ? end : &tmpend, flags_, offset ? offset : &tmpoffset, &major, &minor, inode ? 
inode : &tmpinode, &filename_offset)) continue; #elif defined(__CYGWIN__) || defined(__CYGWIN32__) // cygwin is like linux, except the third field is the "entry point" // rather than the offset (see format_process_maps at // http://cygwin.com/cgi-bin/cvsweb.cgi/src/winsup/cygwin/fhandler_process.cc?rev=1.89&content-type=text/x-cvsweb-markup&cvsroot=src // Offset is always be 0 on cygwin: cygwin implements an mmap // by loading the whole file and then calling NtMapViewOfSection. // Cygwin also seems to set its flags kinda randomly; use windows default. char tmpflags[5]; if (offset) *offset = 0; strcpy(flags_, "r-xp"); if (sscanf(stext_, "%llx-%llx %4s %llx %x:%x %lld %n", start ? start : &tmpstart, end ? end : &tmpend, tmpflags, &tmpoffset, &major, &minor, inode ? inode : &tmpinode, &filename_offset) != 7) continue; #elif defined(__FreeBSD__) // For the format, see http://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?rev=1.31&content-type=text/x-cvsweb-markup tmpstart = tmpend = tmpoffset = 0; tmpinode = 0; major = minor = 0; // can't get this info in freebsd if (inode) *inode = 0; // nor this if (offset) *offset = 0; // seems like this should be in there, but maybe not // start end resident privateresident obj(?) prot refcnt shadowcnt // flags copy_on_write needs_copy type filename: // 0x8048000 0x804a000 2 0 0xc104ce70 r-x 1 0 0x0 COW NC vnode /bin/cat if (sscanf(stext_, "0x%" SCNx64 " 0x%" SCNx64 " %*d %*d %*p %3s %*d %*d 0x%*x %*s %*s %*s %n", start ? start : &tmpstart, end ? end : &tmpend, flags_, &filename_offset) != 3) continue; #endif // Depending on the Linux kernel being used, there may or may not be a space // after the inode if there is no filename. sscanf will in such situations // nondeterministically either fill in filename_offset or not (the results // differ on multiple calls in the same run even with identical arguments). // We don't want to wander off somewhere beyond the end of the string. 
size_t stext_length = strlen(stext_); if (filename_offset == 0 || filename_offset > stext_length) filename_offset = stext_length; // We found an entry if (flags) *flags = flags_; if (filename) *filename = stext_ + filename_offset; if (dev) *dev = minor | (major << 8); if (using_maps_backing_) { // Extract and parse physical page backing info. char *backing_ptr = stext_ + filename_offset + strlen(stext_+filename_offset); // find the second '(' int paren_count = 0; while (--backing_ptr > stext_) { if (*backing_ptr == '(') { ++paren_count; if (paren_count >= 2) { uint64 tmp_file_mapping; uint64 tmp_file_pages; uint64 tmp_anon_mapping; uint64 tmp_anon_pages; sscanf(backing_ptr+1, "F %" SCNx64 " %" SCNd64 ") (A %" SCNx64 " %" SCNd64 ")", file_mapping ? file_mapping : &tmp_file_mapping, file_pages ? file_pages : &tmp_file_pages, anon_mapping ? anon_mapping : &tmp_anon_mapping, anon_pages ? anon_pages : &tmp_anon_pages); // null terminate the file name (there is a space // before the first (. backing_ptr[-1] = 0; break; } } } } return true; } while (etext_ > ibuf_); #elif defined(__sun__) // This is based on MA_READ == 4, MA_WRITE == 2, MA_EXEC == 1 static char kPerms[8][4] = { "---", "--x", "-w-", "-wx", "r--", "r-x", "rw-", "rwx" }; COMPILE_ASSERT(MA_READ == 4, solaris_ma_read_must_equal_4); COMPILE_ASSERT(MA_WRITE == 2, solaris_ma_write_must_equal_2); COMPILE_ASSERT(MA_EXEC == 1, solaris_ma_exec_must_equal_1); Buffer object_path; int nread = 0; // fill up buffer with text NO_INTR(nread = read(fd_, ibuf_, sizeof(prmap_t))); if (nread == sizeof(prmap_t)) { long inode_from_mapname = 0; prmap_t* mapinfo = reinterpret_cast<prmap_t*>(ibuf_); // Best-effort attempt to get the inode from the filename. I think the // two middle ints are major and minor device numbers, but I'm not sure. 
sscanf(mapinfo->pr_mapname, "ufs.%*d.%*d.%ld", &inode_from_mapname); if (pid_ == 0) { CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize, "/proc/self/path/%s", mapinfo->pr_mapname), Buffer::kBufSize); } else { CHECK_LT(snprintf(object_path.buf_, Buffer::kBufSize, "/proc/%d/path/%s", static_cast<int>(pid_), mapinfo->pr_mapname), Buffer::kBufSize); } ssize_t len = readlink(object_path.buf_, current_filename_, PATH_MAX); CHECK_LT(len, PATH_MAX); if (len < 0) len = 0; current_filename_[len] = '\0'; if (start) *start = mapinfo->pr_vaddr; if (end) *end = mapinfo->pr_vaddr + mapinfo->pr_size; if (flags) *flags = kPerms[mapinfo->pr_mflags & 7]; if (offset) *offset = mapinfo->pr_offset; if (inode) *inode = inode_from_mapname; if (filename) *filename = current_filename_; if (file_mapping) *file_mapping = 0; if (file_pages) *file_pages = 0; if (anon_mapping) *anon_mapping = 0; if (anon_pages) *anon_pages = 0; if (dev) *dev = 0; return true; } #elif defined(__MACH__) // We return a separate entry for each segment in the DLL. (TODO(csilvers): // can we do better?) A DLL ("image") has load-commands, some of which // talk about segment boundaries. // cf image_for_address from http://svn.digium.com/view/asterisk/team/oej/minivoicemail/dlfcn.c?revision=53912 for (; current_image_ >= 0; current_image_--) { const mach_header* hdr = _dyld_get_image_header(current_image_); if (!hdr) continue; if (current_load_cmd_ < 0) // set up for this image current_load_cmd_ = hdr->ncmds; // again, go from the top down // We start with the next load command (we've already looked at this one). 
for (current_load_cmd_--; current_load_cmd_ >= 0; current_load_cmd_--) { #ifdef MH_MAGIC_64 if (NextExtMachHelper<MH_MAGIC_64, LC_SEGMENT_64, struct mach_header_64, struct segment_command_64>( hdr, current_image_, current_load_cmd_, start, end, flags, offset, inode, filename, file_mapping, file_pages, anon_mapping, anon_pages, dev)) { return true; } #endif if (NextExtMachHelper<MH_MAGIC, LC_SEGMENT, struct mach_header, struct segment_command>( hdr, current_image_, current_load_cmd_, start, end, flags, offset, inode, filename, file_mapping, file_pages, anon_mapping, anon_pages, dev)) { return true; } } // If we get here, no more load_cmd's in this image talk about // segments. Go on to the next image. } #elif defined(PLATFORM_WINDOWS) static char kDefaultPerms[5] = "r-xp"; BOOL ok; if (module_.dwSize == 0) { // only possible before first call module_.dwSize = sizeof(module_); ok = Module32First(snapshot_, &module_); } else { ok = Module32Next(snapshot_, &module_); } if (ok) { uint64 base_addr = reinterpret_cast<DWORD_PTR>(module_.modBaseAddr); if (start) *start = base_addr; if (end) *end = base_addr + module_.modBaseSize; if (flags) *flags = kDefaultPerms; if (offset) *offset = 0; if (inode) *inode = 0; if (filename) *filename = module_.szExePath; if (file_mapping) *file_mapping = 0; if (file_pages) *file_pages = 0; if (anon_mapping) *anon_mapping = 0; if (anon_pages) *anon_pages = 0; if (dev) *dev = 0; return true; } #endif // We didn't find anything return false; } int ProcMapsIterator::FormatLine(char* buffer, int bufsize, uint64 start, uint64 end, const char *flags, uint64 offset, int64 inode, const char *filename, dev_t dev) { // We assume 'flags' looks like 'rwxp' or 'rwx'. char r = (flags && flags[0] == 'r') ? 'r' : '-'; char w = (flags && flags[0] && flags[1] == 'w') ? 'w' : '-'; char x = (flags && flags[0] && flags[1] && flags[2] == 'x') ? 
'x' : '-'; // p always seems set on linux, so we set the default to 'p', not '-' char p = (flags && flags[0] && flags[1] && flags[2] && flags[3] != 'p') ? '-' : 'p'; const int rc = snprintf(buffer, bufsize, "%08" PRIx64 "-%08" PRIx64 " %c%c%c%c %08" PRIx64 " %02x:%02x %-11" PRId64 " %s\n", start, end, r,w,x,p, offset, static_cast<int>(dev/256), static_cast<int>(dev%256), inode, filename); return (rc < 0 || rc >= bufsize) ? 0 : rc; } namespace tcmalloc { // Helper to add the list of mapped shared libraries to a profile. // Fill formatted "/proc/self/maps" contents into buffer 'buf' of size 'size' // and return the actual size occupied in 'buf'. We fill wrote_all to true // if we successfully wrote all proc lines to buf, false else. // We do not provision for 0-terminating 'buf'. int FillProcSelfMaps(char buf[], int size, bool* wrote_all) { ProcMapsIterator::Buffer iterbuf; ProcMapsIterator it(0, &iterbuf); // 0 means "current pid" uint64 start, end, offset; int64 inode; char *flags, *filename; int bytes_written = 0; *wrote_all = true; while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) { const int line_length = it.FormatLine(buf + bytes_written, size - bytes_written, start, end, flags, offset, inode, filename, 0); if (line_length == 0) *wrote_all = false; // failed to write this line out else bytes_written += line_length; } return bytes_written; } // Dump the same data as FillProcSelfMaps reads to fd. // It seems easier to repeat parts of FillProcSelfMaps here than to // reuse it via a call. void DumpProcSelfMaps(RawFD fd) { ProcMapsIterator::Buffer iterbuf; ProcMapsIterator it(0, &iterbuf); // 0 means "current pid" uint64 start, end, offset; int64 inode; char *flags, *filename; ProcMapsIterator::Buffer linebuf; while (it.Next(&start, &end, &flags, &offset, &inode, &filename)) { int written = it.FormatLine(linebuf.buf_, sizeof(linebuf.buf_), start, end, flags, offset, inode, filename, 0); RawWrite(fd, linebuf.buf_, written); } } } // namespace tcmalloc
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/atomicops-internals-x86.cc
.cc
4,270
113
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2007, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * This module gets enough CPU information to optimize the * atomicops module on x86. */ #include "base/atomicops.h" #include "base/basictypes.h" #include "base/googleinit.h" #include "base/logging.h" #include <string.h> // This file only makes sense with atomicops-internals-x86.h -- it // depends on structs that are defined in that file. 
If atomicops.h // doesn't sub-include that file, then we aren't needed, and shouldn't // try to do anything. #ifdef BASE_ATOMICOPS_INTERNALS_X86_H_ // Inline cpuid instruction. In PIC compilations, %ebx contains the address // of the global offset table. To avoid breaking such executables, this code // must preserve that register's value across cpuid instructions. #if defined(__i386__) #define cpuid(a, b, c, d, inp) \ asm ("mov %%ebx, %%edi\n" \ "cpuid\n" \ "xchg %%edi, %%ebx\n" \ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) #elif defined (__x86_64__) #define cpuid(a, b, c, d, inp) \ asm ("mov %%rbx, %%rdi\n" \ "cpuid\n" \ "xchg %%rdi, %%rbx\n" \ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp)) #endif #if defined(cpuid) // initialize the struct only on x86 // Set the flags so that code will run correctly and conservatively // until InitGoogle() is called. struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = { false, // no SSE2 false // no cmpxchg16b }; // Initialize the AtomicOps_Internalx86CPUFeatures struct. 
static void AtomicOps_Internalx86CPUFeaturesInit() { uint32 eax; uint32 ebx; uint32 ecx; uint32 edx; // Get vendor string (issue CPUID with eax = 0) cpuid(eax, ebx, ecx, edx, 0); char vendor[13]; memcpy(vendor, &ebx, 4); memcpy(vendor + 4, &edx, 4); memcpy(vendor + 8, &ecx, 4); vendor[12] = 0; // get feature flags in ecx/edx, and family/model in eax cpuid(eax, ebx, ecx, edx, 1); int family = (eax >> 8) & 0xf; // family and model fields int model = (eax >> 4) & 0xf; if (family == 0xf) { // use extended family and model fields family += (eax >> 20) & 0xff; model += ((eax >> 16) & 0xf) << 4; } // edx bit 26 is SSE2 which we use to tell use whether we can use mfence AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1); // ecx bit 13 indicates whether the cmpxchg16b instruction is supported AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1); } REGISTER_MODULE_INITIALIZER(atomicops_x86, { AtomicOps_Internalx86CPUFeaturesInit(); }); #endif #endif /* ifdef BASE_ATOMICOPS_INTERNALS_X86_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/spinlock_posix-inl.h
.h
2,324
64
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2009, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * This file is a Posix-specific part of spinlock_internal.cc */ #include <config.h> #include <errno.h> #ifdef HAVE_SCHED_H #include <sched.h> /* For sched_yield() */ #endif #include <time.h> /* For nanosleep() */ namespace base { namespace internal { void SpinLockDelay(volatile Atomic32 *w, int32 value, int loop) { int save_errno = errno; if (loop == 0) { } else if (loop == 1) { sched_yield(); } else { struct timespec tm; tm.tv_sec = 0; tm.tv_nsec = base::internal::SuggestedDelayNS(loop); nanosleep(&tm, NULL); } errno = save_errno; } void SpinLockWake(volatile Atomic32 *w, bool all) { } } // namespace internal } // namespace base
Unknown
3D
mcellteam/mcell
libs/gperftools/src/base/commandlineflags.h
.h
7,010
176
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // This file is a compatibility layer that defines Google's version of // command line flags that are used for configuration. // // We put flags into their own namespace. It is purposefully // named in an opaque way that people should have trouble typing // directly. 
The idea is that DEFINE puts the flag in the weird // namespace, and DECLARE imports the flag from there into the // current namespace. The net result is to force people to use // DECLARE to get access to a flag, rather than saying // extern bool FLAGS_logtostderr; // or some such instead. We want this so we can put extra // functionality (like sanity-checking) in DECLARE if we want, // and make sure it is picked up everywhere. // // We also put the type of the variable in the namespace, so that // people can't DECLARE_int32 something that they DEFINE_bool'd // elsewhere. #ifndef BASE_COMMANDLINEFLAGS_H_ #define BASE_COMMANDLINEFLAGS_H_ #include <config.h> #include <string> #include <string.h> // for memchr #include <stdlib.h> // for getenv #include "base/basictypes.h" #define DECLARE_VARIABLE(type, name) \ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \ extern PERFTOOLS_DLL_DECL type FLAGS_##name; \ } \ using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name #define DEFINE_VARIABLE(type, name, value, meaning) \ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \ PERFTOOLS_DLL_DECL type FLAGS_##name(value); \ char FLAGS_no##name; \ } \ using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name // bool specialization #define DECLARE_bool(name) \ DECLARE_VARIABLE(bool, name) #define DEFINE_bool(name, value, meaning) \ DEFINE_VARIABLE(bool, name, value, meaning) // int32 specialization #define DECLARE_int32(name) \ DECLARE_VARIABLE(int32, name) #define DEFINE_int32(name, value, meaning) \ DEFINE_VARIABLE(int32, name, value, meaning) // int64 specialization #define DECLARE_int64(name) \ DECLARE_VARIABLE(int64, name) #define DEFINE_int64(name, value, meaning) \ DEFINE_VARIABLE(int64, name, value, meaning) #define DECLARE_uint64(name) \ DECLARE_VARIABLE(uint64, name) #define DEFINE_uint64(name, value, meaning) \ DEFINE_VARIABLE(uint64, name, value, meaning) // 
double specialization #define DECLARE_double(name) \ DECLARE_VARIABLE(double, name) #define DEFINE_double(name, value, meaning) \ DEFINE_VARIABLE(double, name, value, meaning) // Special case for string, because we have to specify the namespace // std::string, which doesn't play nicely with our FLAG__namespace hackery. #define DECLARE_string(name) \ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \ extern std::string FLAGS_##name; \ } \ using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name #define DEFINE_string(name, value, meaning) \ namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \ std::string FLAGS_##name(value); \ char FLAGS_no##name; \ } \ using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name // implemented in sysinfo.cc namespace tcmalloc { namespace commandlineflags { inline bool StringToBool(const char *value, bool def) { if (!value) { return def; } switch (value[0]) { case 't': case 'T': case 'y': case 'Y': case '1': case '\0': return true; } return false; } inline int StringToInt(const char *value, int def) { if (!value) { return def; } return strtol(value, NULL, 10); } inline long long StringToLongLong(const char *value, long long def) { if (!value) { return def; } return strtoll(value, NULL, 10); } inline double StringToDouble(const char *value, double def) { if (!value) { return def; } return strtod(value, NULL); } } } // These macros (could be functions, but I don't want to bother with a .cc // file), make it easier to initialize flags from the environment. #define EnvToString(envname, dflt) \ (!getenv(envname) ? 
(dflt) : getenv(envname)) #define EnvToBool(envname, dflt) \ tcmalloc::commandlineflags::StringToBool(getenv(envname), dflt) #define EnvToInt(envname, dflt) \ tcmalloc::commandlineflags::StringToInt(getenv(envname), dflt) #define EnvToInt64(envname, dflt) \ tcmalloc::commandlineflags::StringToLongLong(getenv(envname), dflt) #define EnvToDouble(envname, dflt) \ tcmalloc::commandlineflags::StringToDouble(getenv(envname), dflt) #endif // BASE_COMMANDLINEFLAGS_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/malloc_hook_c.h
.h
1,858
38
/* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/malloc_hook_c.h is deprecated. Use gperftools/malloc_hook_c.h instead" #endif #include <gperftools/malloc_hook_c.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/malloc_extension.h
.h
1,863
37
// Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/malloc_extension.h is deprecated. Use gperftools/malloc_extension.h instead" #endif #include <gperftools/malloc_extension.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/malloc_hook.h
.h
1,848
37
// Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/malloc_hook.h is deprecated. Use gperftools/malloc_hook.h instead" #endif #include <gperftools/malloc_hook.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/profiler.h
.h
1,843
38
/* Copyright (c) 2005, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/profiler.h is deprecated. Use gperftools/profiler.h instead" #endif #include <gperftools/profiler.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/stacktrace.h
.h
1,845
37
// Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/stacktrace.h is deprecated. Use gperftools/stacktrace.h instead" #endif #include <gperftools/stacktrace.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/heap-checker.h
.h
1,848
37
// Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/heap-checker.h is deprecated. Use gperftools/heap-checker.h instead" #endif #include <gperftools/heap-checker.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/heap-profiler.h
.h
1,858
38
/* Copyright (c) 2005, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/heap-profiler.h is deprecated. Use gperftools/heap-profiler.h instead" #endif #include <gperftools/heap-profiler.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/tcmalloc.h
.h
1,843
38
/* Copyright (c) 2003, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/tcmalloc.h is deprecated. Use gperftools/tcmalloc.h instead" #endif #include <gperftools/tcmalloc.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/google/malloc_extension_c.h
.h
1,873
38
/* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* The code has moved to gperftools/. Use that include-directory for * new code. */ #if defined(__GNUC__) && !defined(GPERFTOOLS_SUPPRESS_LEGACY_WARNING) #warning "google/malloc_extension_c.h is deprecated. Use gperftools/malloc_extension_c.h instead" #endif #include <gperftools/malloc_extension_c.h>
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/malloc_hook_c.h
.h
6,905
174
/* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * -- * Author: Craig Silverstein * * C shims for the C++ malloc_hook.h. See malloc_hook.h for details * on how to use these. 
*/ #ifndef _MALLOC_HOOK_C_H_ #define _MALLOC_HOOK_C_H_ #include <stddef.h> #include <sys/types.h> /* Annoying stuff for windows; makes sure clients can import these functions */ #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif #ifdef __cplusplus extern "C" { #endif /* Get the current stack trace. Try to skip all routines up to and * and including the caller of MallocHook::Invoke*. * Use "skip_count" (similarly to GetStackTrace from stacktrace.h) * as a hint about how many routines to skip if better information * is not available. */ PERFTOOLS_DLL_DECL int MallocHook_GetCallerStackTrace(void** result, int max_depth, int skip_count); /* The MallocHook_{Add,Remove}*Hook functions return 1 on success and 0 on * failure. */ typedef void (*MallocHook_NewHook)(const void* ptr, size_t size); PERFTOOLS_DLL_DECL int MallocHook_AddNewHook(MallocHook_NewHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemoveNewHook(MallocHook_NewHook hook); typedef void (*MallocHook_DeleteHook)(const void* ptr); PERFTOOLS_DLL_DECL int MallocHook_AddDeleteHook(MallocHook_DeleteHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemoveDeleteHook(MallocHook_DeleteHook hook); typedef void (*MallocHook_PreMmapHook)(const void *start, size_t size, int protection, int flags, int fd, off_t offset); PERFTOOLS_DLL_DECL int MallocHook_AddPreMmapHook(MallocHook_PreMmapHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemovePreMmapHook(MallocHook_PreMmapHook hook); typedef void (*MallocHook_MmapHook)(const void* result, const void* start, size_t size, int protection, int flags, int fd, off_t offset); PERFTOOLS_DLL_DECL int MallocHook_AddMmapHook(MallocHook_MmapHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemoveMmapHook(MallocHook_MmapHook hook); typedef int (*MallocHook_MmapReplacement)(const void* start, size_t size, int protection, int flags, int fd, off_t offset, void** result); int 
MallocHook_SetMmapReplacement(MallocHook_MmapReplacement hook); int MallocHook_RemoveMmapReplacement(MallocHook_MmapReplacement hook); typedef void (*MallocHook_MunmapHook)(const void* ptr, size_t size); PERFTOOLS_DLL_DECL int MallocHook_AddMunmapHook(MallocHook_MunmapHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemoveMunmapHook(MallocHook_MunmapHook hook); typedef int (*MallocHook_MunmapReplacement)(const void* ptr, size_t size, int* result); int MallocHook_SetMunmapReplacement(MallocHook_MunmapReplacement hook); int MallocHook_RemoveMunmapReplacement(MallocHook_MunmapReplacement hook); typedef void (*MallocHook_MremapHook)(const void* result, const void* old_addr, size_t old_size, size_t new_size, int flags, const void* new_addr); PERFTOOLS_DLL_DECL int MallocHook_AddMremapHook(MallocHook_MremapHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemoveMremapHook(MallocHook_MremapHook hook); typedef void (*MallocHook_PreSbrkHook)(ptrdiff_t increment); PERFTOOLS_DLL_DECL int MallocHook_AddPreSbrkHook(MallocHook_PreSbrkHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemovePreSbrkHook(MallocHook_PreSbrkHook hook); typedef void (*MallocHook_SbrkHook)(const void* result, ptrdiff_t increment); PERFTOOLS_DLL_DECL int MallocHook_AddSbrkHook(MallocHook_SbrkHook hook); PERFTOOLS_DLL_DECL int MallocHook_RemoveSbrkHook(MallocHook_SbrkHook hook); /* The following are DEPRECATED. 
*/ PERFTOOLS_DLL_DECL MallocHook_NewHook MallocHook_SetNewHook(MallocHook_NewHook hook); PERFTOOLS_DLL_DECL MallocHook_DeleteHook MallocHook_SetDeleteHook(MallocHook_DeleteHook hook); PERFTOOLS_DLL_DECL MallocHook_PreMmapHook MallocHook_SetPreMmapHook(MallocHook_PreMmapHook hook); PERFTOOLS_DLL_DECL MallocHook_MmapHook MallocHook_SetMmapHook(MallocHook_MmapHook hook); PERFTOOLS_DLL_DECL MallocHook_MunmapHook MallocHook_SetMunmapHook(MallocHook_MunmapHook hook); PERFTOOLS_DLL_DECL MallocHook_MremapHook MallocHook_SetMremapHook(MallocHook_MremapHook hook); PERFTOOLS_DLL_DECL MallocHook_PreSbrkHook MallocHook_SetPreSbrkHook(MallocHook_PreSbrkHook hook); PERFTOOLS_DLL_DECL MallocHook_SbrkHook MallocHook_SetSbrkHook(MallocHook_SbrkHook hook); /* End of DEPRECATED functions. */ #ifdef __cplusplus } // extern "C" #endif #endif /* _MALLOC_HOOK_C_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/malloc_extension.h
.h
19,844
447
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Sanjay Ghemawat <opensource@google.com> // // Extra extensions exported by some malloc implementations. These // extensions are accessed through a virtual base class so an // application can link against a malloc that does not implement these // extensions, and it will get default versions that do nothing. 
// // NOTE FOR C USERS: If you wish to use this functionality from within // a C program, see malloc_extension_c.h. #ifndef BASE_MALLOC_EXTENSION_H_ #define BASE_MALLOC_EXTENSION_H_ #include <stddef.h> // I can't #include config.h in this public API file, but I should // really use configure (and make malloc_extension.h a .in file) to // figure out if the system has stdint.h or not. But I'm lazy, so // for now I'm assuming it's a problem only with MSVC. #ifndef _MSC_VER #include <stdint.h> #endif #include <string> #include <vector> // Annoying stuff for windows -- makes sure clients can import these functions #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif static const int kMallocHistogramSize = 64; // One day, we could support other types of writers (perhaps for C?) typedef std::string MallocExtensionWriter; namespace base { struct MallocRange; } // Interface to a pluggable system allocator. class PERFTOOLS_DLL_DECL SysAllocator { public: SysAllocator() { } virtual ~SysAllocator(); // Allocates "size"-byte of memory from system aligned with "alignment". // Returns NULL if failed. Otherwise, the returned pointer p up to and // including (p + actual_size -1) have been allocated. virtual void* Alloc(size_t size, size_t *actual_size, size_t alignment) = 0; }; // The default implementations of the following routines do nothing. // All implementations should be thread-safe; the current one // (TCMallocImplementation) is. class PERFTOOLS_DLL_DECL MallocExtension { public: virtual ~MallocExtension(); // Call this very early in the program execution -- say, in a global // constructor -- to set up parameters and state needed by all // instrumented malloc implemenatations. One example: this routine // sets environemnt variables to tell STL to use libc's malloc() // instead of doing its own memory management. 
This is safe to call // multiple times, as long as each time is before threads start up. static void Initialize(); // See "verify_memory.h" to see what these routines do virtual bool VerifyAllMemory(); virtual bool VerifyNewMemory(const void* p); virtual bool VerifyArrayNewMemory(const void* p); virtual bool VerifyMallocMemory(const void* p); virtual bool MallocMemoryStats(int* blocks, size_t* total, int histogram[kMallocHistogramSize]); // Get a human readable description of the following malloc data structures. // - Total inuse memory by application. // - Free memory(thread, central and page heap), // - Freelist of central cache, each class. // - Page heap freelist. // The state is stored as a null-terminated string // in a prefix of "buffer[0,buffer_length-1]". // REQUIRES: buffer_length > 0. virtual void GetStats(char* buffer, int buffer_length); // Outputs to "writer" a sample of live objects and the stack traces // that allocated these objects. The format of the returned output // is equivalent to the output of the heap profiler and can // therefore be passed to "pprof". This function is equivalent to // ReadStackTraces. The main difference is that this function returns // serialized data appropriately formatted for use by the pprof tool. // // Since gperftools 2.8 heap samples are not de-duplicated by the // library anymore. // // NOTE: by default, tcmalloc does not do any heap sampling, and this // function will always return an empty sample. To get useful // data from GetHeapSample, you must also set the environment // variable TCMALLOC_SAMPLE_PARAMETER to a value such as 524288. virtual void GetHeapSample(MallocExtensionWriter* writer); // Outputs to "writer" the stack traces that caused growth in the // address space size. The format of the returned output is // equivalent to the output of the heap profiler and can therefore // be passed to "pprof". This function is equivalent to // ReadHeapGrowthStackTraces. 
The main difference is that this function // returns serialized data appropriately formatted for use by the // pprof tool. (This does not depend on, or require, // TCMALLOC_SAMPLE_PARAMETER.) virtual void GetHeapGrowthStacks(MallocExtensionWriter* writer); // Invokes func(arg, range) for every controlled memory // range. *range is filled in with information about the range. // // This is a best-effort interface useful only for performance // analysis. The implementation may not call func at all. typedef void (RangeFunction)(void*, const base::MallocRange*); virtual void Ranges(void* arg, RangeFunction func); // ------------------------------------------------------------------- // Control operations for getting and setting malloc implementation // specific parameters. Some currently useful properties: // // generic // ------- // "generic.current_allocated_bytes" // Number of bytes currently allocated by application // This property is not writable. // // "generic.heap_size" // Number of bytes in the heap == // current_allocated_bytes + // fragmentation + // freed memory regions // This property is not writable. // // "generic.total_physical_bytes" // Estimate of total bytes of the physical memory usage by the // allocator == // current_allocated_bytes + // fragmentation + // metadata // This property is not writable. // // tcmalloc // -------- // "tcmalloc.max_total_thread_cache_bytes" // Upper limit on total number of bytes stored across all // per-thread caches. Default: 16MB. // // "tcmalloc.current_total_thread_cache_bytes" // Number of bytes used across all thread caches. // This property is not writable. // // "tcmalloc.central_cache_free_bytes" // Number of free bytes in the central cache that have been // assigned to size classes. They always count towards virtual // memory usage, and unless the underlying memory is swapped out // by the OS, they also count towards physical memory usage. // This property is not writable. 
// // "tcmalloc.transfer_cache_free_bytes" // Number of free bytes that are waiting to be transfered between // the central cache and a thread cache. They always count // towards virtual memory usage, and unless the underlying memory // is swapped out by the OS, they also count towards physical // memory usage. This property is not writable. // // "tcmalloc.thread_cache_free_bytes" // Number of free bytes in thread caches. They always count // towards virtual memory usage, and unless the underlying memory // is swapped out by the OS, they also count towards physical // memory usage. This property is not writable. // // "tcmalloc.pageheap_free_bytes" // Number of bytes in free, mapped pages in page heap. These // bytes can be used to fulfill allocation requests. They // always count towards virtual memory usage, and unless the // underlying memory is swapped out by the OS, they also count // towards physical memory usage. This property is not writable. // // "tcmalloc.pageheap_unmapped_bytes" // Number of bytes in free, unmapped pages in page heap. // These are bytes that have been released back to the OS, // possibly by one of the MallocExtension "Release" calls. // They can be used to fulfill allocation requests, but // typically incur a page fault. They always count towards // virtual memory usage, and depending on the OS, typically // do not count towards physical memory usage. This property // is not writable. // ------------------------------------------------------------------- // Get the named "property"'s value. Returns true if the property // is known. Returns false if the property is not a valid property // name for the current malloc implementation. // REQUIRES: property != NULL; value != NULL virtual bool GetNumericProperty(const char* property, size_t* value); // Set the named "property"'s value. Returns true if the property // is known and writable. 
Returns false if the property is not a // valid property name for the current malloc implementation, or // is not writable. // REQUIRES: property != NULL virtual bool SetNumericProperty(const char* property, size_t value); // Mark the current thread as "idle". This routine may optionally // be called by threads as a hint to the malloc implementation that // any thread-specific resources should be released. Note: this may // be an expensive routine, so it should not be called too often. // // Also, if the code that calls this routine will go to sleep for // a while, it should take care to not allocate anything between // the call to this routine and the beginning of the sleep. // // Most malloc implementations ignore this routine. virtual void MarkThreadIdle(); // Mark the current thread as "busy". This routine should be // called after MarkThreadIdle() if the thread will now do more // work. If this method is not called, performance may suffer. // // Most malloc implementations ignore this routine. virtual void MarkThreadBusy(); // Gets the system allocator used by the malloc extension instance. Returns // NULL for malloc implementations that do not support pluggable system // allocators. virtual SysAllocator* GetSystemAllocator(); // Sets the system allocator to the specified. // // Users could register their own system allocators for malloc implementation // that supports pluggable system allocators, such as TCMalloc, by doing: // alloc = new MyOwnSysAllocator(); // MallocExtension::instance()->SetSystemAllocator(alloc); // It's up to users whether to fall back (recommended) to the default // system allocator (use GetSystemAllocator() above) or not. The caller is // responsible to any necessary locking. // See tcmalloc/system-alloc.h for the interface and // tcmalloc/memfs_malloc.cc for the examples. // // It's a no-op for malloc implementations that do not support pluggable // system allocators. 
virtual void SetSystemAllocator(SysAllocator *a); // Try to release num_bytes of free memory back to the operating // system for reuse. Use this extension with caution -- to get this // memory back may require faulting pages back in by the OS, and // that may be slow. (Currently only implemented in tcmalloc.) virtual void ReleaseToSystem(size_t num_bytes); // Same as ReleaseToSystem() but release as much memory as possible. virtual void ReleaseFreeMemory(); // Sets the rate at which we release unused memory to the system. // Zero means we never release memory back to the system. Increase // this flag to return memory faster; decrease it to return memory // slower. Reasonable rates are in the range [0,10]. (Currently // only implemented in tcmalloc). virtual void SetMemoryReleaseRate(double rate); // Gets the release rate. Returns a value < 0 if unknown. virtual double GetMemoryReleaseRate(); // Returns the estimated number of bytes that will be allocated for // a request of "size" bytes. This is an estimate: an allocation of // SIZE bytes may reserve more bytes, but will never reserve less. // (Currently only implemented in tcmalloc, other implementations // always return SIZE.) // This is equivalent to malloc_good_size() in OS X. virtual size_t GetEstimatedAllocatedSize(size_t size); // Returns the actual number N of bytes reserved by tcmalloc for the // pointer p. The client is allowed to use the range of bytes // [p, p+N) in any way it wishes (i.e. N is the "usable size" of this // allocation). This number may be equal to or greater than the number // of bytes requested when p was allocated. // p must have been allocated by this malloc implementation, // must not be an interior pointer -- that is, must be exactly // the pointer returned to by malloc() et al., not some offset // from that -- and should not have been freed yet. p may be NULL. // (Currently only implemented in tcmalloc; other implementations // will return 0.) 
// This is equivalent to malloc_size() in OS X, malloc_usable_size() // in glibc, and _msize() for windows. virtual size_t GetAllocatedSize(const void* p); // Returns kOwned if this malloc implementation allocated the memory // pointed to by p, or kNotOwned if some other malloc implementation // allocated it or p is NULL. May also return kUnknownOwnership if // the malloc implementation does not keep track of ownership. // REQUIRES: p must be a value returned from a previous call to // malloc(), calloc(), realloc(), memalign(), posix_memalign(), // valloc(), pvalloc(), new, or new[], and must refer to memory that // is currently allocated (so, for instance, you should not pass in // a pointer after having called free() on it). enum Ownership { // NOTE: Enum values MUST be kept in sync with the version in // malloc_extension_c.h kUnknownOwnership = 0, kOwned, kNotOwned }; virtual Ownership GetOwnership(const void* p); // The current malloc implementation. Always non-NULL. static MallocExtension* instance(); // Change the malloc implementation. Typically called by the // malloc implementation during initialization. static void Register(MallocExtension* implementation); // Returns detailed information about malloc's freelists. For each list, // return a FreeListInfo: struct FreeListInfo { size_t min_object_size; size_t max_object_size; size_t total_bytes_free; const char* type; }; // Each item in the vector refers to a different freelist. The lists // are identified by the range of allocations that objects in the // list can satisfy ([min_object_size, max_object_size]) and the // type of freelist (see below). The current size of the list is // returned in total_bytes_free (which count against a processes // resident and virtual size). // // Currently supported types are: // // "tcmalloc.page{_unmapped}" - tcmalloc's page heap. An entry for each size // class in the page heap is returned. 
Bytes in "page_unmapped" // are no longer backed by physical memory and do not count against // the resident size of a process. // // "tcmalloc.large{_unmapped}" - tcmalloc's list of objects larger // than the largest page heap size class. Only one "large" // entry is returned. There is no upper-bound on the size // of objects in the large free list; this call returns // kint64max for max_object_size. Bytes in // "large_unmapped" are no longer backed by physical memory // and do not count against the resident size of a process. // // "tcmalloc.central" - tcmalloc's central free-list. One entry per // size-class is returned. Never unmapped. // // "debug.free_queue" - free objects queued by the debug allocator // and not returned to tcmalloc. // // "tcmalloc.thread" - tcmalloc's per-thread caches. Never unmapped. virtual void GetFreeListSizes(std::vector<FreeListInfo>* v); // Get a list of stack traces of sampled allocation points. Returns // a pointer to a "new[]-ed" result array, and stores the sample // period in "sample_period". // // The state is stored as a sequence of adjacent entries // in the returned array. Each entry has the following form: // uintptr_t count; // Number of objects with following trace // uintptr_t size; // Total size of objects with following trace // uintptr_t depth; // Number of PC values in stack trace // void* stack[depth]; // PC values that form the stack trace // // The list of entries is terminated by a "count" of 0. // // It is the responsibility of the caller to "delete[]" the returned array. // // May return NULL to indicate no results. // // This is an internal extension. Callers should use the more // convenient "GetHeapSample(string*)" method defined above. virtual void** ReadStackTraces(int* sample_period); // Like ReadStackTraces(), but returns stack traces that caused growth // in the address space size. virtual void** ReadHeapGrowthStackTraces(); // Returns the size in bytes of the calling threads cache. 
virtual size_t GetThreadCacheSize(); // Like MarkThreadIdle, but does not destroy the internal data // structures of the thread cache. When the thread resumes, it wil // have an empty cache but will not need to pay to reconstruct the // cache data structures. virtual void MarkThreadTemporarilyIdle(); }; namespace base { // Information passed per range. More fields may be added later. struct MallocRange { enum Type { INUSE, // Application is using this range FREE, // Range is currently free UNMAPPED, // Backing physical memory has been returned to the OS UNKNOWN // More enum values may be added in the future }; uintptr_t address; // Address of range size_t length; // Byte length of range Type type; // Type of this range double fraction; // Fraction of range that is being used (0 if !INUSE) // Perhaps add the following: // - stack trace if this range was sampled // - heap growth stack trace if applicable to this range // - age when allocated (for inuse) or freed (if not in use) }; } // namespace base #endif // BASE_MALLOC_EXTENSION_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/malloc_hook.h
.h
15,372
360
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Sanjay Ghemawat // // Some of our malloc implementations can invoke the following hooks whenever // memory is allocated or deallocated. MallocHook is thread-safe, and things // you do before calling AddFooHook(MyHook) are visible to any resulting calls // to MyHook. Hooks must be thread-safe. 
If you write: // // CHECK(MallocHook::AddNewHook(&MyNewHook)); // // MyNewHook will be invoked in subsequent calls in the current thread, but // there are no guarantees on when it might be invoked in other threads. // // There are a limited number of slots available for each hook type. Add*Hook // will return false if there are no slots available. Remove*Hook will return // false if the given hook was not already installed. // // The order in which individual hooks are called in Invoke*Hook is undefined. // // It is safe for a hook to remove itself within Invoke*Hook and add other // hooks. Any hooks added inside a hook invocation (for the same hook type) // will not be invoked for the current invocation. // // One important user of these hooks is the heap profiler. // // CAVEAT: If you add new MallocHook::Invoke* calls then those calls must be // directly in the code of the (de)allocation function that is provided to the // user and that function must have an ATTRIBUTE_SECTION(malloc_hook) attribute. // // Note: the Invoke*Hook() functions are defined in malloc_hook-inl.h. If you // need to invoke a hook (which you shouldn't unless you're part of tcmalloc), // be sure to #include malloc_hook-inl.h in addition to malloc_hook.h. // // NOTE FOR C USERS: If you want to use malloc_hook functionality from // a C program, #include malloc_hook_c.h instead of this file. #ifndef _MALLOC_HOOK_H_ #define _MALLOC_HOOK_H_ #include <stddef.h> #include <sys/types.h> extern "C" { #include "malloc_hook_c.h" // a C version of the malloc_hook interface } // Annoying stuff for windows -- makes sure clients can import these functions #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif // The C++ methods below call the C version (MallocHook_*), and thus // convert between an int and a bool. Windows complains about this // (a "performance warning") which we don't care about, so we suppress. 
#ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4800) #endif // Note: malloc_hook_c.h defines MallocHook_*Hook and // MallocHook_{Add,Remove}*Hook. The version of these inside the MallocHook // class are defined in terms of the malloc_hook_c version. See malloc_hook_c.h // for details of these types/functions. class PERFTOOLS_DLL_DECL MallocHook { public: // The NewHook is invoked whenever an object is allocated. // It may be passed NULL if the allocator returned NULL. typedef MallocHook_NewHook NewHook; inline static bool AddNewHook(NewHook hook) { return MallocHook_AddNewHook(hook); } inline static bool RemoveNewHook(NewHook hook) { return MallocHook_RemoveNewHook(hook); } inline static void InvokeNewHook(const void* p, size_t s); // The DeleteHook is invoked whenever an object is deallocated. // It may be passed NULL if the caller is trying to delete NULL. typedef MallocHook_DeleteHook DeleteHook; inline static bool AddDeleteHook(DeleteHook hook) { return MallocHook_AddDeleteHook(hook); } inline static bool RemoveDeleteHook(DeleteHook hook) { return MallocHook_RemoveDeleteHook(hook); } inline static void InvokeDeleteHook(const void* p); // The PreMmapHook is invoked with mmap or mmap64 arguments just // before the call is actually made. Such a hook may be useful // in memory limited contexts, to catch allocations that will exceed // a memory limit, and take outside actions to increase that limit. typedef MallocHook_PreMmapHook PreMmapHook; inline static bool AddPreMmapHook(PreMmapHook hook) { return MallocHook_AddPreMmapHook(hook); } inline static bool RemovePreMmapHook(PreMmapHook hook) { return MallocHook_RemovePreMmapHook(hook); } inline static void InvokePreMmapHook(const void* start, size_t size, int protection, int flags, int fd, off_t offset); // The MmapReplacement is invoked after the PreMmapHook but before // the call is actually made. 
The MmapReplacement should return true // if it handled the call, or false if it is still necessary to // call mmap/mmap64. // This should be used only by experts, and users must be be // extremely careful to avoid recursive calls to mmap. The replacement // should be async signal safe. // Only one MmapReplacement is supported. After setting an MmapReplacement // you must call RemoveMmapReplacement before calling SetMmapReplacement // again. typedef MallocHook_MmapReplacement MmapReplacement; inline static bool SetMmapReplacement(MmapReplacement hook) { return MallocHook_SetMmapReplacement(hook); } inline static bool RemoveMmapReplacement(MmapReplacement hook) { return MallocHook_RemoveMmapReplacement(hook); } inline static bool InvokeMmapReplacement(const void* start, size_t size, int protection, int flags, int fd, off_t offset, void** result); // The MmapHook is invoked whenever a region of memory is mapped. // It may be passed MAP_FAILED if the mmap failed. typedef MallocHook_MmapHook MmapHook; inline static bool AddMmapHook(MmapHook hook) { return MallocHook_AddMmapHook(hook); } inline static bool RemoveMmapHook(MmapHook hook) { return MallocHook_RemoveMmapHook(hook); } inline static void InvokeMmapHook(const void* result, const void* start, size_t size, int protection, int flags, int fd, off_t offset); // The MunmapReplacement is invoked with munmap arguments just before // the call is actually made. The MunmapReplacement should return true // if it handled the call, or false if it is still necessary to // call munmap. // This should be used only by experts. The replacement should be // async signal safe. // Only one MunmapReplacement is supported. After setting an // MunmapReplacement you must call RemoveMunmapReplacement before // calling SetMunmapReplacement again. 
typedef MallocHook_MunmapReplacement MunmapReplacement; inline static bool SetMunmapReplacement(MunmapReplacement hook) { return MallocHook_SetMunmapReplacement(hook); } inline static bool RemoveMunmapReplacement(MunmapReplacement hook) { return MallocHook_RemoveMunmapReplacement(hook); } inline static bool InvokeMunmapReplacement(const void* p, size_t size, int* result); // The MunmapHook is invoked whenever a region of memory is unmapped. typedef MallocHook_MunmapHook MunmapHook; inline static bool AddMunmapHook(MunmapHook hook) { return MallocHook_AddMunmapHook(hook); } inline static bool RemoveMunmapHook(MunmapHook hook) { return MallocHook_RemoveMunmapHook(hook); } inline static void InvokeMunmapHook(const void* p, size_t size); // The MremapHook is invoked whenever a region of memory is remapped. typedef MallocHook_MremapHook MremapHook; inline static bool AddMremapHook(MremapHook hook) { return MallocHook_AddMremapHook(hook); } inline static bool RemoveMremapHook(MremapHook hook) { return MallocHook_RemoveMremapHook(hook); } inline static void InvokeMremapHook(const void* result, const void* old_addr, size_t old_size, size_t new_size, int flags, const void* new_addr); // The PreSbrkHook is invoked just before sbrk is called -- except when // the increment is 0. This is because sbrk(0) is often called // to get the top of the memory stack, and is not actually a // memory-allocation call. It may be useful in memory-limited contexts, // to catch allocations that will exceed the limit and take outside // actions to increase such a limit. typedef MallocHook_PreSbrkHook PreSbrkHook; inline static bool AddPreSbrkHook(PreSbrkHook hook) { return MallocHook_AddPreSbrkHook(hook); } inline static bool RemovePreSbrkHook(PreSbrkHook hook) { return MallocHook_RemovePreSbrkHook(hook); } inline static void InvokePreSbrkHook(ptrdiff_t increment); // The SbrkHook is invoked whenever sbrk is called -- except when // the increment is 0. 
This is because sbrk(0) is often called // to get the top of the memory stack, and is not actually a // memory-allocation call. typedef MallocHook_SbrkHook SbrkHook; inline static bool AddSbrkHook(SbrkHook hook) { return MallocHook_AddSbrkHook(hook); } inline static bool RemoveSbrkHook(SbrkHook hook) { return MallocHook_RemoveSbrkHook(hook); } inline static void InvokeSbrkHook(const void* result, ptrdiff_t increment); // Get the current stack trace. Try to skip all routines up to and // and including the caller of MallocHook::Invoke*. // Use "skip_count" (similarly to GetStackTrace from stacktrace.h) // as a hint about how many routines to skip if better information // is not available. inline static int GetCallerStackTrace(void** result, int max_depth, int skip_count) { return MallocHook_GetCallerStackTrace(result, max_depth, skip_count); } // Unhooked versions of mmap() and munmap(). These should be used // only by experts, since they bypass heapchecking, etc. // Note: These do not run hooks, but they still use the MmapReplacement // and MunmapReplacement. static void* UnhookedMMap(void *start, size_t length, int prot, int flags, int fd, off_t offset); static int UnhookedMUnmap(void *start, size_t length); // The following are DEPRECATED. 
inline static NewHook GetNewHook(); inline static NewHook SetNewHook(NewHook hook) { return MallocHook_SetNewHook(hook); } inline static DeleteHook GetDeleteHook(); inline static DeleteHook SetDeleteHook(DeleteHook hook) { return MallocHook_SetDeleteHook(hook); } inline static PreMmapHook GetPreMmapHook(); inline static PreMmapHook SetPreMmapHook(PreMmapHook hook) { return MallocHook_SetPreMmapHook(hook); } inline static MmapHook GetMmapHook(); inline static MmapHook SetMmapHook(MmapHook hook) { return MallocHook_SetMmapHook(hook); } inline static MunmapHook GetMunmapHook(); inline static MunmapHook SetMunmapHook(MunmapHook hook) { return MallocHook_SetMunmapHook(hook); } inline static MremapHook GetMremapHook(); inline static MremapHook SetMremapHook(MremapHook hook) { return MallocHook_SetMremapHook(hook); } inline static PreSbrkHook GetPreSbrkHook(); inline static PreSbrkHook SetPreSbrkHook(PreSbrkHook hook) { return MallocHook_SetPreSbrkHook(hook); } inline static SbrkHook GetSbrkHook(); inline static SbrkHook SetSbrkHook(SbrkHook hook) { return MallocHook_SetSbrkHook(hook); } // End of DEPRECATED methods. private: // Slow path versions of Invoke*Hook. 
static void InvokeNewHookSlow(const void* p, size_t s); static void InvokeDeleteHookSlow(const void* p); static void InvokePreMmapHookSlow(const void* start, size_t size, int protection, int flags, int fd, off_t offset); static void InvokeMmapHookSlow(const void* result, const void* start, size_t size, int protection, int flags, int fd, off_t offset); static bool InvokeMmapReplacementSlow(const void* start, size_t size, int protection, int flags, int fd, off_t offset, void** result); static void InvokeMunmapHookSlow(const void* p, size_t size); static bool InvokeMunmapReplacementSlow(const void* p, size_t size, int* result); static void InvokeMremapHookSlow(const void* result, const void* old_addr, size_t old_size, size_t new_size, int flags, const void* new_addr); static void InvokePreSbrkHookSlow(ptrdiff_t increment); static void InvokeSbrkHookSlow(const void* result, ptrdiff_t increment); }; #ifdef _MSC_VER #pragma warning(pop) #endif #endif /* _MALLOC_HOOK_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/profiler.h
.h
6,273
170
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2005, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Sanjay Ghemawat * * Module for CPU profiling based on periodic pc-sampling. * * For full(er) information, see docs/cpuprofile.html * * This module is linked into your program with * no slowdown caused by this unless you activate the profiler * using one of the following methods: * * 1. 
Before starting the program, set the environment variable * "CPUPROFILE" to be the name of the file to which the profile * data should be written. * * 2. Programmatically, start and stop the profiler using the * routines "ProfilerStart(filename)" and "ProfilerStop()". * * * (Note: if using linux 2.4 or earlier, only the main thread may be * profiled.) * * Use pprof to view the resulting profile output. * % pprof <path_to_executable> <profile_file_name> * % pprof --gv <path_to_executable> <profile_file_name> * * These functions are thread-safe. */ #ifndef BASE_PROFILER_H_ #define BASE_PROFILER_H_ #include <time.h> /* For time_t */ /* Annoying stuff for windows; makes sure clients can import these functions */ #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif /* All this code should be usable from within C apps. */ #ifdef __cplusplus extern "C" { #endif /* Profiler options, for use with ProfilerStartWithOptions. To use: * * struct ProfilerOptions options; * memset(&options, 0, sizeof options); * * then fill in fields as needed. * * This structure is intended to be usable from C code, so no constructor * is provided to initialize it. (Use memset as described above). */ struct ProfilerOptions { /* Filter function and argument. * * If filter_in_thread is not NULL, when a profiling tick is delivered * the profiler will call: * * (*filter_in_thread)(filter_in_thread_arg) * * If it returns nonzero, the sample will be included in the profile. * Note that filter_in_thread runs in a signal handler, so must be * async-signal-safe. * * A typical use would be to set up filter results for each thread * in the system before starting the profiler, then to make * filter_in_thread be a very simple function which retrieves those * results in an async-signal-safe way. Retrieval could be done * using thread-specific data, or using a shared data structure that * supports async-signal-safe lookups. 
*/ int (*filter_in_thread)(void *arg); void *filter_in_thread_arg; }; /* Start profiling and write profile info into fname, discarding any * existing profiling data in that file. * * This is equivalent to calling ProfilerStartWithOptions(fname, NULL). */ PERFTOOLS_DLL_DECL int ProfilerStart(const char* fname); /* Start profiling and write profile into fname, discarding any * existing profiling data in that file. * * The profiler is configured using the options given by 'options'. * Options which are not specified are given default values. * * 'options' may be NULL, in which case all are given default values. * * Returns nonzero if profiling was started successfully, or zero else. */ PERFTOOLS_DLL_DECL int ProfilerStartWithOptions( const char *fname, const struct ProfilerOptions *options); /* Stop profiling. Can be started again with ProfilerStart(), but * the currently accumulated profiling data will be cleared. */ PERFTOOLS_DLL_DECL void ProfilerStop(void); /* Flush any currently buffered profiling state to the profile file. * Has no effect if the profiler has not been started. */ PERFTOOLS_DLL_DECL void ProfilerFlush(void); /* DEPRECATED: these functions were used to enable/disable profiling * in the current thread, but no longer do anything. */ PERFTOOLS_DLL_DECL void ProfilerEnable(void); PERFTOOLS_DLL_DECL void ProfilerDisable(void); /* Returns nonzero if profile is currently enabled, zero if it's not. */ PERFTOOLS_DLL_DECL int ProfilingIsEnabledForAllThreads(void); /* Routine for registering new threads with the profiler. */ PERFTOOLS_DLL_DECL void ProfilerRegisterThread(void); /* Stores state about profiler's current status into "*state". */ struct ProfilerState { int enabled; /* Is profiling currently enabled? */ time_t start_time; /* If enabled, when was profiling started? 
*/ char profile_name[1024]; /* Name of profile file being written, or '\0' */ int samples_gathered; /* Number of samples gathered so far (or 0) */ }; PERFTOOLS_DLL_DECL void ProfilerGetCurrentState(struct ProfilerState* state); #ifdef __cplusplus } // extern "C" #endif #endif /* BASE_PROFILER_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/stacktrace.h
.h
4,955
118
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Sanjay Ghemawat // // Routines to extract the current stack trace. These functions are // thread-safe. 
#ifndef GOOGLE_STACKTRACE_H_ #define GOOGLE_STACKTRACE_H_ // Annoying stuff for windows -- makes sure clients can import these functions #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif // Skips the most recent "skip_count" stack frames (also skips the // frame generated for the "GetStackFrames" routine itself), and then // records the pc values for up to the next "max_depth" frames in // "result", and the corresponding stack frame sizes in "sizes". // Returns the number of values recorded in "result"/"sizes". // // Example: // main() { foo(); } // foo() { bar(); } // bar() { // void* result[10]; // int sizes[10]; // int depth = GetStackFrames(result, sizes, 10, 1); // } // // The GetStackFrames call will skip the frame for "bar". It will // return 2 and will produce pc values that map to the following // procedures: // result[0] foo // result[1] main // (Actually, there may be a few more entries after "main" to account for // startup procedures.) // And corresponding stack frame sizes will also be recorded: // sizes[0] 16 // sizes[1] 16 // (Stack frame sizes of 16 above are just for illustration purposes.) // Stack frame sizes of 0 or less indicate that those frame sizes couldn't // be identified. // // This routine may return fewer stack frame entries than are // available. Also note that "result" and "sizes" must both be non-NULL. extern PERFTOOLS_DLL_DECL int GetStackFrames(void** result, int* sizes, int max_depth, int skip_count); // Same as above, but to be used from a signal handler. The "uc" parameter // should be the pointer to ucontext_t which was passed as the 3rd parameter // to sa_sigaction signal handler. It may help the unwinder to get a // better stack trace under certain conditions. The "uc" may safely be NULL. 
extern PERFTOOLS_DLL_DECL int GetStackFramesWithContext(void** result, int* sizes, int max_depth, int skip_count, const void *uc); // This is similar to the GetStackFrames routine, except that it returns // the stack trace only, and not the stack frame sizes as well. // Example: // main() { foo(); } // foo() { bar(); } // bar() { // void* result[10]; // int depth = GetStackTrace(result, 10, 1); // } // // This produces: // result[0] foo // result[1] main // .... ... // // "result" must not be NULL. extern PERFTOOLS_DLL_DECL int GetStackTrace(void** result, int max_depth, int skip_count); // Same as above, but to be used from a signal handler. The "uc" parameter // should be the pointer to ucontext_t which was passed as the 3rd parameter // to sa_sigaction signal handler. It may help the unwinder to get a // better stack trace under certain conditions. The "uc" may safely be NULL. extern PERFTOOLS_DLL_DECL int GetStackTraceWithContext(void** result, int max_depth, int skip_count, const void *uc); #endif /* GOOGLE_STACKTRACE_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/heap-checker.h
.h
17,703
423
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Maxim Lifantsev (with design ideas by Sanjay Ghemawat) // // // Module for detecing heap (memory) leaks. // // For full(er) information, see docs/heap_checker.html // // This module can be linked into programs with // no slowdown caused by this unless you activate the leak-checker: // // 1. Set the environment variable HEAPCHEK to _type_ before // running the program. 
// // _type_ is usually "normal" but can also be "minimal", "strict", or // "draconian". (See the html file for other options, like 'local'.) // // After that, just run your binary. If the heap-checker detects // a memory leak at program-exit, it will print instructions on how // to track down the leak. #ifndef BASE_HEAP_CHECKER_H_ #define BASE_HEAP_CHECKER_H_ #include <sys/types.h> // for size_t // I can't #include config.h in this public API file, but I should // really use configure (and make malloc_extension.h a .in file) to // figure out if the system has stdint.h or not. But I'm lazy, so // for now I'm assuming it's a problem only with MSVC. #ifndef _MSC_VER #include <stdint.h> // for uintptr_t #endif #include <stdarg.h> // for va_list #include <vector> // Annoying stuff for windows -- makes sure clients can import these functions #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif // The class is thread-safe with respect to all the provided static methods, // as well as HeapLeakChecker objects: they can be accessed by multiple threads. class PERFTOOLS_DLL_DECL HeapLeakChecker { public: // ----------------------------------------------------------------------- // // Static functions for working with (whole-program) leak checking. // If heap leak checking is currently active in some mode // e.g. if leak checking was started (and is still active now) // due to HEAPCHECK=... defined in the environment. // The return value reflects iff HeapLeakChecker objects manually // constructed right now will be doing leak checking or nothing. // Note that we can go from active to inactive state during InitGoogle() // if FLAGS_heap_check gets set to "" by some code before/during InitGoogle(). static bool IsActive(); // Return pointer to the whole-program checker if it has been created // and NULL otherwise. 
// Once GlobalChecker() returns non-NULL that object will not disappear and // will be returned by all later GlobalChecker calls. // This is mainly to access BytesLeaked() and ObjectsLeaked() (see below) // for the whole-program checker after one calls NoGlobalLeaks() // or similar and gets false. static HeapLeakChecker* GlobalChecker(); // Do whole-program leak check now (if it was activated for this binary); // return false only if it was activated and has failed. // The mode of the check is controlled by the command-line flags. // This method can be called repeatedly. // Things like GlobalChecker()->SameHeap() can also be called explicitly // to do the desired flavor of the check. static bool NoGlobalLeaks(); // If whole-program checker if active, // cancel its automatic execution after main() exits. // This requires that some leak check (e.g. NoGlobalLeaks()) // has been called at least once on the whole-program checker. static void CancelGlobalCheck(); // ----------------------------------------------------------------------- // // Non-static functions for starting and doing leak checking. // Start checking and name the leak check performed. // The name is used in naming dumped profiles // and needs to be unique only within your binary. // It must also be a string that can be a part of a file name, // in particular not contain path expressions. explicit HeapLeakChecker(const char *name); // Destructor (verifies that some *NoLeaks or *SameHeap method // has been called at least once). ~HeapLeakChecker(); // These used to be different but are all the same now: they return // true iff all memory allocated since this HeapLeakChecker object // was constructor is still reachable from global state. // // Because we fork to convert addresses to symbol-names, and forking // is not thread-safe, and we may be called in a threaded context, // we do not try to symbolize addresses when called manually. 
bool NoLeaks() { return DoNoLeaks(DO_NOT_SYMBOLIZE); } // These forms are obsolete; use NoLeaks() instead. // TODO(csilvers): mark as DEPRECATED. bool QuickNoLeaks() { return NoLeaks(); } bool BriefNoLeaks() { return NoLeaks(); } bool SameHeap() { return NoLeaks(); } bool QuickSameHeap() { return NoLeaks(); } bool BriefSameHeap() { return NoLeaks(); } // Detailed information about the number of leaked bytes and objects // (both of these can be negative as well). // These are available only after a *SameHeap or *NoLeaks // method has been called. // Note that it's possible for both of these to be zero // while SameHeap() or NoLeaks() returned false in case // of a heap state change that is significant // but preserves the byte and object counts. ssize_t BytesLeaked() const; ssize_t ObjectsLeaked() const; // ----------------------------------------------------------------------- // // Static helpers to make us ignore certain leaks. // Scoped helper class. Should be allocated on the stack inside a // block of code. Any heap allocations done in the code block // covered by the scoped object (including in nested function calls // done by the code block) will not be reported as leaks. This is // the recommended replacement for the GetDisableChecksStart() and // DisableChecksToHereFrom() routines below. // // Example: // void Foo() { // HeapLeakChecker::Disabler disabler; // ... code that allocates objects whose leaks should be ignored ... // } // // REQUIRES: Destructor runs in same thread as constructor class Disabler { public: Disabler(); ~Disabler(); private: Disabler(const Disabler&); // disallow copy void operator=(const Disabler&); // and assign }; // Ignore an object located at 'ptr' (can go at the start or into the object) // as well as all heap objects (transitively) referenced from it for the // purposes of heap leak checking. 
Returns 'ptr' so that one can write // static T* obj = IgnoreObject(new T(...)); // // If 'ptr' does not point to an active allocated object at the time of this // call, it is ignored; but if it does, the object must not get deleted from // the heap later on. // // See also HiddenPointer, below, if you need to prevent a pointer from // being traversed by the heap checker but do not wish to transitively // whitelist objects referenced through it. template <typename T> static T* IgnoreObject(T* ptr) { DoIgnoreObject(static_cast<const void*>(const_cast<const T*>(ptr))); return ptr; } // Undo what an earlier IgnoreObject() call promised and asked to do. // At the time of this call 'ptr' must point at or inside of an active // allocated object which was previously registered with IgnoreObject(). static void UnIgnoreObject(const void* ptr); // ----------------------------------------------------------------------- // // Internal types defined in .cc class Allocator; struct RangeValue; private: // ----------------------------------------------------------------------- // // Various helpers // Create the name of the heap profile file. // Should be deleted via Allocator::Free(). char* MakeProfileNameLocked(); // Helper for constructors void Create(const char *name, bool make_start_snapshot); enum ShouldSymbolize { SYMBOLIZE, DO_NOT_SYMBOLIZE }; // Helper for *NoLeaks and *SameHeap bool DoNoLeaks(ShouldSymbolize should_symbolize); // Helper for NoGlobalLeaks, also called by the global destructor. static bool NoGlobalLeaksMaybeSymbolize(ShouldSymbolize should_symbolize); // These used to be public, but they are now deprecated. // Will remove entirely when all internal uses are fixed. // In the meantime, use friendship so the unittest can still test them. 
static void* GetDisableChecksStart(); static void DisableChecksToHereFrom(const void* start_address); static void DisableChecksIn(const char* pattern); friend void RangeDisabledLeaks(); friend void NamedTwoDisabledLeaks(); friend void* RunNamedDisabledLeaks(void*); friend void TestHeapLeakCheckerNamedDisabling(); // Actually implements IgnoreObject(). static void DoIgnoreObject(const void* ptr); // Disable checks based on stack trace entry at a depth <= // max_depth. Used to hide allocations done inside some special // libraries. static void DisableChecksFromToLocked(const void* start_address, const void* end_address, int max_depth); // Helper for DoNoLeaks to ignore all objects reachable from all live data static void IgnoreAllLiveObjectsLocked(const void* self_stack_top); // Callback we pass to TCMalloc_ListAllProcessThreads (see thread_lister.h) // that is invoked when all threads of our process are found and stopped. // The call back does the things needed to ignore live data reachable from // thread stacks and registers for all our threads // as well as do other global-live-data ignoring // (via IgnoreNonThreadLiveObjectsLocked) // during the quiet state of all threads being stopped. // For the argument meaning see the comment by TCMalloc_ListAllProcessThreads. // Here we only use num_threads and thread_pids, that TCMalloc_ListAllProcessThreads // fills for us with the number and pids of all the threads of our process // it found and attached to. static int IgnoreLiveThreadsLocked(void* parameter, int num_threads, pid_t* thread_pids, va_list ap); // Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreadsLocked // that we prefer to execute from IgnoreLiveThreadsLocked // while all threads are stopped. // This helper does live object discovery and ignoring // for all objects that are reachable from everything // not related to thread stacks and registers. 
static void IgnoreNonThreadLiveObjectsLocked(); // Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreadsLocked // to discover and ignore all heap objects // reachable from currently considered live objects // (live_objects static global variable in out .cc file). // "name", "name2" are two strings that we print one after another // in a debug message to describe what kind of live object sources // are being used. static void IgnoreLiveObjectsLocked(const char* name, const char* name2); // Do the overall whole-program heap leak check if needed; // returns true when did the leak check. static bool DoMainHeapCheck(); // Type of task for UseProcMapsLocked enum ProcMapsTask { RECORD_GLOBAL_DATA, DISABLE_LIBRARY_ALLOCS }; // Success/Error Return codes for UseProcMapsLocked. enum ProcMapsResult { PROC_MAPS_USED, CANT_OPEN_PROC_MAPS, NO_SHARED_LIBS_IN_PROC_MAPS }; // Read /proc/self/maps, parse it, and do the 'proc_maps_task' for each line. static ProcMapsResult UseProcMapsLocked(ProcMapsTask proc_maps_task); // A ProcMapsTask to disable allocations from 'library' // that is mapped to [start_address..end_address) // (only if library is a certain system library). static void DisableLibraryAllocsLocked(const char* library, uintptr_t start_address, uintptr_t end_address); // Return true iff "*ptr" points to a heap object // ("*ptr" can point at the start or inside of a heap object // so that this works e.g. for pointers to C++ arrays, C++ strings, // multiple-inherited objects, or pointers to members). // We also fill *object_size for this object then // and we move "*ptr" to point to the very start of the heap object. static inline bool HaveOnHeapLocked(const void** ptr, size_t* object_size); // Helper to shutdown heap leak checker when it's not needed // or can't function properly. static void TurnItselfOffLocked(); // Internally-used c-tor to start whole-executable checking. 
HeapLeakChecker(); // ----------------------------------------------------------------------- // // Friends and externally accessed helpers. // Helper for VerifyHeapProfileTableStackGet in the unittest // to get the recorded allocation caller for ptr, // which must be a heap object. static const void* GetAllocCaller(void* ptr); friend void VerifyHeapProfileTableStackGet(); // This gets to execute before constructors for all global objects static void BeforeConstructorsLocked(); friend void HeapLeakChecker_BeforeConstructors(); // This gets to execute after destructors for all global objects friend void HeapLeakChecker_AfterDestructors(); // Full starting of recommended whole-program checking. friend void HeapLeakChecker_InternalInitStart(); // Runs REGISTER_HEAPCHECK_CLEANUP cleanups and potentially // calls DoMainHeapCheck friend void HeapLeakChecker_RunHeapCleanups(); // ----------------------------------------------------------------------- // // Member data. class SpinLock* lock_; // to make HeapLeakChecker objects thread-safe const char* name_; // our remembered name (we own it) // NULL means this leak checker is a noop // Snapshot taken when the checker was created. May be NULL // for the global heap checker object. We use void* instead of // HeapProfileTable::Snapshot* to avoid including heap-profile-table.h. void* start_snapshot_; bool has_checked_; // if we have done the leak check, so these are ready: ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker ssize_t inuse_allocs_increase_; // allocations-in-use increase // for this checker bool keep_profiles_; // iff we should keep the heap profiles we've made // ----------------------------------------------------------------------- // // Disallow "evil" constructors. HeapLeakChecker(const HeapLeakChecker&); void operator=(const HeapLeakChecker&); }; // Holds a pointer that will not be traversed by the heap checker. 
// Contrast with HeapLeakChecker::IgnoreObject(o), in which o and // all objects reachable from o are ignored by the heap checker. template <class T> class HiddenPointer { public: explicit HiddenPointer(T* t) : masked_t_(reinterpret_cast<uintptr_t>(t) ^ kHideMask) { } // Returns unhidden pointer. Be careful where you save the result. T* get() const { return reinterpret_cast<T*>(masked_t_ ^ kHideMask); } private: // Arbitrary value, but not such that xor'ing with it is likely // to map one valid pointer to another valid pointer: static const uintptr_t kHideMask = static_cast<uintptr_t>(0xF03A5F7BF03A5F7Bll); uintptr_t masked_t_; }; // A class that exists solely to run its destructor. This class should not be // used directly, but instead by the REGISTER_HEAPCHECK_CLEANUP macro below. class PERFTOOLS_DLL_DECL HeapCleaner { public: typedef void (*void_function)(void); HeapCleaner(void_function f); static void RunHeapCleanups(); private: static std::vector<void_function>* heap_cleanups_; }; // A macro to declare module heap check cleanup tasks // (they run only if we are doing heap leak checking.) // 'body' should be the cleanup code to run. 'name' doesn't matter, // but must be unique amongst all REGISTER_HEAPCHECK_CLEANUP calls. #define REGISTER_HEAPCHECK_CLEANUP(name, body) \ namespace { \ void heapcheck_cleanup_##name() { body; } \ static HeapCleaner heapcheck_cleaner_##name(&heapcheck_cleanup_##name); \ } #endif // BASE_HEAP_CHECKER_H_
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/heap-profiler.h
.h
4,027
106
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2005, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * --- * Author: Sanjay Ghemawat * * Module for heap-profiling. * * For full(er) information, see docs/heapprofile.html * * This module can be linked into your program with * no slowdown caused by this unless you activate the profiler * using one of the following methods: * * 1. 
Before starting the program, set the environment variable * "HEAPPROFILE" to be the name of the file to which the profile * data should be written. * * 2. Programmatically, start and stop the profiler using the * routines "HeapProfilerStart(filename)" and "HeapProfilerStop()". * */ #ifndef BASE_HEAP_PROFILER_H_ #define BASE_HEAP_PROFILER_H_ #include <stddef.h> /* Annoying stuff for windows; makes sure clients can import these functions */ #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif /* All this code should be usable from within C apps. */ #ifdef __cplusplus extern "C" { #endif /* Start profiling and arrange to write profile data to file names * of the form: "prefix.0000", "prefix.0001", ... */ PERFTOOLS_DLL_DECL void HeapProfilerStart(const char* prefix); /* Returns non-zero if we are currently profiling the heap. (Returns * an int rather than a bool so it's usable from C.) This is true * between calls to HeapProfilerStart() and HeapProfilerStop(), and * also if the program has been run with HEAPPROFILER, or some other * way to turn on whole-program profiling. */ int IsHeapProfilerRunning(); /* Stop heap profiling. Can be restarted again with HeapProfilerStart(), * but the currently accumulated profiling information will be cleared. */ PERFTOOLS_DLL_DECL void HeapProfilerStop(); /* Dump a profile now - can be used for dumping at a hopefully * quiescent state in your program, in order to more easily track down * memory leaks. Will include the reason in the logged message */ PERFTOOLS_DLL_DECL void HeapProfilerDump(const char *reason); /* Generate current heap profiling information. * Returns an empty string when heap profiling is not active. * The returned pointer is a '\0'-terminated string allocated using malloc() * and should be free()-ed as soon as the caller does not need it anymore. 
*/ PERFTOOLS_DLL_DECL char* GetHeapProfile(); #ifdef __cplusplus } // extern "C" #endif #endif /* BASE_HEAP_PROFILER_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/nallocx.h
.h
936
38
#ifndef _NALLOCX_H_ #define _NALLOCX_H_ #include <stddef.h> #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif #ifdef __cplusplus extern "C" { #endif #define MALLOCX_LG_ALIGN(la) ((int)(la)) /* * The nallocx function allocates no memory, but it performs the same size * computation as the malloc function, and returns the real size of the * allocation that would result from the equivalent malloc function call. * nallocx is a malloc extension originally implemented by jemalloc: * http://www.unix.com/man-page/freebsd/3/nallocx/ * * Note, we only support MALLOCX_LG_ALIGN flag and nothing else. */ PERFTOOLS_DLL_DECL size_t nallocx(size_t size, int flags); /* same as above but never weak */ PERFTOOLS_DLL_DECL size_t tc_nallocx(size_t size, int flags); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* _NALLOCX_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/gperftools/malloc_extension_c.h
.h
4,201
102
/* Copyright (c) 2008, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * -- * Author: Craig Silverstein * * C shims for the C++ malloc_extension.h. See malloc_extension.h for * details. Note these C shims always work on * MallocExtension::instance(); it is not possible to have more than * one MallocExtension object in C applications. 
*/ #ifndef _MALLOC_EXTENSION_C_H_ #define _MALLOC_EXTENSION_C_H_ #include <stddef.h> #include <sys/types.h> /* Annoying stuff for windows -- makes sure clients can import these fns */ #ifndef PERFTOOLS_DLL_DECL # ifdef _WIN32 # define PERFTOOLS_DLL_DECL __declspec(dllimport) # else # define PERFTOOLS_DLL_DECL # endif #endif #ifdef __cplusplus extern "C" { #endif #define kMallocExtensionHistogramSize 64 PERFTOOLS_DLL_DECL int MallocExtension_VerifyAllMemory(void); PERFTOOLS_DLL_DECL int MallocExtension_VerifyNewMemory(const void* p); PERFTOOLS_DLL_DECL int MallocExtension_VerifyArrayNewMemory(const void* p); PERFTOOLS_DLL_DECL int MallocExtension_VerifyMallocMemory(const void* p); PERFTOOLS_DLL_DECL int MallocExtension_MallocMemoryStats(int* blocks, size_t* total, int histogram[kMallocExtensionHistogramSize]); PERFTOOLS_DLL_DECL void MallocExtension_GetStats(char* buffer, int buffer_length); /* TODO(csilvers): write a C version of these routines, that perhaps * takes a function ptr and a void *. 
*/ /* void MallocExtension_GetHeapSample(string* result); */ /* void MallocExtension_GetHeapGrowthStacks(string* result); */ PERFTOOLS_DLL_DECL int MallocExtension_GetNumericProperty(const char* property, size_t* value); PERFTOOLS_DLL_DECL int MallocExtension_SetNumericProperty(const char* property, size_t value); PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadIdle(void); PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadBusy(void); PERFTOOLS_DLL_DECL void MallocExtension_ReleaseToSystem(size_t num_bytes); PERFTOOLS_DLL_DECL void MallocExtension_ReleaseFreeMemory(void); PERFTOOLS_DLL_DECL size_t MallocExtension_GetEstimatedAllocatedSize(size_t size); PERFTOOLS_DLL_DECL size_t MallocExtension_GetAllocatedSize(const void* p); PERFTOOLS_DLL_DECL size_t MallocExtension_GetThreadCacheSize(void); PERFTOOLS_DLL_DECL void MallocExtension_MarkThreadTemporarilyIdle(void); /* * NOTE: These enum values MUST be kept in sync with the version in * malloc_extension.h */ typedef enum { MallocExtension_kUnknownOwnership = 0, MallocExtension_kOwned, MallocExtension_kNotOwned } MallocExtension_Ownership; PERFTOOLS_DLL_DECL MallocExtension_Ownership MallocExtension_GetOwnership(const void* p); #ifdef __cplusplus } /* extern "C" */ #endif #endif /* _MALLOC_EXTENSION_C_H_ */
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/current_allocated_bytes_test.cc
.cc
2,822
65
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2011, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // // Author: Craig Silverstein // This tests the accounting done by tcmalloc. When we allocate and // free a small buffer, the number of bytes used by the application // before the alloc+free should match the number of bytes used after. // However, the internal data structures used by tcmalloc will be // quite different -- new spans will have been allocated, etc. 
This // is, thus, a simple test that we account properly for the internal // data structures, so that we report the actual application-used // bytes properly. #include "config_for_unittests.h" #include <stdlib.h> #include <stdio.h> #include <gperftools/malloc_extension.h> #include "base/logging.h" int main() { // We don't do accounting right when using debugallocation.cc, so // turn off the test then. TODO(csilvers): get this working too. #ifdef NDEBUG static const char kCurrent[] = "generic.current_allocated_bytes"; size_t before_bytes, after_bytes; MallocExtension::instance()->GetNumericProperty(kCurrent, &before_bytes); free(malloc(200)); MallocExtension::instance()->GetNumericProperty(kCurrent, &after_bytes); CHECK_EQ(before_bytes, after_bytes); #endif printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/atomicops_unittest.cc
.cc
5,630
163
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- /* Copyright (c) 2006, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * --- * Author: Sanjay Ghemawat */ #include <stdio.h> #include "base/logging.h" #include "base/atomicops.h" #define GG_ULONGLONG(x) static_cast<uint64>(x) #define NUM_BITS(T) (sizeof(T) * 8) template <class AtomicType> static void TestCompareAndSwap(AtomicType (*compare_and_swap_func) (volatile AtomicType*, AtomicType, AtomicType)) { AtomicType value = 0; AtomicType prev = (*compare_and_swap_func)(&value, 0, 1); ASSERT_EQ(1, value); ASSERT_EQ(0, prev); // Use test value that has non-zero bits in both halves, more for testing // 64-bit implementation on 32-bit platforms. const AtomicType k_test_val = (GG_ULONGLONG(1) << (NUM_BITS(AtomicType) - 2)) + 11; value = k_test_val; prev = (*compare_and_swap_func)(&value, 0, 5); ASSERT_EQ(k_test_val, value); ASSERT_EQ(k_test_val, prev); value = k_test_val; prev = (*compare_and_swap_func)(&value, k_test_val, 5); ASSERT_EQ(5, value); ASSERT_EQ(k_test_val, prev); } template <class AtomicType> static void TestAtomicExchange(AtomicType (*atomic_exchange_func) (volatile AtomicType*, AtomicType)) { AtomicType value = 0; AtomicType new_value = (*atomic_exchange_func)(&value, 1); ASSERT_EQ(1, value); ASSERT_EQ(0, new_value); // Use test value that has non-zero bits in both halves, more for testing // 64-bit implementation on 32-bit platforms. const AtomicType k_test_val = (GG_ULONGLONG(1) << (NUM_BITS(AtomicType) - 2)) + 11; value = k_test_val; new_value = (*atomic_exchange_func)(&value, k_test_val); ASSERT_EQ(k_test_val, value); ASSERT_EQ(k_test_val, new_value); value = k_test_val; new_value = (*atomic_exchange_func)(&value, 5); ASSERT_EQ(5, value); ASSERT_EQ(k_test_val, new_value); } // This is a simple sanity check that values are correct. 
Not testing // atomicity template <class AtomicType> static void TestStore() { const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL); const AtomicType kVal2 = static_cast<AtomicType>(-1); AtomicType value; base::subtle::NoBarrier_Store(&value, kVal1); ASSERT_EQ(kVal1, value); base::subtle::NoBarrier_Store(&value, kVal2); ASSERT_EQ(kVal2, value); base::subtle::Acquire_Store(&value, kVal1); ASSERT_EQ(kVal1, value); base::subtle::Acquire_Store(&value, kVal2); ASSERT_EQ(kVal2, value); base::subtle::Release_Store(&value, kVal1); ASSERT_EQ(kVal1, value); base::subtle::Release_Store(&value, kVal2); ASSERT_EQ(kVal2, value); } // This is a simple sanity check that values are correct. Not testing // atomicity template <class AtomicType> static void TestLoad() { const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL); const AtomicType kVal2 = static_cast<AtomicType>(-1); AtomicType value; value = kVal1; ASSERT_EQ(kVal1, base::subtle::NoBarrier_Load(&value)); value = kVal2; ASSERT_EQ(kVal2, base::subtle::NoBarrier_Load(&value)); value = kVal1; ASSERT_EQ(kVal1, base::subtle::Acquire_Load(&value)); value = kVal2; ASSERT_EQ(kVal2, base::subtle::Acquire_Load(&value)); value = kVal1; ASSERT_EQ(kVal1, base::subtle::Release_Load(&value)); value = kVal2; ASSERT_EQ(kVal2, base::subtle::Release_Load(&value)); } template <class AtomicType> static void TestAtomicOps() { TestCompareAndSwap<AtomicType>(base::subtle::NoBarrier_CompareAndSwap); TestCompareAndSwap<AtomicType>(base::subtle::Acquire_CompareAndSwap); TestCompareAndSwap<AtomicType>(base::subtle::Release_CompareAndSwap); TestAtomicExchange<AtomicType>(base::subtle::NoBarrier_AtomicExchange); TestAtomicExchange<AtomicType>(base::subtle::Acquire_AtomicExchange); TestAtomicExchange<AtomicType>(base::subtle::Release_AtomicExchange); TestStore<AtomicType>(); TestLoad<AtomicType>(); } int main(int argc, char** argv) { TestAtomicOps<AtomicWord>(); TestAtomicOps<Atomic32>(); printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/thread_dealloc_unittest.cc
.cc
3,015
85
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2004, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Sanjay Ghemawat // // Check that we do not leak memory when cycling through lots of threads. 
#include "config_for_unittests.h" #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> // for sleep() #endif #include "base/logging.h" #include <gperftools/malloc_extension.h> #include "tests/testutil.h" // for RunThread() // Size/number of objects to allocate per thread (1 MB per thread) static const int kObjectSize = 1024; static const int kNumObjects = 1024; // Number of threads to create and destroy static const int kNumThreads = 1000; // Allocate lots of stuff static void AllocStuff() { void** objects = new void*[kNumObjects]; for (int i = 0; i < kNumObjects; i++) { objects[i] = malloc(kObjectSize); } for (int i = 0; i < kNumObjects; i++) { free(objects[i]); } delete[] objects; } int main(int argc, char** argv) { static const int kDisplaySize = 1048576; char* display = new char[kDisplaySize]; for (int i = 0; i < kNumThreads; i++) { RunThread(&AllocStuff); if (((i+1) % 200) == 0) { fprintf(stderr, "Iteration: %d of %d\n", (i+1), kNumThreads); MallocExtension::instance()->GetStats(display, kDisplaySize); fprintf(stderr, "%s\n", display); } } delete[] display; printf("PASS\n"); #ifdef HAVE_UNISTD_H sleep(1); // Prevent exit race problem with glibc #endif return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/malloc_extension_test.cc
.cc
4,337
99
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Craig Silverstein // // Simple test of malloc_extension. Includes test of C shims. 
#include "config_for_unittests.h" #include <stdio.h> #include <sys/types.h> #include "base/logging.h" #include <gperftools/malloc_extension.h> #include <gperftools/malloc_extension_c.h> int main(int argc, char** argv) { void* a = malloc(1000); size_t cxx_bytes_used, c_bytes_used; ASSERT_TRUE(MallocExtension::instance()->GetNumericProperty( "generic.current_allocated_bytes", &cxx_bytes_used)); ASSERT_TRUE(MallocExtension_GetNumericProperty( "generic.current_allocated_bytes", &c_bytes_used)); ASSERT_GT(cxx_bytes_used, 1000); ASSERT_EQ(cxx_bytes_used, c_bytes_used); ASSERT_TRUE(MallocExtension::instance()->VerifyAllMemory()); ASSERT_TRUE(MallocExtension_VerifyAllMemory()); ASSERT_EQ(MallocExtension::kOwned, MallocExtension::instance()->GetOwnership(a)); // TODO(csilvers): this relies on undocumented behavior that // GetOwnership works on stack-allocated variables. Use a better test. ASSERT_EQ(MallocExtension::kNotOwned, MallocExtension::instance()->GetOwnership(&cxx_bytes_used)); ASSERT_EQ(MallocExtension::kNotOwned, MallocExtension::instance()->GetOwnership(NULL)); ASSERT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000); // This is just a sanity check. If we allocated too much, tcmalloc is broken ASSERT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000); ASSERT_GE(MallocExtension::instance()->GetEstimatedAllocatedSize(1000), 1000); for (int i = 0; i < 10; ++i) { void *p = malloc(i); ASSERT_GE(MallocExtension::instance()->GetAllocatedSize(p), MallocExtension::instance()->GetEstimatedAllocatedSize(i)); free(p); } // Check the c-shim version too. 
ASSERT_EQ(MallocExtension_kOwned, MallocExtension_GetOwnership(a)); ASSERT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(&cxx_bytes_used)); ASSERT_EQ(MallocExtension_kNotOwned, MallocExtension_GetOwnership(NULL)); ASSERT_GE(MallocExtension_GetAllocatedSize(a), 1000); ASSERT_LE(MallocExtension_GetAllocatedSize(a), 5000); ASSERT_GE(MallocExtension_GetEstimatedAllocatedSize(1000), 1000); free(a); // Verify that the .cc file and .h file have the same enum values. ASSERT_EQ(static_cast<int>(MallocExtension::kUnknownOwnership), static_cast<int>(MallocExtension_kUnknownOwnership)); ASSERT_EQ(static_cast<int>(MallocExtension::kOwned), static_cast<int>(MallocExtension_kOwned)); ASSERT_EQ(static_cast<int>(MallocExtension::kNotOwned), static_cast<int>(MallocExtension_kNotOwned)); printf("DONE\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/pagemap_unittest.cc
.cc
5,843
179
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2003, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Sanjay Ghemawat #include "config_for_unittests.h" #include <stdio.h> #include <stdlib.h> #if defined HAVE_STDINT_H #include <stdint.h> // to get intptr_t #elif defined HAVE_INTTYPES_H #include <inttypes.h> // another place intptr_t might be defined #endif #include <sys/types.h> #include <vector> #include "base/logging.h" #include "pagemap.h" using std::vector; static void Permute(vector<intptr_t>* elements) { if (elements->empty()) return; const size_t num_elements = elements->size(); for (size_t i = num_elements - 1; i > 0; --i) { const size_t newpos = rand() % (i + 1); const intptr_t tmp = (*elements)[i]; // swap (*elements)[i] = (*elements)[newpos]; (*elements)[newpos] = tmp; } } // Note: we leak memory every time a map is constructed, so do not // create too many maps. // Test specified map type template <class Type> void TestMap(int limit, bool limit_is_below_the_overflow_boundary) { RAW_LOG(INFO, "Running test with %d iterations...\n", limit); { // Test sequential ensure/assignment Type map(malloc); for (intptr_t i = 0; i < static_cast<intptr_t>(limit); i++) { map.Ensure(i, 1); map.set(i, (void*)(i+1)); CHECK_EQ(map.get(i), (void*)(i+1)); } for (intptr_t i = 0; i < static_cast<intptr_t>(limit); i++) { CHECK_EQ(map.get(i), (void*)(i+1)); } } { // Test bulk Ensure Type map(malloc); map.Ensure(0, limit); for (intptr_t i = 0; i < static_cast<intptr_t>(limit); i++) { map.set(i, (void*)(i+1)); CHECK_EQ(map.get(i), (void*)(i+1)); } for (intptr_t i = 0; i < static_cast<intptr_t>(limit); i++) { CHECK_EQ(map.get(i), (void*)(i+1)); } } // Test that we correctly notice overflow { Type map(malloc); CHECK_EQ(map.Ensure(limit, limit+1), limit_is_below_the_overflow_boundary); } { // Test randomized accesses srand(301); // srand isn't great, but it's portable vector<intptr_t> elements; for (intptr_t i = 0; i < static_cast<intptr_t>(limit); i++) elements.push_back(i); Permute(&elements); Type map(malloc); for (intptr_t i = 0; i < static_cast<intptr_t>(limit); 
i++) { map.Ensure(elements[i], 1); map.set(elements[i], (void*)(elements[i]+1)); CHECK_EQ(map.get(elements[i]), (void*)(elements[i]+1)); } for (intptr_t i = 0; i < static_cast<intptr_t>(limit); i++) { CHECK_EQ(map.get(i), (void*)(i+1)); } } } // REQUIRES: BITS==10, i.e., valid range is [0,1023]. // Representations for different types will end up being: // PageMap1: array[1024] // PageMap2: array[32][32] // PageMap3: array[16][16][4] template <class Type> void TestNext(const char* name) { RAW_LOG(ERROR, "Running NextTest %s\n", name); Type map(malloc); char a, b, c, d, e; // When map is empty CHECK(map.Next(0) == NULL); CHECK(map.Next(5) == NULL); CHECK(map.Next(1<<30) == NULL); // Add a single value map.Ensure(40, 1); map.set(40, &a); CHECK(map.Next(0) == &a); CHECK(map.Next(39) == &a); CHECK(map.Next(40) == &a); CHECK(map.Next(41) == NULL); CHECK(map.Next(1<<30) == NULL); // Add a few values map.Ensure(41, 1); map.Ensure(100, 3); map.set(41, &b); map.set(100, &c); map.set(101, &d); map.set(102, &e); CHECK(map.Next(0) == &a); CHECK(map.Next(39) == &a); CHECK(map.Next(40) == &a); CHECK(map.Next(41) == &b); CHECK(map.Next(42) == &c); CHECK(map.Next(63) == &c); CHECK(map.Next(64) == &c); CHECK(map.Next(65) == &c); CHECK(map.Next(99) == &c); CHECK(map.Next(100) == &c); CHECK(map.Next(101) == &d); CHECK(map.Next(102) == &e); CHECK(map.Next(103) == NULL); } int main(int argc, char** argv) { TestMap< TCMalloc_PageMap1<10> > (100, true); TestMap< TCMalloc_PageMap1<10> > (1 << 10, false); TestMap< TCMalloc_PageMap2<20> > (100, true); TestMap< TCMalloc_PageMap2<20> > (1 << 20, false); TestMap< TCMalloc_PageMap3<20> > (100, true); TestMap< TCMalloc_PageMap3<20> > (1 << 20, false); TestNext< TCMalloc_PageMap1<10> >("PageMap1"); TestNext< TCMalloc_PageMap2<10> >("PageMap2"); TestNext< TCMalloc_PageMap3<10> >("PageMap3"); printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/debugallocation_test.cc
.cc
11,912
333
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Fred Akalin #include <stdio.h> #include <stdlib.h> #include <string.h> // for memcmp #include <vector> #include "gperftools/malloc_extension.h" #include "gperftools/tcmalloc.h" #include "base/logging.h" using std::vector; vector<void (*)()> g_testlist; // the tests to run #define TEST(a, b) \ struct Test_##a##_##b { \ Test_##a##_##b() { g_testlist.push_back(&Run); } \ static void Run(); \ }; \ static Test_##a##_##b g_test_##a##_##b; \ void Test_##a##_##b::Run() static int RUN_ALL_TESTS() { vector<void (*)()>::const_iterator it; for (it = g_testlist.begin(); it != g_testlist.end(); ++it) { (*it)(); // The test will error-exit if there's a problem. } fprintf(stderr, "\nPassed %d tests\n\nPASS\n", static_cast<int>(g_testlist.size())); return 0; } // The death tests are meant to be run from a shell-script driver, which // passes in an integer saying which death test to run. We store that // test-to-run here, and in the macro use a counter to see when we get // to that test, so we can run it. static int test_to_run = 0; // set in main() based on argv static int test_counter = 0; // incremented every time the macro is called #define IF_DEBUG_EXPECT_DEATH(statement, regex) do { \ if (test_counter++ == test_to_run) { \ fprintf(stderr, "Expected regex:%s\n", regex); \ statement; \ } \ } while (false) // This flag won't be compiled in in opt mode. DECLARE_int32(max_free_queue_size); // Test match as well as mismatch rules. But do not test on OS X; on // OS X the OS converts new/new[] to malloc before it gets to us, so // we are unable to catch these mismatch errors. #ifndef __APPLE__ TEST(DebugAllocationTest, DeallocMismatch) { // malloc can be matched only by free // new can be matched only by delete and delete(nothrow) // new[] can be matched only by delete[] and delete[](nothrow) // new(nothrow) can be matched only by delete and delete(nothrow) // new(nothrow)[] can be matched only by delete[] and delete[](nothrow) // Allocate with malloc. 
{ int* x = static_cast<int*>(malloc(sizeof(*x))); IF_DEBUG_EXPECT_DEATH(delete x, "mismatch.*being dealloc.*delete"); IF_DEBUG_EXPECT_DEATH(delete [] x, "mismatch.*being dealloc.*delete *[[]"); // Should work fine. free(x); } // Allocate with new. { int* x = new int; int* y = new int; IF_DEBUG_EXPECT_DEATH(free(x), "mismatch.*being dealloc.*free"); IF_DEBUG_EXPECT_DEATH(delete [] x, "mismatch.*being dealloc.*delete *[[]"); delete x; ::operator delete(y, std::nothrow); } // Allocate with new[]. { int* x = new int[1]; int* y = new int[1]; IF_DEBUG_EXPECT_DEATH(free(x), "mismatch.*being dealloc.*free"); IF_DEBUG_EXPECT_DEATH(delete x, "mismatch.*being dealloc.*delete"); delete [] x; ::operator delete[](y, std::nothrow); } // Allocate with new(nothrow). { int* x = new(std::nothrow) int; int* y = new(std::nothrow) int; IF_DEBUG_EXPECT_DEATH(free(x), "mismatch.*being dealloc.*free"); IF_DEBUG_EXPECT_DEATH(delete [] x, "mismatch.*being dealloc.*delete *[[]"); delete x; ::operator delete(y, std::nothrow); } // Allocate with new(nothrow)[]. 
{ int* x = new(std::nothrow) int[1]; int* y = new(std::nothrow) int[1]; IF_DEBUG_EXPECT_DEATH(free(x), "mismatch.*being dealloc.*free"); IF_DEBUG_EXPECT_DEATH(delete x, "mismatch.*being dealloc.*delete"); delete [] x; ::operator delete[](y, std::nothrow); } } #endif // #ifdef OS_MACOSX TEST(DebugAllocationTest, DoubleFree) { int* pint = new int; delete pint; IF_DEBUG_EXPECT_DEATH(delete pint, "has been already deallocated"); } TEST(DebugAllocationTest, StompBefore) { int* pint = new int; #ifndef NDEBUG // don't stomp memory if we're not in a position to detect it pint[-1] = 5; IF_DEBUG_EXPECT_DEATH(delete pint, "a word before object"); #endif } TEST(DebugAllocationTest, StompAfter) { int* pint = new int; #ifndef NDEBUG // don't stomp memory if we're not in a position to detect it pint[1] = 5; IF_DEBUG_EXPECT_DEATH(delete pint, "a word after object"); #endif } TEST(DebugAllocationTest, FreeQueueTest) { // Verify that the allocator doesn't return blocks that were recently freed. int* x = new int; int* old_x = x; delete x; x = new int; #if 1 // This check should not be read as a universal guarantee of behavior. If // other threads are executing, it would be theoretically possible for this // check to fail despite the efforts of debugallocation.cc to the contrary. // It should always hold under the controlled conditions of this unittest, // however. EXPECT_NE(x, old_x); // Allocator shouldn't return recently freed blocks #else // The below check passes, but since it isn't *required* to pass, I've left // it commented out. // EXPECT_EQ(x, old_x); #endif old_x = NULL; // avoid breaking opt build with an unused variable warning. delete x; } TEST(DebugAllocationTest, DanglingPointerWriteTest) { // This test can only be run if debugging. // // If not debugging, the 'new' following the dangling write might not be // safe. When debugging, we expect the (trashed) deleted block to be on the // list of recently-freed blocks, so the following 'new' will be safe. 
#if 1 int* x = new int; delete x; int poisoned_x_value = *x; *x = 1; // a dangling write. char* s = new char[FLAGS_max_free_queue_size]; // When we delete s, we push the storage that was previously allocated to x // off the end of the free queue. At that point, the write to that memory // will be detected. IF_DEBUG_EXPECT_DEATH(delete [] s, "Memory was written to after being freed."); // restore the poisoned value of x so that we can delete s without causing a // crash. *x = poisoned_x_value; delete [] s; #endif } TEST(DebugAllocationTest, DanglingWriteAtExitTest) { int *x = new int; delete x; int old_x_value = *x; *x = 1; // verify that dangling writes are caught at program termination if the // corrupted block never got pushed off of the end of the free queue. IF_DEBUG_EXPECT_DEATH(exit(0), "Memory was written to after being freed."); *x = old_x_value; // restore x so that the test can exit successfully. } TEST(DebugAllocationTest, StackTraceWithDanglingWriteAtExitTest) { int *x = new int; delete x; int old_x_value = *x; *x = 1; // verify that we also get a stack trace when we have a dangling write. // The " @ " is part of the stack trace output. IF_DEBUG_EXPECT_DEATH(exit(0), " @ .*main"); *x = old_x_value; // restore x so that the test can exit successfully. } static size_t CurrentlyAllocatedBytes() { size_t value; CHECK(MallocExtension::instance()->GetNumericProperty( "generic.current_allocated_bytes", &value)); return value; } TEST(DebugAllocationTest, CurrentlyAllocated) { // Clear the free queue #if 1 FLAGS_max_free_queue_size = 0; // Force a round-trip through the queue management code so that the // new size is seen and the queue of recently-freed blocks is flushed. free(malloc(1)); FLAGS_max_free_queue_size = 1048576; #endif // Free something and check that it disappears from allocated bytes // immediately. 
char* p = new char[1000]; size_t after_malloc = CurrentlyAllocatedBytes(); delete[] p; size_t after_free = CurrentlyAllocatedBytes(); EXPECT_LE(after_free, after_malloc - 1000); } TEST(DebugAllocationTest, GetAllocatedSizeTest) { #if 1 // When debug_allocation is in effect, GetAllocatedSize should return // exactly requested size, since debug_allocation doesn't allow users // to write more than that. for (int i = 0; i < 10; ++i) { void *p = malloc(i); EXPECT_EQ(i, MallocExtension::instance()->GetAllocatedSize(p)); free(p); } #endif void* a = malloc(1000); EXPECT_GE(MallocExtension::instance()->GetAllocatedSize(a), 1000); // This is just a sanity check. If we allocated too much, alloc is broken EXPECT_LE(MallocExtension::instance()->GetAllocatedSize(a), 5000); EXPECT_GE(MallocExtension::instance()->GetEstimatedAllocatedSize(1000), 1000); free(a); } TEST(DebugAllocationTest, HugeAlloc) { // This must not be a const variable so it doesn't form an // integral-constant-expression which can be *statically* rejected by the // compiler as too large for the allocation. size_t kTooBig = ~static_cast<size_t>(0); void* a = NULL; #ifndef NDEBUG a = malloc(kTooBig); EXPECT_EQ(NULL, a); // kAlsoTooBig is small enough not to get caught by debugallocation's check, // but will still fall through to tcmalloc's check. This must also be // a non-const variable. See kTooBig for more details. size_t kAlsoTooBig = kTooBig - 1024; a = malloc(kAlsoTooBig); EXPECT_EQ(NULL, a); #endif } // based on test program contributed by mikesart@gmail.com aka // mikesart@valvesoftware.com. See issue-464. 
TEST(DebugAllocationTest, ReallocAfterMemalign) { char stuff[50]; memset(stuff, 0x11, sizeof(stuff)); void *p = tc_memalign(16, sizeof(stuff)); EXPECT_NE(p, NULL); memcpy(stuff, p, sizeof(stuff)); p = realloc(p, sizeof(stuff) + 10); EXPECT_NE(p, NULL); int rv = memcmp(stuff, p, sizeof(stuff)); EXPECT_EQ(rv, 0); } int main(int argc, char** argv) { // If you run without args, we run the non-death parts of the test. // Otherwise, argv[1] should be a number saying which death-test // to run. We will output a regexp we expect the death-message // to include, and then run the given death test (which hopefully // will produce that error message). If argv[1] > the number of // death tests, we will run only the non-death parts. One way to // tell when you are done with all tests is when no 'expected // regexp' message is printed for a given argv[1]. if (argc < 2) { test_to_run = -1; // will never match } else { test_to_run = atoi(argv[1]); } return RUN_ALL_TESTS(); }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/addressmap_unittest.cc
.cc
5,953
172
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Sanjay Ghemawat #include <stdlib.h> // for rand() #include <vector> #include <set> #include <algorithm> #include <utility> #include "addressmap-inl.h" #include "base/logging.h" #include "base/commandlineflags.h" DEFINE_int32(iters, 20, "Number of test iterations"); DEFINE_int32(N, 100000, "Number of elements to test per iteration"); using std::pair; using std::make_pair; using std::vector; using std::set; using std::random_shuffle; struct UniformRandomNumberGenerator { size_t Uniform(size_t max_size) { if (max_size == 0) return 0; return rand() % max_size; // not a great random-number fn, but portable } }; static UniformRandomNumberGenerator rnd; // pair of associated value and object size typedef pair<int, size_t> ValueT; struct PtrAndSize { char* ptr; size_t size; PtrAndSize(char* p, size_t s) : ptr(p), size(s) {} }; size_t SizeFunc(const ValueT& v) { return v.second; } static void SetCheckCallback(const void* ptr, ValueT* val, set<pair<const void*, int> >* check_set) { check_set->insert(make_pair(ptr, val->first)); } int main(int argc, char** argv) { // Get a bunch of pointers const int N = FLAGS_N; static const int kMaxRealSize = 49; // 100Mb to stress not finding previous object (AddressMap's cluster is 1Mb): static const size_t kMaxSize = 100*1000*1000; vector<PtrAndSize> ptrs_and_sizes; for (int i = 0; i < N; ++i) { size_t s = rnd.Uniform(kMaxRealSize); ptrs_and_sizes.push_back(PtrAndSize(new char[s], s)); } for (int x = 0; x < FLAGS_iters; ++x) { RAW_LOG(INFO, "Iteration %d/%d...\n", x, FLAGS_iters); // Permute pointers to get rid of allocation order issues random_shuffle(ptrs_and_sizes.begin(), ptrs_and_sizes.end()); AddressMap<ValueT> map(malloc, free); const ValueT* result; const void* res_p; // Insert a bunch of entries for (int i = 0; i < N; ++i) { char* p = ptrs_and_sizes[i].ptr; CHECK(!map.Find(p)); int offs = rnd.Uniform(ptrs_and_sizes[i].size); CHECK(!map.FindInside(&SizeFunc, kMaxSize, p + offs, &res_p)); map.Insert(p, 
make_pair(i, ptrs_and_sizes[i].size)); CHECK(result = map.Find(p)); CHECK_EQ(result->first, i); CHECK(result = map.FindInside(&SizeFunc, kMaxRealSize, p + offs, &res_p)); CHECK_EQ(res_p, p); CHECK_EQ(result->first, i); map.Insert(p, make_pair(i + N, ptrs_and_sizes[i].size)); CHECK(result = map.Find(p)); CHECK_EQ(result->first, i + N); } // Delete the even entries for (int i = 0; i < N; i += 2) { void* p = ptrs_and_sizes[i].ptr; ValueT removed; CHECK(map.FindAndRemove(p, &removed)); CHECK_EQ(removed.first, i + N); } // Lookup the odd entries and adjust them for (int i = 1; i < N; i += 2) { char* p = ptrs_and_sizes[i].ptr; CHECK(result = map.Find(p)); CHECK_EQ(result->first, i + N); int offs = rnd.Uniform(ptrs_and_sizes[i].size); CHECK(result = map.FindInside(&SizeFunc, kMaxRealSize, p + offs, &res_p)); CHECK_EQ(res_p, p); CHECK_EQ(result->first, i + N); map.Insert(p, make_pair(i + 2*N, ptrs_and_sizes[i].size)); CHECK(result = map.Find(p)); CHECK_EQ(result->first, i + 2*N); } // Insert even entries back for (int i = 0; i < N; i += 2) { char* p = ptrs_and_sizes[i].ptr; int offs = rnd.Uniform(ptrs_and_sizes[i].size); CHECK(!map.FindInside(&SizeFunc, kMaxSize, p + offs, &res_p)); map.Insert(p, make_pair(i + 2*N, ptrs_and_sizes[i].size)); CHECK(result = map.Find(p)); CHECK_EQ(result->first, i + 2*N); CHECK(result = map.FindInside(&SizeFunc, kMaxRealSize, p + offs, &res_p)); CHECK_EQ(res_p, p); CHECK_EQ(result->first, i + 2*N); } // Check all entries set<pair<const void*, int> > check_set; map.Iterate(SetCheckCallback, &check_set); CHECK_EQ(check_set.size(), N); for (int i = 0; i < N; ++i) { void* p = ptrs_and_sizes[i].ptr; check_set.erase(make_pair(p, i + 2*N)); CHECK(result = map.Find(p)); CHECK_EQ(result->first, i + 2*N); } CHECK_EQ(check_set.size(), 0); } for (int i = 0; i < N; ++i) { delete[] ptrs_and_sizes[i].ptr; } printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/sampling_test.cc
.cc
3,016
84
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2008, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Craig Silverstein // // This tests ReadStackTraces and ReadGrowthStackTraces. It does this // by doing a bunch of allocations and then calling those functions. // A driver shell-script can call this, and then call pprof, and // verify the expected output. 
The output is written to // argv[1].heap and argv[1].growth #include "config_for_unittests.h" #include <stdio.h> #include <stdlib.h> #include <string> #include "base/logging.h" #include <gperftools/malloc_extension.h> using std::string; extern "C" void* AllocateAllocate() ATTRIBUTE_NOINLINE; extern "C" void* AllocateAllocate() { // The VLOG's are mostly to discourage inlining VLOG(1, "Allocating some more"); void* p = malloc(10000); VLOG(1, "Done allocating"); return p; } static void WriteStringToFile(const string& s, const string& filename) { FILE* fp = fopen(filename.c_str(), "w"); fwrite(s.data(), 1, s.length(), fp); fclose(fp); } int main(int argc, char** argv) { if (argc < 2) { fprintf(stderr, "USAGE: %s <base of output files>\n", argv[0]); exit(1); } for (int i = 0; i < 8000; i++) { AllocateAllocate(); } string s; MallocExtension::instance()->GetHeapSample(&s); WriteStringToFile(s, string(argv[1]) + ".heap"); s.clear(); MallocExtension::instance()->GetHeapGrowthStacks(&s); WriteStringToFile(s, string(argv[1]) + ".growth"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/tcmalloc_unittest.sh
.sh
2,908
85
#!/bin/sh # Copyright (c) 2013, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # --- # Author: Adhemerval Zanella # # Runs the tcmalloc_unittest with various environment variables. # This is necessary because tuning some environment variables # (TCMALLOC_TRANSFER_NUM_OBJ for instance) should not change program # behavior, just performance. 
BINDIR="${BINDIR:-.}" TCMALLOC_UNITTEST="${1:-$BINDIR/tcmalloc_unittest}" TMPDIR=/tmp/tcmalloc_unittest rm -rf $TMPDIR || exit 2 mkdir $TMPDIR || exit 3 run_unittest() { if $TCMALLOC_UNITTEST > $TMPDIR/output 2>&1; then echo "OK" else echo "FAILED" echo "Output from the failed run:" echo "----" cat $TMPDIR/output echo "----" exit 4 fi } # $1: value of tcmalloc_unittest env. var. run_check_transfer_num_obj() { [ -n "$1" ] && export TCMALLOC_TRANSFER_NUM_OBJ="$1" echo -n "Testing $TCMALLOC_UNITTEST with TCMALLOC_TRANSFER_NUM_OBJ=$1 ... " run_unittest } run_check_transfer_num_obj "" run_check_transfer_num_obj "40" run_check_transfer_num_obj "4096" echo -n "Testing $TCMALLOC_UNITTEST with TCMALLOC_AGGRESSIVE_DECOMMIT=t ... " TCMALLOC_AGGRESSIVE_DECOMMIT=t run_unittest echo -n "Testing $TCMALLOC_UNITTEST with TCMALLOC_HEAP_LIMIT_MB=512 ... " TCMALLOC_HEAP_LIMIT_MB=512 run_unittest echo -n "Testing $TCMALLOC_UNITTEST with TCMALLOC_ENABLE_SIZED_DELETE=t ..." TCMALLOC_ENABLE_SIZED_DELETE=t run_unittest echo "PASS"
Shell
3D
mcellteam/mcell
libs/gperftools/src/tests/simple_compat_test.cc
.cc
2,881
72
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2012, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Craig Silverstein // // This just verifies that we can compile code that #includes stuff // via the backwards-compatibility 'google/' #include-dir. It does // not include config.h on purpose, to better simulate a perftools // client. 
#include <stddef.h> #include <stdio.h> #define GPERFTOOLS_SUPPRESS_LEGACY_WARNING #include <google/heap-checker.h> #include <google/heap-profiler.h> #include <google/malloc_extension.h> #include <google/malloc_extension_c.h> #include <google/malloc_hook.h> #include <google/malloc_hook_c.h> #include <google/profiler.h> #include <google/stacktrace.h> #include <google/tcmalloc.h> // We don't link in -lprofiler for this test, so be sure not to make // any function calls that require the cpu-profiler code. The // heap-profiler is ok. HeapLeakChecker::Disabler* heap_checker_h; void (*heap_profiler_h)(const char*) = &HeapProfilerStart; MallocExtension::Ownership malloc_extension_h; MallocExtension_Ownership malloc_extension_c_h; MallocHook::NewHook* malloc_hook_h; MallocHook_NewHook* malloc_hook_c_h; ProfilerOptions* profiler_h; int (*stacktrace_h)(void**, int, int) = &GetStackTrace; void* (*tcmalloc_h)(size_t) = &tc_new; int main(int argc, char** argv) { printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/frag_unittest.cc
.cc
4,820
134
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2003, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Sanjay Ghemawat // // Test speed of handling fragmented heap #include "config_for_unittests.h" #include <stdlib.h> #include <stdio.h> #ifdef HAVE_SYS_RESOURCE_H #include <sys/time.h> // for struct timeval #include <sys/resource.h> // for getrusage #endif #ifdef _WIN32 #include <windows.h> // for GetTickCount() #endif #include <vector> #include "base/logging.h" #include "common.h" #include <gperftools/malloc_extension.h> using std::vector; int main(int argc, char** argv) { // Make kAllocSize one page larger than the maximum small object size. static const int kAllocSize = kMaxSize + kPageSize; // Allocate 400MB in total. static const int kTotalAlloc = 400 << 20; static const int kAllocIterations = kTotalAlloc / kAllocSize; // Allocate lots of objects vector<char*> saved(kAllocIterations); for (int i = 0; i < kAllocIterations; i++) { saved[i] = new char[kAllocSize]; } // Check the current "slack". size_t slack_before; MallocExtension::instance()->GetNumericProperty("tcmalloc.slack_bytes", &slack_before); // Free alternating ones to fragment heap size_t free_bytes = 0; for (int i = 0; i < saved.size(); i += 2) { delete[] saved[i]; free_bytes += kAllocSize; } // Check that slack delta is within 10% of expected. 
size_t slack_after; MallocExtension::instance()->GetNumericProperty("tcmalloc.slack_bytes", &slack_after); CHECK_GE(slack_after, slack_before); size_t slack = slack_after - slack_before; CHECK_GT(double(slack), 0.9*free_bytes); CHECK_LT(double(slack), 1.1*free_bytes); // Dump malloc stats static const int kBufSize = 1<<20; char* buffer = new char[kBufSize]; MallocExtension::instance()->GetStats(buffer, kBufSize); VLOG(1, "%s", buffer); delete[] buffer; // Now do timing tests for (int i = 0; i < 5; i++) { static const int kIterations = 100000; #ifdef HAVE_SYS_RESOURCE_H struct rusage r; getrusage(RUSAGE_SELF, &r); // figure out user-time spent on this struct timeval tv_start = r.ru_utime; #elif defined(_WIN32) long long int tv_start = GetTickCount(); #else # error No way to calculate time on your system #endif for (int i = 0; i < kIterations; i++) { size_t s; MallocExtension::instance()->GetNumericProperty("tcmalloc.slack_bytes", &s); } #ifdef HAVE_SYS_RESOURCE_H getrusage(RUSAGE_SELF, &r); struct timeval tv_end = r.ru_utime; int64 sumsec = static_cast<int64>(tv_end.tv_sec) - tv_start.tv_sec; int64 sumusec = static_cast<int64>(tv_end.tv_usec) - tv_start.tv_usec; #elif defined(_WIN32) long long int tv_end = GetTickCount(); int64 sumsec = (tv_end - tv_start) / 1000; // Resolution in windows is only to the millisecond, alas int64 sumusec = ((tv_end - tv_start) % 1000) * 1000; #else # error No way to calculate time on your system #endif fprintf(stderr, "getproperty: %6.1f ns/call\n", (sumsec * 1e9 + sumusec * 1e3) / kIterations); } printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/large_heap_fragmentation_unittest.cc
.cc
2,471
63
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // This is a unit test for exercising fragmentation of large (over 1 // meg) page spans. It makes sure that allocations/releases of // increasing memory chunks do not blowup memory // usage. 
See also https://code.google.com/p/gperftools/issues/detail?id=368 #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include "base/logging.h" #include "common.h" #include <gperftools/malloc_extension.h> int main (int argc, char** argv) { for (int pass = 1; pass <= 3; pass++) { size_t size = 100*1024*1024; while (size < 500*1024*1024) { void *ptr = malloc(size); free(ptr); size += 20000; size_t heap_size = static_cast<size_t>(-1); MallocExtension::instance()->GetNumericProperty("generic.heap_size", &heap_size); CHECK_LT(heap_size, 1*1024*1024*1024); } } printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/raw_printer_test.cc
.cc
1,847
65
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright 2009 Google Inc. All Rights Reserved. // Author: sanjay@google.com (Sanjay Ghemawat) // // Use of this source code is governed by a BSD-style license that can // be found in the LICENSE file. #include "raw_printer.h" #include <stdio.h> #include <string> #include "base/logging.h" using std::string; #define TEST(a, b) void TEST_##a##_##b() #define RUN_TEST(a, b) TEST_##a##_##b() TEST(RawPrinter, Empty) { char buffer[1]; base::RawPrinter printer(buffer, arraysize(buffer)); CHECK_EQ(0, printer.length()); CHECK_EQ(string(""), buffer); CHECK_EQ(0, printer.space_left()); printer.Printf("foo"); CHECK_EQ(string(""), string(buffer)); CHECK_EQ(0, printer.length()); CHECK_EQ(0, printer.space_left()); } TEST(RawPrinter, PartiallyFilled) { char buffer[100]; base::RawPrinter printer(buffer, arraysize(buffer)); printer.Printf("%s %s", "hello", "world"); CHECK_EQ(string("hello world"), string(buffer)); CHECK_EQ(11, printer.length()); CHECK_LT(0, printer.space_left()); } TEST(RawPrinter, Truncated) { char buffer[3]; base::RawPrinter printer(buffer, arraysize(buffer)); printer.Printf("%d", 12345678); CHECK_EQ(string("12"), string(buffer)); CHECK_EQ(2, printer.length()); CHECK_EQ(0, printer.space_left()); } TEST(RawPrinter, ExactlyFilled) { char buffer[12]; base::RawPrinter printer(buffer, arraysize(buffer)); printer.Printf("%s %s", "hello", "world"); CHECK_EQ(string("hello world"), string(buffer)); CHECK_EQ(11, printer.length()); CHECK_EQ(0, printer.space_left()); } int main(int argc, char **argv) { RUN_TEST(RawPrinter, Empty); RUN_TEST(RawPrinter, PartiallyFilled); RUN_TEST(RawPrinter, Truncated); RUN_TEST(RawPrinter, ExactlyFilled); printf("PASS\n"); return 0; // 0 means success }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/stack_trace_table_test.cc
.cc
2,922
94
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright 2009 Google Inc. All Rights Reserved. // Author: fikes@google.com (Andrew Fikes) // // Use of this source code is governed by a BSD-style license that can // be found in the LICENSE file. #include "config_for_unittests.h" #include <stdio.h> // for puts() #include "stack_trace_table.h" #include "base/logging.h" #include "base/spinlock.h" #include "static_vars.h" #undef ARRAYSIZE // may be defined on, eg, windows #define ARRAYSIZE(a) ( sizeof(a) / sizeof(*(a)) ) static void CheckTracesAndReset(tcmalloc::StackTraceTable* table, const uintptr_t* expected, int len) { void** entries = table->ReadStackTracesAndClear(); for (int i = 0; i < len; ++i) { CHECK_EQ(reinterpret_cast<uintptr_t>(entries[i]), expected[i]); } delete[] entries; } static void AddTrace(tcmalloc::StackTraceTable* table, const tcmalloc::StackTrace& t) { // Normally we'd need this lock, but since the test is single-threaded // we don't. I comment it out on windows because the DLL-decl thing // is really annoying in this case. 
#ifndef _MSC_VER SpinLockHolder h(tcmalloc::Static::pageheap_lock()); #endif table->AddTrace(t); } int main(int argc, char **argv) { tcmalloc::StackTraceTable table; // Empty table CHECK_EQ(table.depth_total(), 0); CHECK_EQ(table.bucket_total(), 0); static const uintptr_t k1[] = {0}; CheckTracesAndReset(&table, k1, ARRAYSIZE(k1)); tcmalloc::StackTrace t1; t1.size = static_cast<uintptr_t>(1024); t1.depth = static_cast<uintptr_t>(2); t1.stack[0] = reinterpret_cast<void*>(1); t1.stack[1] = reinterpret_cast<void*>(2); tcmalloc::StackTrace t2; t2.size = static_cast<uintptr_t>(512); t2.depth = static_cast<uintptr_t>(2); t2.stack[0] = reinterpret_cast<void*>(2); t2.stack[1] = reinterpret_cast<void*>(1); // Table w/ just t1 AddTrace(&table, t1); CHECK_EQ(table.depth_total(), 2); CHECK_EQ(table.bucket_total(), 1); static const uintptr_t k2[] = {1, 1024, 2, 1, 2, 0}; CheckTracesAndReset(&table, k2, ARRAYSIZE(k2)); // Table w/ t1, t2 AddTrace(&table, t1); AddTrace(&table, t2); CHECK_EQ(table.depth_total(), 4); CHECK_EQ(table.bucket_total(), 2); static const uintptr_t k3[] = {1, 512, 2, 2, 1, 1, 1024, 2, 1, 2, 0}; CheckTracesAndReset(&table, k3, ARRAYSIZE(k3)); // Table w/ t1, t3 // Same stack as t1, but w/ different size tcmalloc::StackTrace t3; t3.size = static_cast<uintptr_t>(2); t3.depth = static_cast<uintptr_t>(2); t3.stack[0] = reinterpret_cast<void*>(1); t3.stack[1] = reinterpret_cast<void*>(2); AddTrace(&table, t1); AddTrace(&table, t3); CHECK_EQ(table.depth_total(), 4); CHECK_EQ(table.bucket_total(), 2); static const uintptr_t k5[] = {1, 2, 2, 1, 2, 1, 1024, 2, 1, 2, 0}; CheckTracesAndReset(&table, k5, ARRAYSIZE(k5)); puts("PASS"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/memalign_unittest.cc
.cc
6,892
222
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2004, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Sanjay Ghemawat // // Check memalign related routines. // // We can't really do a huge amount of checking, but at the very // least, the following code checks that return values are properly // aligned, and that writing into the objects works. #include "config_for_unittests.h" // Complicated ordering requirements. 
tcmalloc.h defines (indirectly) // _POSIX_C_SOURCE, which it needs so stdlib.h defines posix_memalign. // unistd.h, on the other hand, requires _POSIX_C_SOURCE to be unset, // at least on Mac OS X, in order to define getpagesize. The solution // is to #include unistd.h first. This is safe because unistd.h // doesn't sub-include stdlib.h, so we'll still get posix_memalign // when we #include stdlib.h. Blah. #ifdef HAVE_UNISTD_H #include <unistd.h> // for getpagesize() #endif #include "tcmalloc.h" // must come early, to pick up posix_memalign #include <assert.h> #include <stdlib.h> // defines posix_memalign #include <stdio.h> // for the printf at the end #ifdef HAVE_STDINT_H #include <stdint.h> // for uintptr_t #endif #ifdef HAVE_UNISTD_H #include <unistd.h> // for getpagesize() #endif // Malloc can be in several places on older versions of OS X. #if defined(HAVE_MALLOC_H) #include <malloc.h> // for memalign() and valloc() #elif defined(HAVE_MALLOC_MALLOC_H) #include <malloc/malloc.h> #elif defined(HAVE_SYS_MALLOC_H) #include <sys/malloc.h> #endif #include "base/basictypes.h" #include "base/logging.h" #include "tests/testutil.h" // Return the next interesting size/delta to check. Returns -1 if no more. 
static int NextSize(int size) { if (size < 100) { return size+1; } else if (size < 1048576) { // Find next power of two int power = 1; while (power < size) { power <<= 1; } // Yield (power-1, power, power+1) if (size < power-1) { return power-1; } else if (size == power-1) { return power; } else { assert(size == power); return power+1; } } else { return -1; } } // Shortform for cast static uintptr_t Number(void* p) { return reinterpret_cast<uintptr_t>(p); } // Check alignment static void CheckAlignment(void* p, int align) { if ((Number(p) & (align-1)) != 0) LOG(FATAL, "wrong alignment; wanted 0x%x; got %p\n", align, p); } // Fill a buffer of the specified size with a predetermined pattern static void Fill(void* p, int n, char seed) { unsigned char* buffer = reinterpret_cast<unsigned char*>(p); for (int i = 0; i < n; i++) { buffer[i] = ((seed + i) & 0xff); } } // Check that the specified buffer has the predetermined pattern // generated by Fill() static bool Valid(const void* p, int n, char seed) { const unsigned char* buffer = reinterpret_cast<const unsigned char*>(p); for (int i = 0; i < n; i++) { if (buffer[i] != ((seed + i) & 0xff)) { return false; } } return true; } int main(int argc, char** argv) { SetTestResourceLimit(); // Try allocating data with a bunch of alignments and sizes for (int a = 1; a < 1048576; a *= 2) { for (int s = 0; s != -1; s = NextSize(s)) { void* ptr = memalign(a, s); CheckAlignment(ptr, a); Fill(ptr, s, 'x'); CHECK(Valid(ptr, s, 'x')); free(ptr); if ((a >= sizeof(void*)) && ((a & (a-1)) == 0)) { CHECK(posix_memalign(&ptr, a, s) == 0); CheckAlignment(ptr, a); Fill(ptr, s, 'y'); CHECK(Valid(ptr, s, 'y')); free(ptr); } } } { // Check various corner cases void* p1 = memalign(1<<20, 1<<19); void* p2 = memalign(1<<19, 1<<19); void* p3 = memalign(1<<21, 1<<19); CheckAlignment(p1, 1<<20); CheckAlignment(p2, 1<<19); CheckAlignment(p3, 1<<21); Fill(p1, 1<<19, 'a'); Fill(p2, 1<<19, 'b'); Fill(p3, 1<<19, 'c'); CHECK(Valid(p1, 1<<19, 'a')); 
CHECK(Valid(p2, 1<<19, 'b')); CHECK(Valid(p3, 1<<19, 'c')); free(p1); free(p2); free(p3); } { // posix_memalign void* ptr; CHECK(posix_memalign(&ptr, 0, 1) == EINVAL); CHECK(posix_memalign(&ptr, sizeof(void*)/2, 1) == EINVAL); CHECK(posix_memalign(&ptr, sizeof(void*)+1, 1) == EINVAL); CHECK(posix_memalign(&ptr, 4097, 1) == EINVAL); // Grab some memory so that the big allocation below will definitely fail. void* p_small = malloc(4*1048576); CHECK(p_small != NULL); // Make sure overflow is returned as ENOMEM const size_t zero = 0; static const size_t kMinusNTimes = 10; for ( size_t i = 1; i < kMinusNTimes; ++i ) { int r = posix_memalign(&ptr, 1024, zero - i); CHECK(r == ENOMEM); } free(p_small); } const int pagesize = getpagesize(); { // valloc for (int s = 0; s != -1; s = NextSize(s)) { void* p = valloc(s); CheckAlignment(p, pagesize); Fill(p, s, 'v'); CHECK(Valid(p, s, 'v')); free(p); } } { // pvalloc for (int s = 0; s != -1; s = NextSize(s)) { void* p = pvalloc(s); CheckAlignment(p, pagesize); int alloc_needed = ((s + pagesize - 1) / pagesize) * pagesize; Fill(p, alloc_needed, 'x'); CHECK(Valid(p, alloc_needed, 'x')); free(p); } } printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/profiledata_unittest.cc
.cc
20,144
613
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // --- // Author: Chris Demetriou // // This file contains the unit tests for the ProfileData class. 
#if defined HAVE_STDINT_H #include <stdint.h> // to get uintptr_t #elif defined HAVE_INTTYPES_H #include <inttypes.h> // another place uintptr_t might be defined #endif #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <string.h> #include <string> #include "profiledata.h" #include "base/commandlineflags.h" #include "base/logging.h" using std::string; // Some helpful macros for the test class #define TEST_F(cls, fn) void cls :: fn() namespace { template<typename T> class scoped_array { public: scoped_array(T* data) : data_(data) { } ~scoped_array() { delete[] data_; } T* get() { return data_; } T& operator[](int i) { return data_[i]; } private: T* const data_; }; // Re-runs fn until it doesn't cause EINTR. #define NO_INTR(fn) do {} while ((fn) < 0 && errno == EINTR) // Read up to "count" bytes from file descriptor "fd" into the buffer // starting at "buf" while handling short reads and EINTR. On // success, return the number of bytes read. Otherwise, return -1. static ssize_t ReadPersistent(const int fd, void *buf, const size_t count) { CHECK_GE(fd, 0); char *buf0 = reinterpret_cast<char *>(buf); ssize_t num_bytes = 0; while (num_bytes < count) { ssize_t len; NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes)); if (len < 0) { // There was an error other than EINTR. return -1; } if (len == 0) { // Reached EOF. break; } num_bytes += len; } CHECK(num_bytes <= count); return num_bytes; } // Thin wrapper around a file descriptor so that the file descriptor // gets closed for sure. struct FileDescriptor { const int fd_; explicit FileDescriptor(int fd) : fd_(fd) {} ~FileDescriptor() { if (fd_ >= 0) { NO_INTR(close(fd_)); } } int get() { return fd_; } }; // must be the same as with ProfileData::Slot. typedef uintptr_t ProfileDataSlot; // Quick and dirty function to make a number into a void* for use in a // sample. 
inline void* V(intptr_t x) { return reinterpret_cast<void*>(x); } // String returned by ProfileDataChecker helper functions to indicate success. const char kNoError[] = ""; class ProfileDataChecker { public: ProfileDataChecker() { const char* tmpdir = getenv("TMPDIR"); if (tmpdir == NULL) tmpdir = "/tmp"; mkdir(tmpdir, 0755); // if necessary filename_ = string(tmpdir) + "/profiledata_unittest.tmp"; } string filename() const { return filename_; } // Checks the first 'num_slots' profile data slots in the file // against the data pointed to by 'slots'. Returns kNoError if the // data matched, otherwise returns an indication of the cause of the // mismatch. string Check(const ProfileDataSlot* slots, int num_slots) { return CheckWithSkips(slots, num_slots, NULL, 0); } // Checks the first 'num_slots' profile data slots in the file // against the data pointed to by 'slots', skipping over entries // described by 'skips' and 'num_skips'. // // 'skips' must be a sorted list of (0-based) slot numbers to be // skipped, of length 'num_skips'. Note that 'num_slots' includes // any skipped slots, i.e., the first 'num_slots' profile data slots // will be considered, but some may be skipped. // // Returns kNoError if the data matched, otherwise returns an // indication of the cause of the mismatch. string CheckWithSkips(const ProfileDataSlot* slots, int num_slots, const int* skips, int num_skips); // Validate that a profile is correctly formed. The profile is // assumed to have been created by the same kind of binary (e.g., // same slot size, same endian, etc.) as is validating the profile. // // Returns kNoError if the profile appears valid, otherwise returns // an indication of the problem with the profile. 
string ValidateProfile(); private: string filename_; }; string ProfileDataChecker::CheckWithSkips(const ProfileDataSlot* slots, int num_slots, const int* skips, int num_skips) { FileDescriptor fd(open(filename_.c_str(), O_RDONLY)); if (fd.get() < 0) return "file open error"; scoped_array<ProfileDataSlot> filedata(new ProfileDataSlot[num_slots]); size_t expected_bytes = num_slots * sizeof filedata[0]; ssize_t bytes_read = ReadPersistent(fd.get(), filedata.get(), expected_bytes); if (expected_bytes != bytes_read) return "file too small"; for (int i = 0; i < num_slots; i++) { if (num_skips > 0 && *skips == i) { num_skips--; skips++; continue; } if (slots[i] != filedata[i]) return "data mismatch"; } return kNoError; } string ProfileDataChecker::ValidateProfile() { FileDescriptor fd(open(filename_.c_str(), O_RDONLY)); if (fd.get() < 0) return "file open error"; struct stat statbuf; if (fstat(fd.get(), &statbuf) != 0) return "fstat error"; if (statbuf.st_size != static_cast<ssize_t>(statbuf.st_size)) return "file impossibly large"; ssize_t filesize = statbuf.st_size; scoped_array<char> filedata(new char[filesize]); if (ReadPersistent(fd.get(), filedata.get(), filesize) != filesize) return "read of whole file failed"; // Must have enough data for the header and the trailer. if (filesize < (5 + 3) * sizeof(ProfileDataSlot)) return "not enough data in profile for header + trailer"; // Check the header if (reinterpret_cast<ProfileDataSlot*>(filedata.get())[0] != 0) return "error in header: non-zero count"; if (reinterpret_cast<ProfileDataSlot*>(filedata.get())[1] != 3) return "error in header: num_slots != 3"; if (reinterpret_cast<ProfileDataSlot*>(filedata.get())[2] != 0) return "error in header: non-zero format version"; // Period (slot 3) can have any value. if (reinterpret_cast<ProfileDataSlot*>(filedata.get())[4] != 0) return "error in header: non-zero padding value"; ssize_t cur_offset = 5 * sizeof(ProfileDataSlot); // While there are samples, skip them. 
Each sample consists of // at least three slots. bool seen_trailer = false; while (!seen_trailer) { if (cur_offset > filesize - 3 * sizeof(ProfileDataSlot)) return "truncated sample header"; ProfileDataSlot* sample = reinterpret_cast<ProfileDataSlot*>(filedata.get() + cur_offset); ProfileDataSlot slots_this_sample = 2 + sample[1]; ssize_t size_this_sample = slots_this_sample * sizeof(ProfileDataSlot); if (cur_offset > filesize - size_this_sample) return "truncated sample"; if (sample[0] == 0 && sample[1] == 1 && sample[2] == 0) { seen_trailer = true; } else { if (sample[0] < 1) return "error in sample: sample count < 1"; if (sample[1] < 1) return "error in sample: num_pcs < 1"; for (int i = 2; i < slots_this_sample; i++) { if (sample[i] == 0) return "error in sample: NULL PC"; } } cur_offset += size_this_sample; } // There must be at least one line in the (text) list of mapped objects, // and it must be terminated by a newline. Note, the use of newline // here and below Might not be reasonable on non-UNIX systems. if (cur_offset >= filesize) return "no list of mapped objects"; if (filedata[filesize - 1] != '\n') return "profile did not end with a complete line"; while (cur_offset < filesize) { char* line_start = filedata.get() + cur_offset; // Find the end of the line, and replace it with a NUL for easier // scanning. char* line_end = strchr(line_start, '\n'); *line_end = '\0'; // Advance past any leading space. It's allowed in some lines, // but not in others. bool has_leading_space = false; char* line_cur = line_start; while (*line_cur == ' ') { has_leading_space = true; line_cur++; } bool found_match = false; // Check for build lines. if (!found_match) { found_match = (strncmp(line_cur, "build=", 6) == 0); // Anything may follow "build=", and leading space is allowed. } // A line from ProcMapsIterator::FormatLine, of the form: // // 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so // // Leading space is not allowed. 
The filename may be omitted or // may consist of multiple words, so we scan only up to the // space before the filename. if (!found_match) { int chars_scanned = -1; sscanf(line_cur, "%*x-%*x %*c%*c%*c%*c %*x %*x:%*x %*d %n", &chars_scanned); found_match = (chars_scanned > 0 && !has_leading_space); } // A line from DumpAddressMap, of the form: // // 40000000-40015000: /lib/ld-2.3.2.so // // Leading space is allowed. The filename may be omitted or may // consist of multiple words, so we scan only up to the space // before the filename. if (!found_match) { int chars_scanned = -1; sscanf(line_cur, "%*x-%*x: %n", &chars_scanned); found_match = (chars_scanned > 0); } if (!found_match) return "unrecognized line in text section"; cur_offset += (line_end - line_start) + 1; } return kNoError; } class ProfileDataTest { protected: void ExpectStopped() { EXPECT_FALSE(collector_.enabled()); } void ExpectRunningSamples(int samples) { ProfileData::State state; collector_.GetCurrentState(&state); EXPECT_TRUE(state.enabled); EXPECT_EQ(samples, state.samples_gathered); } void ExpectSameState(const ProfileData::State& before, const ProfileData::State& after) { EXPECT_EQ(before.enabled, after.enabled); EXPECT_EQ(before.samples_gathered, after.samples_gathered); EXPECT_EQ(before.start_time, after.start_time); EXPECT_STREQ(before.profile_name, after.profile_name); } ProfileData collector_; ProfileDataChecker checker_; private: // The tests to run void OpsWhenStopped(); void StartStopEmpty(); void StartStopNoOptionsEmpty(); void StartWhenStarted(); void StartStopEmpty2(); void CollectOne(); void CollectTwoMatching(); void CollectTwoFlush(); void StartResetRestart(); public: #define RUN(test) do { \ printf("Running %s\n", #test); \ ProfileDataTest pdt; \ pdt.test(); \ } while (0) static int RUN_ALL_TESTS() { RUN(OpsWhenStopped); RUN(StartStopEmpty); RUN(StartWhenStarted); RUN(StartStopEmpty2); RUN(CollectOne); RUN(CollectTwoMatching); RUN(CollectTwoFlush); RUN(StartResetRestart); 
RUN(StartStopNoOptionsEmpty); return 0; } }; // Check that various operations are safe when stopped. TEST_F(ProfileDataTest, OpsWhenStopped) { ExpectStopped(); EXPECT_FALSE(collector_.enabled()); // Verify that state is disabled, all-empty/all-0 ProfileData::State state_before; collector_.GetCurrentState(&state_before); EXPECT_FALSE(state_before.enabled); EXPECT_EQ(0, state_before.samples_gathered); EXPECT_EQ(0, state_before.start_time); EXPECT_STREQ("", state_before.profile_name); // Safe to call stop again. collector_.Stop(); // Safe to call FlushTable. collector_.FlushTable(); // Safe to call Add. const void *trace[] = { V(100), V(101), V(102), V(103), V(104) }; collector_.Add(arraysize(trace), trace); ProfileData::State state_after; collector_.GetCurrentState(&state_after); ExpectSameState(state_before, state_after); } // Start and Stop, collecting no samples. Verify output contents. TEST_F(ProfileDataTest, StartStopEmpty) { const int frequency = 1; ProfileDataSlot slots[] = { 0, 3, 0, 1000000 / frequency, 0, // binary header 0, 1, 0 // binary trailer }; ExpectStopped(); ProfileData::Options options; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } // Start and Stop with no options, collecting no samples. Verify // output contents. TEST_F(ProfileDataTest, StartStopNoOptionsEmpty) { // We're not requesting a specific period, implementation can do // whatever it likes. 
ProfileDataSlot slots[] = { 0, 3, 0, 0 /* skipped */, 0, // binary header 0, 1, 0 // binary trailer }; int slots_to_skip[] = { 3 }; ExpectStopped(); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), ProfileData::Options())); ExpectRunningSamples(0); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.CheckWithSkips(slots, arraysize(slots), slots_to_skip, arraysize(slots_to_skip))); } // Start after already started. Should return false and not impact // collected data or state. TEST_F(ProfileDataTest, StartWhenStarted) { const int frequency = 1; ProfileDataSlot slots[] = { 0, 3, 0, 1000000 / frequency, 0, // binary header 0, 1, 0 // binary trailer }; ProfileData::Options options; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ProfileData::State state_before; collector_.GetCurrentState(&state_before); options.set_frequency(frequency * 2); CHECK(!collector_.Start("foobar", options)); ProfileData::State state_after; collector_.GetCurrentState(&state_after); ExpectSameState(state_before, state_after); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } // Like StartStopEmpty, but uses a different file name and frequency. 
TEST_F(ProfileDataTest, StartStopEmpty2) { const int frequency = 2; ProfileDataSlot slots[] = { 0, 3, 0, 1000000 / frequency, 0, // binary header 0, 1, 0 // binary trailer }; ExpectStopped(); ProfileData::Options options; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } TEST_F(ProfileDataTest, CollectOne) { const int frequency = 2; ProfileDataSlot slots[] = { 0, 3, 0, 1000000 / frequency, 0, // binary header 1, 5, 100, 101, 102, 103, 104, // our sample 0, 1, 0 // binary trailer }; ExpectStopped(); ProfileData::Options options; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); const void *trace[] = { V(100), V(101), V(102), V(103), V(104) }; collector_.Add(arraysize(trace), trace); ExpectRunningSamples(1); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } TEST_F(ProfileDataTest, CollectTwoMatching) { const int frequency = 2; ProfileDataSlot slots[] = { 0, 3, 0, 1000000 / frequency, 0, // binary header 2, 5, 100, 201, 302, 403, 504, // our two samples 0, 1, 0 // binary trailer }; ExpectStopped(); ProfileData::Options options; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); for (int i = 0; i < 2; ++i) { const void *trace[] = { V(100), V(201), V(302), V(403), V(504) }; collector_.Add(arraysize(trace), trace); ExpectRunningSamples(i + 1); } collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } TEST_F(ProfileDataTest, CollectTwoFlush) { const int frequency = 2; ProfileDataSlot slots[] = { 0, 3, 0, 
1000000 / frequency, 0, // binary header 1, 5, 100, 201, 302, 403, 504, // first sample (flushed) 1, 5, 100, 201, 302, 403, 504, // second identical sample 0, 1, 0 // binary trailer }; ExpectStopped(); ProfileData::Options options; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); const void *trace[] = { V(100), V(201), V(302), V(403), V(504) }; collector_.Add(arraysize(trace), trace); ExpectRunningSamples(1); collector_.FlushTable(); collector_.Add(arraysize(trace), trace); ExpectRunningSamples(2); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } // Start then reset, verify that the result is *not* a valid profile. // Then start again and make sure the result is OK. TEST_F(ProfileDataTest, StartResetRestart) { ExpectStopped(); ProfileData::Options options; options.set_frequency(1); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); collector_.Reset(); ExpectStopped(); // We expect the resulting file to be empty. This is a minimal test // of ValidateProfile. EXPECT_NE(kNoError, checker_.ValidateProfile()); struct stat statbuf; EXPECT_EQ(0, stat(checker_.filename().c_str(), &statbuf)); EXPECT_EQ(0, statbuf.st_size); const int frequency = 2; // Different frequency than used above. ProfileDataSlot slots[] = { 0, 3, 0, 1000000 / frequency, 0, // binary header 0, 1, 0 // binary trailer }; options.set_frequency(frequency); EXPECT_TRUE(collector_.Start(checker_.filename().c_str(), options)); ExpectRunningSamples(0); collector_.Stop(); ExpectStopped(); EXPECT_EQ(kNoError, checker_.ValidateProfile()); EXPECT_EQ(kNoError, checker_.Check(slots, arraysize(slots))); } } // namespace int main(int argc, char** argv) { int rc = ProfileDataTest::RUN_ALL_TESTS(); printf("%s\n", rc == 0 ? "PASS" : "FAIL"); return rc; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/heap-profiler_unittest.cc
.cc
5,320
169
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Craig Silverstein // // A small program that just exercises our heap profiler by allocating // memory and letting the heap-profiler emit a profile. We don't test // threads (TODO). By itself, this unittest tests that the heap-profiler // doesn't crash on simple programs, but its output can be analyzed by // another testing script to actually verify correctness. 
See, eg, // heap-profiler_unittest.sh. #include "config_for_unittests.h" #include <stdlib.h> #include <stdio.h> #include <fcntl.h> // for mkdir() #include <sys/stat.h> // for mkdir() on freebsd and os x #ifdef HAVE_UNISTD_H #include <unistd.h> // for fork() #endif #include <sys/wait.h> // for wait() #include <string> #include "base/basictypes.h" #include "base/logging.h" #include <gperftools/heap-profiler.h> using std::string; static const int kMaxCount = 100000; int* g_array[kMaxCount]; // an array of int-vectors static ATTRIBUTE_NOINLINE void Allocate(int start, int end, int size) { // NOTE: we're using this to prevent gcc 5 from merging otherwise // identical Allocate & Allocate2 functions. VLOG(10, "Allocate"); for (int i = start; i < end; ++i) { if (i < kMaxCount) g_array[i] = new int[size]; } } static ATTRIBUTE_NOINLINE void Allocate2(int start, int end, int size) { VLOG(10, "Allocate2"); for (int i = start; i < end; ++i) { if (i < kMaxCount) g_array[i] = new int[size]; } } static void Deallocate(int start, int end) { for (int i = start; i < end; ++i) { delete[] g_array[i]; g_array[i] = 0; } } static void TestHeapProfilerStartStopIsRunning() { // If you run this with whole-program heap-profiling on, than // IsHeapProfilerRunning should return true. if (!IsHeapProfilerRunning()) { const char* tmpdir = getenv("TMPDIR"); if (tmpdir == NULL) tmpdir = "/tmp"; mkdir(tmpdir, 0755); // if necessary HeapProfilerStart((string(tmpdir) + "/start_stop").c_str()); CHECK(IsHeapProfilerRunning()); Allocate(0, 40, 100); Deallocate(0, 40); HeapProfilerStop(); CHECK(!IsHeapProfilerRunning()); } } static void TestDumpHeapProfiler() { // If you run this with whole-program heap-profiling on, than // IsHeapProfilerRunning should return true. 
if (!IsHeapProfilerRunning()) { const char* tmpdir = getenv("TMPDIR"); if (tmpdir == NULL) tmpdir = "/tmp"; mkdir(tmpdir, 0755); // if necessary HeapProfilerStart((string(tmpdir) + "/dump").c_str()); CHECK(IsHeapProfilerRunning()); Allocate(0, 40, 100); Deallocate(0, 40); char* output = GetHeapProfile(); free(output); HeapProfilerStop(); } } int main(int argc, char** argv) { if (argc > 2 || (argc == 2 && argv[1][0] == '-')) { printf("USAGE: %s [number of children to fork]\n", argv[0]); exit(0); } int num_forks = 0; if (argc == 2) { num_forks = atoi(argv[1]); } TestHeapProfilerStartStopIsRunning(); TestDumpHeapProfiler(); Allocate(0, 40, 100); Deallocate(0, 40); Allocate(0, 40, 100); Allocate(0, 40, 100); Allocate2(40, 400, 1000); Allocate2(400, 1000, 10000); Deallocate(0, 1000); Allocate(0, 100, 100000); Deallocate(0, 10); Deallocate(10, 20); Deallocate(90, 100); Deallocate(20, 90); while (num_forks-- > 0) { switch (fork()) { case -1: printf("FORK failed!\n"); return 1; case 0: // child return execl(argv[0], argv[0], NULL); // run child with no args default: wait(NULL); // we'll let the kids run one at a time } } printf("DONE.\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/heap-profiler_unittest.sh
.sh
5,542
148
#!/bin/sh # Copyright (c) 2005, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # --- # Author: Craig Silverstein # # Runs the heap-profiler unittest and makes sure the profile looks appropriate. # # We run under the assumption that if $HEAP_PROFILER is run with --help, # it prints a usage line of the form # USAGE: <actual executable being run> [...] # # This is because libtool sometimes turns the 'executable' into a # shell script which runs an actual binary somewhere else. # We expect BINDIR and PPROF_PATH to be set in the environment. 
# If not, we set them to some reasonable values BINDIR="${BINDIR:-.}" PPROF_PATH="${PPROF_PATH:-$BINDIR/src/pprof}" if [ "x$1" = "x-h" -o "x$1" = "x--help" ]; then echo "USAGE: $0 [unittest dir] [path to pprof]" echo " By default, unittest_dir=$BINDIR, pprof_path=$PPROF_PATH" exit 1 fi HEAP_PROFILER="${1:-$BINDIR/heap-profiler_unittest}" PPROF="${2:-$PPROF_PATH}" TEST_TMPDIR=`mktemp -d /tmp/heap-profiler_unittest.XXXXXX` # It's meaningful to the profiler, so make sure we know its state unset HEAPPROFILE num_failures=0 # Given one profile (to check the contents of that profile) or two # profiles (to check the diff between the profiles), and a function # name, verify that the function name takes up at least 90% of the # allocated memory. The function name is actually specified first. VerifyMemFunction() { function="$1" shift # get program name. Note we have to unset HEAPPROFILE so running # help doesn't overwrite existing profiles. exec=`unset HEAPPROFILE; $HEAP_PROFILER --help | awk '{print $2; exit;}'` if [ $# = 2 ]; then [ -f "$1" ] || { echo "Profile not found: $1"; exit 1; } [ -f "$2" ] || { echo "Profile not found: $2"; exit 1; } $PPROF --base="$1" $exec "$2" >"$TEST_TMPDIR/output.pprof" 2>&1 else [ -f "$1" ] || { echo "Profile not found: $1"; exit 1; } $PPROF $exec "$1" >"$TEST_TMPDIR/output.pprof" 2>&1 fi cat "$TEST_TMPDIR/output.pprof" \ | tr -d % | awk '$6 ~ /^'$function'$/ && $2 > 90 {exit 1;}' if [ $? != 1 ]; then echo echo "--- Test failed for $function: didn't account for 90% of executable memory" echo "--- Program output:" cat "$TEST_TMPDIR/output" echo "--- pprof output:" cat "$TEST_TMPDIR/output.pprof" echo "---" num_failures=`expr $num_failures + 1` fi } VerifyOutputContains() { text="$1" if ! 
grep "$text" "$TEST_TMPDIR/output" >/dev/null 2>&1; then echo "--- Test failed: output does not contain '$text'" echo "--- Program output:" cat "$TEST_TMPDIR/output" echo "---" num_failures=`expr $num_failures + 1` fi } HEAPPROFILE="$TEST_TMPDIR/test" HEAP_PROFILE_INUSE_INTERVAL="10240" # need this to be 10Kb HEAP_PROFILE_ALLOCATION_INTERVAL="$HEAP_PROFILE_INUSE_INTERVAL" HEAP_PROFILE_DEALLOCATION_INTERVAL="$HEAP_PROFILE_INUSE_INTERVAL" export HEAPPROFILE export HEAP_PROFILE_INUSE_INTERVAL export HEAP_PROFILE_ALLOCATION_INTERVAL export HEAP_PROFILE_DEALLOCATION_INTERVAL # We make the unittest run a child process, to test that the child # process doesn't try to write a heap profile as well and step on the # parent's toes. If it does, we expect the parent-test to fail. $HEAP_PROFILER 1 >$TEST_TMPDIR/output 2>&1 # run program, with 1 child proc VerifyMemFunction Allocate2 "$HEAPPROFILE.1329.heap" VerifyMemFunction Allocate "$HEAPPROFILE.1448.heap" "$HEAPPROFILE.1548.heap" # Check the child process got to emit its own profile as well. VerifyMemFunction Allocate2 "$HEAPPROFILE"_*.1329.heap VerifyMemFunction Allocate "$HEAPPROFILE"_*.1448.heap "$HEAPPROFILE"_*.1548.heap # Make sure we logged both about allocating and deallocating memory VerifyOutputContains "62 MB allocated" VerifyOutputContains "62 MB freed" # Now try running without --heap_profile specified, to allow # testing of the HeapProfileStart/Stop functionality. $HEAP_PROFILER >"$TEST_TMPDIR/output2" 2>&1 rm -rf $TEST_TMPDIR # clean up if [ $num_failures = 0 ]; then echo "PASS" else echo "Tests finished with $num_failures failures" fi exit $num_failures
Shell
3D
mcellteam/mcell
libs/gperftools/src/tests/sampling_test.sh
.sh
3,780
95
#!/bin/sh # Copyright (c) 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # --- # Author: Craig Silverstein # # This is a test that tcmalloc creates, and pprof reads, sampling data # correctly: both for the heap profile (ReadStackTraces) and for # growth in the heap sized (ReadGrowthStackTraces). 
BINDIR="${BINDIR:-.}" PPROF_PATH="${PPROF_PATH:-$BINDIR/src/pprof}" if [ "x$1" = "x-h" -o "x$1" = "x--help" ]; then echo "USAGE: $0 [unittest dir] [path to pprof]" echo " By default, unittest_dir=$BINDIR, pprof_path=$PPROF_PATH" exit 1 fi SAMPLING_TEST="${1:-$BINDIR/sampling_test}" PPROF="${2:-$PPROF_PATH}" OUTDIR="/tmp/sampling_test_dir" # libtool is annoying, and puts the actual executable in a different # directory, replacing the seeming-executable with a shell script. # We use the error output of sampling_test to indicate its real location SAMPLING_TEST_BINARY=`"$SAMPLING_TEST" 2>&1 | awk '/USAGE/ {print $2; exit;}'` # A kludge for cygwin. Unfortunately, 'test -f' says that 'foo' exists # even when it doesn't, and only foo.exe exists. Other unix utilities # (like nm) need you to say 'foo.exe'. We use one such utility, cat, to # see what the *real* binary name is. if ! cat "$SAMPLING_TEST_BINARY" >/dev/null 2>&1; then SAMPLING_TEST_BINARY="$SAMPLING_TEST_BINARY".exe fi die() { # runs the command given as arguments, and then dies. echo "FAILED. Output from $@" echo "----" "$@" echo "----" exit 1 } rm -rf "$OUTDIR" || die "Unable to delete $OUTDIR" mkdir "$OUTDIR" || die "Unable to create $OUTDIR" # This puts the output into out.heap and out.growth. It allocates # 8*10^7 bytes of memory, which is 76M. Because we sample, the # estimate may be a bit high or a bit low: we accept anything from # 50M to 99M. "$SAMPLING_TEST" "$OUTDIR/out" echo "Testing heap output..." "$PPROF" --text "$SAMPLING_TEST_BINARY" "$OUTDIR/out.heap" \ | grep '[5-9][0-9]\.[0-9][ 0-9.%]*_*AllocateAllocate' >/dev/null \ || die "$PPROF" --text "$SAMPLING_TEST_BINARY" "$OUTDIR/out.heap" echo "OK" echo "Testing growth output..." "$PPROF" --text "$SAMPLING_TEST_BINARY" "$OUTDIR/out.growth" \ | grep '[5-9][0-9]\.[0-9][ 0-9.%]*_*AllocateAllocate' >/dev/null \ || die "$PPROF" --text "$SAMPLING_TEST_BINARY" "$OUTDIR/out.growth" echo "OK" echo "PASS"
Shell
3D
mcellteam/mcell
libs/gperftools/src/tests/malloc_hook_test.cc
.cc
12,156
368
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2011, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// ---- // Author: llib@google.com (Bill Clarke) #include "config_for_unittests.h" #include <assert.h> #include <stdio.h> #ifdef HAVE_MMAP #include <sys/mman.h> #endif #ifdef HAVE_UNISTD_H #include <unistd.h> // for sleep() #endif #include <algorithm> #include <string> #include <vector> #include <gperftools/malloc_hook.h> #include "malloc_hook-inl.h" #include "base/logging.h" #include "base/simple_mutex.h" #include "base/sysinfo.h" #include "tests/testutil.h" // On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old // form of the name instead. #ifndef MAP_ANONYMOUS # define MAP_ANONYMOUS MAP_ANON #endif namespace { using std::string; using std::vector; vector<void (*)()> g_testlist; // the tests to run #define TEST(a, b) \ struct Test_##a##_##b { \ Test_##a##_##b() { g_testlist.push_back(&Run); } \ static void Run(); \ }; \ static Test_##a##_##b g_test_##a##_##b; \ void Test_##a##_##b::Run() static int RUN_ALL_TESTS() { vector<void (*)()>::const_iterator it; for (it = g_testlist.begin(); it != g_testlist.end(); ++it) { (*it)(); // The test will error-exit if there's a problem. } fprintf(stderr, "\nPassed %d tests\n\nPASS\n", static_cast<int>(g_testlist.size())); return 0; } void Sleep(int seconds) { #ifdef _MSC_VER _sleep(seconds * 1000); // Windows's _sleep takes milliseconds argument #else sleep(seconds); #endif } using std::min; using base::internal::kHookListMaxValues; // Since HookList is a template and is defined in malloc_hook.cc, we can only // use an instantiation of it from malloc_hook.cc. We then reinterpret those // values as integers for testing. 
typedef base::internal::HookList<MallocHook::NewHook> TestHookList; int TestHookList_Traverse(const TestHookList& list, uintptr_t* output_array, int n) { MallocHook::NewHook values_as_hooks[kHookListMaxValues]; int result = list.Traverse(values_as_hooks, min(n, kHookListMaxValues)); for (int i = 0; i < result; ++i) { output_array[i] = reinterpret_cast<const uintptr_t>(*values_as_hooks[i]); } return result; } bool TestHookList_Add(TestHookList* list, int val) { return list->Add(reinterpret_cast<MallocHook::NewHook>(val)); } bool TestHookList_Remove(TestHookList* list, int val) { return list->Remove(reinterpret_cast<MallocHook::NewHook>(val)); } // Note that this is almost the same as INIT_HOOK_LIST in malloc_hook.cc without // the cast. #define INIT_HOOK_LIST(initial_value) { 1, { initial_value } } TEST(HookListTest, InitialValueExists) { TestHookList list = INIT_HOOK_LIST(69); uintptr_t values[2] = { 0, 0 }; EXPECT_EQ(1, TestHookList_Traverse(list, values, 2)); EXPECT_EQ(69, values[0]); EXPECT_EQ(1, list.priv_end); } TEST(HookListTest, CanRemoveInitialValue) { TestHookList list = INIT_HOOK_LIST(69); ASSERT_TRUE(TestHookList_Remove(&list, 69)); EXPECT_EQ(0, list.priv_end); uintptr_t values[2] = { 0, 0 }; EXPECT_EQ(0, TestHookList_Traverse(list, values, 2)); } TEST(HookListTest, AddAppends) { TestHookList list = INIT_HOOK_LIST(69); ASSERT_TRUE(TestHookList_Add(&list, 42)); EXPECT_EQ(2, list.priv_end); uintptr_t values[2] = { 0, 0 }; EXPECT_EQ(2, TestHookList_Traverse(list, values, 2)); EXPECT_EQ(69, values[0]); EXPECT_EQ(42, values[1]); } TEST(HookListTest, RemoveWorksAndWillClearSize) { TestHookList list = INIT_HOOK_LIST(69); ASSERT_TRUE(TestHookList_Add(&list, 42)); ASSERT_TRUE(TestHookList_Remove(&list, 69)); EXPECT_EQ(2, list.priv_end); uintptr_t values[2] = { 0, 0 }; EXPECT_EQ(1, TestHookList_Traverse(list, values, 2)); EXPECT_EQ(42, values[0]); ASSERT_TRUE(TestHookList_Remove(&list, 42)); EXPECT_EQ(0, list.priv_end); EXPECT_EQ(0, TestHookList_Traverse(list, 
values, 2)); } TEST(HookListTest, AddPrependsAfterRemove) { TestHookList list = INIT_HOOK_LIST(69); ASSERT_TRUE(TestHookList_Add(&list, 42)); ASSERT_TRUE(TestHookList_Remove(&list, 69)); EXPECT_EQ(2, list.priv_end); ASSERT_TRUE(TestHookList_Add(&list, 7)); EXPECT_EQ(2, list.priv_end); uintptr_t values[2] = { 0, 0 }; EXPECT_EQ(2, TestHookList_Traverse(list, values, 2)); EXPECT_EQ(7, values[0]); EXPECT_EQ(42, values[1]); } TEST(HookListTest, InvalidAddRejected) { TestHookList list = INIT_HOOK_LIST(69); EXPECT_FALSE(TestHookList_Add(&list, 0)); uintptr_t values[2] = { 0, 0 }; EXPECT_EQ(1, TestHookList_Traverse(list, values, 2)); EXPECT_EQ(69, values[0]); EXPECT_EQ(1, list.priv_end); } TEST(HookListTest, FillUpTheList) { TestHookList list = INIT_HOOK_LIST(69); int num_inserts = 0; while (TestHookList_Add(&list, ++num_inserts)) ; EXPECT_EQ(kHookListMaxValues, num_inserts); EXPECT_EQ(kHookListMaxValues, list.priv_end); uintptr_t values[kHookListMaxValues + 1]; EXPECT_EQ(kHookListMaxValues, TestHookList_Traverse(list, values, kHookListMaxValues)); EXPECT_EQ(69, values[0]); for (int i = 1; i < kHookListMaxValues; ++i) { EXPECT_EQ(i, values[i]); } } void MultithreadedTestThread(TestHookList* list, int shift, int thread_num) { string message; char buf[64]; for (int i = 1; i < 1000; ++i) { // In each loop, we insert a unique value, check it exists, remove it, and // check it doesn't exist. We also record some stats to log at the end of // each thread. Each insertion location and the length of the list is // non-deterministic (except for the very first one, over all threads, and // after the very last one the list should be empty). int value = (i << shift) + thread_num; EXPECT_TRUE(TestHookList_Add(list, value)); sched_yield(); // Ensure some more interleaving. 
uintptr_t values[kHookListMaxValues + 1]; int num_values = TestHookList_Traverse(*list, values, kHookListMaxValues); EXPECT_LT(0, num_values); int value_index; for (value_index = 0; value_index < num_values && values[value_index] != value; ++value_index) ; EXPECT_LT(value_index, num_values); // Should have found value. snprintf(buf, sizeof(buf), "[%d/%d; ", value_index, num_values); message += buf; sched_yield(); EXPECT_TRUE(TestHookList_Remove(list, value)); sched_yield(); num_values = TestHookList_Traverse(*list, values, kHookListMaxValues); for (value_index = 0; value_index < num_values && values[value_index] != value; ++value_index) ; EXPECT_EQ(value_index, num_values); // Should not have found value. snprintf(buf, sizeof(buf), "%d]", num_values); message += buf; sched_yield(); } fprintf(stderr, "thread %d: %s\n", thread_num, message.c_str()); } static volatile int num_threads_remaining; static TestHookList list = INIT_HOOK_LIST(69); static Mutex threadcount_lock; void MultithreadedTestThreadRunner(int thread_num) { // Wait for all threads to start running. { MutexLock ml(&threadcount_lock); assert(num_threads_remaining > 0); --num_threads_remaining; // We should use condvars and the like, but for this test, we'll // go simple and busy-wait. while (num_threads_remaining > 0) { threadcount_lock.Unlock(); Sleep(1); threadcount_lock.Lock(); } } // shift is the smallest number such that (1<<shift) > kHookListMaxValues int shift = 0; for (int i = kHookListMaxValues; i > 0; i >>= 1) shift += 1; MultithreadedTestThread(&list, shift, thread_num); } TEST(HookListTest, MultithreadedTest) { ASSERT_TRUE(TestHookList_Remove(&list, 69)); ASSERT_EQ(0, list.priv_end); // Run kHookListMaxValues thread, each running MultithreadedTestThread. // First, we need to set up the rest of the globals. 
num_threads_remaining = kHookListMaxValues; // a global var RunManyThreadsWithId(&MultithreadedTestThreadRunner, num_threads_remaining, 1 << 15); uintptr_t values[kHookListMaxValues + 1]; EXPECT_EQ(0, TestHookList_Traverse(list, values, kHookListMaxValues)); EXPECT_EQ(0, list.priv_end); } // We only do mmap-hooking on (some) linux systems. #if defined(HAVE_MMAP) && defined(__linux) && \ (defined(__i386__) || defined(__x86_64__) || defined(__PPC__)) int mmap_calls = 0; int mmap_matching_calls = 0; int munmap_calls = 0; int munmap_matching_calls = 0; const int kMmapMagicFd = 1; void* const kMmapMagicPointer = reinterpret_cast<void*>(1); int MmapReplacement(const void* start, size_t size, int protection, int flags, int fd, off_t offset, void** result) { ++mmap_calls; if (fd == kMmapMagicFd) { ++mmap_matching_calls; *result = kMmapMagicPointer; return true; } return false; } int MunmapReplacement(const void* ptr, size_t size, int* result) { ++munmap_calls; if (ptr == kMmapMagicPointer) { ++munmap_matching_calls; *result = 0; return true; } return false; } TEST(MallocMookTest, MmapReplacements) { mmap_calls = mmap_matching_calls = munmap_calls = munmap_matching_calls = 0; MallocHook::SetMmapReplacement(&MmapReplacement); MallocHook::SetMunmapReplacement(&MunmapReplacement); EXPECT_EQ(kMmapMagicPointer, mmap(NULL, 1, PROT_READ, MAP_PRIVATE, kMmapMagicFd, 0)); EXPECT_EQ(1, mmap_matching_calls); char* ptr = reinterpret_cast<char*>( mmap(NULL, 1, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)); EXPECT_EQ(2, mmap_calls); EXPECT_EQ(1, mmap_matching_calls); ASSERT_NE(MAP_FAILED, ptr); *ptr = 'a'; EXPECT_EQ(0, munmap(kMmapMagicPointer, 1)); EXPECT_EQ(1, munmap_calls); EXPECT_EQ(1, munmap_matching_calls); EXPECT_EQ(0, munmap(ptr, 1)); EXPECT_EQ(2, munmap_calls); EXPECT_EQ(1, munmap_matching_calls); // The DEATH test below is flaky, because we've just munmapped the memory, // making it available for mmap()ing again. 
There is no guarantee that it // will stay unmapped, and in fact it gets reused ~10% of the time. // It the area is reused, then not only we don't die, but we also corrupt // whoever owns that memory now. // EXPECT_DEATH(*ptr = 'a', "SIGSEGV"); } #endif // #ifdef HAVE_MMAP && linux && ... } // namespace int main(int argc, char** argv) { return RUN_ALL_TESTS(); }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/profiler_unittest.cc
.cc
4,894
148
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Craig Silverstein // // Does some simple arithmetic and a few libc routines, so we can profile it. // Define WITH_THREADS to add pthread functionality as well (otherwise, btw, // the num_threads argument to this program is ingored). 
#include "config_for_unittests.h" #include <stdio.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> // for fork() #endif #include <sys/wait.h> // for wait() #include "gperftools/profiler.h" #include "base/simple_mutex.h" #include "tests/testutil.h" static volatile int result = 0; static int g_iters = 0; // argv[1] Mutex mutex(Mutex::LINKER_INITIALIZED); static void test_other_thread() { #ifndef NO_THREADS ProfilerRegisterThread(); int i, m; char b[128]; MutexLock ml(&mutex); for (m = 0; m < 1000000; ++m) { // run millions of times for (i = 0; i < g_iters; ++i ) { result ^= i; } snprintf(b, sizeof(b), "other: %d", result); // get some libc action } #endif } static void test_main_thread() { int i, m; char b[128]; MutexLock ml(&mutex); for (m = 0; m < 1000000; ++m) { // run millions of times for (i = 0; i < g_iters; ++i ) { result ^= i; } snprintf(b, sizeof(b), "same: %d", result); // get some libc action } } int main(int argc, char** argv) { if ( argc <= 1 ) { fprintf(stderr, "USAGE: %s <iters> [num_threads] [filename]\n", argv[0]); fprintf(stderr, " iters: How many million times to run the XOR test.\n"); fprintf(stderr, " num_threads: how many concurrent threads.\n"); fprintf(stderr, " 0 or 1 for single-threaded mode,\n"); fprintf(stderr, " -# to fork instead of thread.\n"); fprintf(stderr, " filename: The name of the output profile.\n"); fprintf(stderr, (" If you don't specify, set CPUPROFILE " "in the environment instead!\n")); return 1; } g_iters = atoi(argv[1]); int num_threads = 1; const char* filename = NULL; if (argc > 2) { num_threads = atoi(argv[2]); } if (argc > 3) { filename = argv[3]; } if (filename) { ProfilerStart(filename); } test_main_thread(); ProfilerFlush(); // just because we can // The other threads, if any, will run only half as long as the main thread if(num_threads > 0) { RunManyThreads(test_other_thread, num_threads); } else { // Or maybe they asked to fork. 
The fork test is only interesting // when we use CPUPROFILE to name, so check for that #ifdef HAVE_UNISTD_H for (; num_threads < 0; ++num_threads) { // -<num_threads> to fork if (filename) { printf("FORK test only makes sense when no filename is specified.\n"); return 2; } switch (fork()) { case -1: printf("FORK failed!\n"); return 1; case 0: // child return execl(argv[0], argv[0], argv[1], NULL); default: wait(NULL); // we'll let the kids run one at a time } } #else fprintf(stderr, "%s was compiled without support for fork() and exec()\n", argv[0]); #endif } test_main_thread(); if (filename) { ProfilerStop(); } return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/page_heap_test.cc
.cc
5,871
203
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright 2009 Google Inc. All Rights Reserved. // Author: fikes@google.com (Andrew Fikes) // // Use of this source code is governed by a BSD-style license that can // be found in the LICENSE file. #include "config_for_unittests.h" #include <stdio.h> #include <memory> #include "page_heap.h" #include "system-alloc.h" #include "base/logging.h" #include "common.h" DECLARE_int64(tcmalloc_heap_limit_mb); namespace { // The system will only release memory if the block size is equal or hight than // system page size. static bool HaveSystemRelease = TCMalloc_SystemRelease( TCMalloc_SystemAlloc(getpagesize(), NULL, 0), getpagesize()); static void CheckStats(const tcmalloc::PageHeap* ph, uint64_t system_pages, uint64_t free_pages, uint64_t unmapped_pages) { tcmalloc::PageHeap::Stats stats = ph->stats(); if (!HaveSystemRelease) { free_pages += unmapped_pages; unmapped_pages = 0; } EXPECT_EQ(system_pages, stats.system_bytes >> kPageShift); EXPECT_EQ(free_pages, stats.free_bytes >> kPageShift); EXPECT_EQ(unmapped_pages, stats.unmapped_bytes >> kPageShift); } static void TestPageHeap_Stats() { std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap()); // Empty page heap CheckStats(ph.get(), 0, 0, 0); // Allocate a span 's1' tcmalloc::Span* s1 = ph->New(256); CheckStats(ph.get(), 256, 0, 0); // Split span 's1' into 's1', 's2'. Delete 's2' tcmalloc::Span* s2 = ph->Split(s1, 128); ph->Delete(s2); CheckStats(ph.get(), 256, 128, 0); // Unmap deleted span 's2' ph->ReleaseAtLeastNPages(1); CheckStats(ph.get(), 256, 0, 128); // Delete span 's1' ph->Delete(s1); CheckStats(ph.get(), 256, 128, 128); } // The number of kMaxPages-sized Spans we will allocate and free during the // tests. // We will also do twice this many kMaxPages/2-sized ones. static constexpr int kNumberMaxPagesSpans = 10; // Allocates all the last-level page tables we will need. 
Doing this before // calculating the base heap usage is necessary, because otherwise if any of // these are allocated during the main test it will throw the heap usage // calculations off and cause the test to fail. static void AllocateAllPageTables() { // Make a separate PageHeap from the main test so the test can start without // any pages in the lists. std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap()); tcmalloc::Span *spans[kNumberMaxPagesSpans * 2]; for (int i = 0; i < kNumberMaxPagesSpans; ++i) { spans[i] = ph->New(kMaxPages); EXPECT_NE(spans[i], NULL); } for (int i = 0; i < kNumberMaxPagesSpans; ++i) { ph->Delete(spans[i]); } for (int i = 0; i < kNumberMaxPagesSpans * 2; ++i) { spans[i] = ph->New(kMaxPages >> 1); EXPECT_NE(spans[i], NULL); } for (int i = 0; i < kNumberMaxPagesSpans * 2; ++i) { ph->Delete(spans[i]); } } static void TestPageHeap_Limit() { AllocateAllPageTables(); std::unique_ptr<tcmalloc::PageHeap> ph(new tcmalloc::PageHeap()); CHECK_EQ(kMaxPages, 1 << (20 - kPageShift)); // We do not know much is taken from the system for other purposes, // so we detect the proper limit: { FLAGS_tcmalloc_heap_limit_mb = 1; tcmalloc::Span* s = NULL; while((s = ph->New(kMaxPages)) == NULL) { FLAGS_tcmalloc_heap_limit_mb++; } FLAGS_tcmalloc_heap_limit_mb += kNumberMaxPagesSpans - 1; ph->Delete(s); // We are [10, 11) mb from the limit now. 
} // Test AllocLarge and GrowHeap first: { tcmalloc::Span * spans[kNumberMaxPagesSpans]; for (int i=0; i<kNumberMaxPagesSpans; ++i) { spans[i] = ph->New(kMaxPages); EXPECT_NE(spans[i], NULL); } EXPECT_EQ(ph->New(kMaxPages), NULL); for (int i=0; i<kNumberMaxPagesSpans; i += 2) { ph->Delete(spans[i]); } tcmalloc::Span *defragmented = ph->New(kNumberMaxPagesSpans / 2 * kMaxPages); if (HaveSystemRelease) { // EnsureLimit should release deleted normal spans EXPECT_NE(defragmented, NULL); EXPECT_TRUE(ph->CheckExpensive()); ph->Delete(defragmented); } else { EXPECT_EQ(defragmented, NULL); EXPECT_TRUE(ph->CheckExpensive()); } for (int i=1; i<kNumberMaxPagesSpans; i += 2) { ph->Delete(spans[i]); } } // Once again, testing small lists this time (twice smaller spans): { tcmalloc::Span * spans[kNumberMaxPagesSpans * 2]; for (int i=0; i<kNumberMaxPagesSpans * 2; ++i) { spans[i] = ph->New(kMaxPages >> 1); EXPECT_NE(spans[i], NULL); } // one more half size allocation may be possible: tcmalloc::Span * lastHalf = ph->New(kMaxPages >> 1); EXPECT_EQ(ph->New(kMaxPages >> 1), NULL); for (int i=0; i<kNumberMaxPagesSpans * 2; i += 2) { ph->Delete(spans[i]); } for (Length len = kMaxPages >> 2; len < kNumberMaxPagesSpans / 2 * kMaxPages; len = len << 1) { if(len <= kMaxPages >> 1 || HaveSystemRelease) { tcmalloc::Span *s = ph->New(len); EXPECT_NE(s, NULL); ph->Delete(s); } } EXPECT_TRUE(ph->CheckExpensive()); for (int i=1; i<kNumberMaxPagesSpans * 2; i += 2) { ph->Delete(spans[i]); } if (lastHalf != NULL) { ph->Delete(lastHalf); } } } } // namespace int main(int argc, char **argv) { TestPageHeap_Stats(); TestPageHeap_Limit(); printf("PASS\n"); // on windows as part of library destructors we call getenv which // calls malloc which fails due to our exhausted heap limit. 
It then // causes fancy stack overflow because log message we're printing // for failed allocation somehow cause malloc calls too // // To keep us out of trouble we just drop malloc limit FLAGS_tcmalloc_heap_limit_mb = 0; return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/testutil.cc
.cc
7,735
225
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // Author: Craig Silverstein // // A few routines that are useful for multiple tests in this directory. #include "config_for_unittests.h" #include <stdlib.h> // for NULL, abort() // On FreeBSD, if you #include <sys/resource.h>, you have to get stdint first. 
#ifdef HAVE_STDINT_H #include <stdint.h> #endif #ifdef HAVE_SYS_RESOURCE_H #include <sys/resource.h> #endif #include "tests/testutil.h" // When compiled 64-bit and run on systems with swap several unittests will end // up trying to consume all of RAM+swap, and that can take quite some time. By // limiting the address-space size we get sufficient coverage without blowing // out job limits. void SetTestResourceLimit() { #ifdef HAVE_SYS_RESOURCE_H // The actual resource we need to set varies depending on which flavour of // unix. On Linux we need RLIMIT_AS because that covers the use of mmap. // Otherwise hopefully RLIMIT_RSS is good enough. (Unfortunately 64-bit // and 32-bit headers disagree on the type of these constants!) #ifdef RLIMIT_AS #define USE_RESOURCE RLIMIT_AS #else #define USE_RESOURCE RLIMIT_RSS #endif // Restrict the test to 1GiB, which should fit comfortably well on both // 32-bit and 64-bit hosts, and executes in ~1s. const rlim_t kMaxMem = 1<<30; struct rlimit rlim; if (getrlimit(USE_RESOURCE, &rlim) == 0) { if (rlim.rlim_cur == RLIM_INFINITY || rlim.rlim_cur > kMaxMem) { rlim.rlim_cur = kMaxMem; setrlimit(USE_RESOURCE, &rlim); // ignore result } } #endif /* HAVE_SYS_RESOURCE_H */ } struct FunctionAndId { void (*ptr_to_function)(int); int id; }; #if defined(NO_THREADS) || !(defined(HAVE_PTHREAD) || defined(_WIN32)) extern "C" void RunThread(void (*fn)()) { (*fn)(); } extern "C" void RunManyThreads(void (*fn)(), int count) { // I guess the best we can do is run fn sequentially, 'count' times for (int i = 0; i < count; i++) (*fn)(); } extern "C" void RunManyThreadsWithId(void (*fn)(int), int count, int) { for (int i = 0; i < count; i++) (*fn)(i); // stacksize doesn't make sense in a non-threaded context } #elif defined(_WIN32) #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN /* We always want minimal includes */ #endif #include <windows.h> extern "C" { // This helper function has the signature that pthread_create wants. 
DWORD WINAPI RunFunctionInThread(LPVOID ptr_to_ptr_to_fn) { (**static_cast<void (**)()>(ptr_to_ptr_to_fn))(); // runs fn return 0; } DWORD WINAPI RunFunctionInThreadWithId(LPVOID ptr_to_fnid) { FunctionAndId* fn_and_id = static_cast<FunctionAndId*>(ptr_to_fnid); (*fn_and_id->ptr_to_function)(fn_and_id->id); // runs fn return 0; } void RunManyThreads(void (*fn)(), int count) { DWORD dummy; HANDLE* hThread = new HANDLE[count]; for (int i = 0; i < count; i++) { hThread[i] = CreateThread(NULL, 0, RunFunctionInThread, &fn, 0, &dummy); if (hThread[i] == NULL) ExitProcess(i); } WaitForMultipleObjects(count, hThread, TRUE, INFINITE); for (int i = 0; i < count; i++) { CloseHandle(hThread[i]); } delete[] hThread; } void RunThread(void (*fn)()) { RunManyThreads(fn, 1); } void RunManyThreadsWithId(void (*fn)(int), int count, int stacksize) { DWORD dummy; HANDLE* hThread = new HANDLE[count]; FunctionAndId* fn_and_ids = new FunctionAndId[count]; for (int i = 0; i < count; i++) { fn_and_ids[i].ptr_to_function = fn; fn_and_ids[i].id = i; hThread[i] = CreateThread(NULL, stacksize, RunFunctionInThreadWithId, &fn_and_ids[i], 0, &dummy); if (hThread[i] == NULL) ExitProcess(i); } WaitForMultipleObjects(count, hThread, TRUE, INFINITE); for (int i = 0; i < count; i++) { CloseHandle(hThread[i]); } delete[] fn_and_ids; delete[] hThread; } } #else // not NO_THREADS, not !HAVE_PTHREAD, not _WIN32 #include <pthread.h> #define SAFE_PTHREAD(fncall) do { if ((fncall) != 0) abort(); } while (0) extern "C" { // This helper function has the signature that pthread_create wants. static void* RunFunctionInThread(void *ptr_to_ptr_to_fn) { (**static_cast<void (**)()>(ptr_to_ptr_to_fn))(); // runs fn return NULL; } static void* RunFunctionInThreadWithId(void *ptr_to_fnid) { FunctionAndId* fn_and_id = static_cast<FunctionAndId*>(ptr_to_fnid); (*fn_and_id->ptr_to_function)(fn_and_id->id); // runs fn return NULL; } // Run a function in a thread of its own and wait for it to finish. 
// This is useful for tcmalloc testing, because each thread is // handled separately in tcmalloc, so there's interesting stuff to // test even if the threads are not running concurrently. void RunThread(void (*fn)()) { pthread_t thr; // Even though fn is on the stack, it's safe to pass a pointer to it, // because we pthread_join immediately (ie, before RunInThread exits). SAFE_PTHREAD(pthread_create(&thr, NULL, RunFunctionInThread, &fn)); SAFE_PTHREAD(pthread_join(thr, NULL)); } void RunManyThreads(void (*fn)(), int count) { pthread_t* thr = new pthread_t[count]; for (int i = 0; i < count; i++) { SAFE_PTHREAD(pthread_create(&thr[i], NULL, RunFunctionInThread, &fn)); } for (int i = 0; i < count; i++) { SAFE_PTHREAD(pthread_join(thr[i], NULL)); } delete[] thr; } void RunManyThreadsWithId(void (*fn)(int), int count, int stacksize) { pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setstacksize(&attr, stacksize); pthread_t* thr = new pthread_t[count]; FunctionAndId* fn_and_ids = new FunctionAndId[count]; for (int i = 0; i < count; i++) { fn_and_ids[i].ptr_to_function = fn; fn_and_ids[i].id = i; SAFE_PTHREAD(pthread_create(&thr[i], &attr, RunFunctionInThreadWithId, &fn_and_ids[i])); } for (int i = 0; i < count; i++) { SAFE_PTHREAD(pthread_join(thr[i], NULL)); } delete[] fn_and_ids; delete[] thr; pthread_attr_destroy(&attr); } } #endif
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/system-alloc_unittest.cc
.cc
5,188
156
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*- // Copyright (c) 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// --- // Author: Arun Sharma #include "config_for_unittests.h" #include "system-alloc.h" #include <stdio.h> #if defined HAVE_STDINT_H #include <stdint.h> // to get uintptr_t #elif defined HAVE_INTTYPES_H #include <inttypes.h> // another place uintptr_t might be defined #endif #include <sys/types.h> #include <algorithm> #include <limits> #include "base/logging.h" // for Check_GEImpl, Check_LTImpl, etc #include <gperftools/malloc_extension.h> // for MallocExtension::instance #include "common.h" // for kAddressBits class ArraySysAllocator : public SysAllocator { public: // Was this allocator invoked at least once? bool invoked_; ArraySysAllocator() : SysAllocator() { ptr_ = 0; invoked_ = false; } void* Alloc(size_t size, size_t *actual_size, size_t alignment) { invoked_ = true; if (size > kArraySize) { return NULL; } void *result = &array_[ptr_]; uintptr_t ptr = reinterpret_cast<uintptr_t>(result); if (actual_size) { *actual_size = size; } // Try to get more memory for alignment size_t extra = alignment - (ptr & (alignment-1)); size += extra; CHECK_LT(ptr_ + size, kArraySize); if ((ptr & (alignment-1)) != 0) { ptr += alignment - (ptr & (alignment-1)); } ptr_ += size; return reinterpret_cast<void *>(ptr); } void DumpStats() { } private: static const int kArraySize = 8 * 1024 * 1024; char array_[kArraySize]; // We allocate the next chunk from here int ptr_; }; const int ArraySysAllocator::kArraySize; ArraySysAllocator a; static void TestBasicInvoked() { MallocExtension::instance()->SetSystemAllocator(&a); // An allocation size that is likely to trigger the system allocator. // XXX: this is implementation specific. char *p = new char[1024 * 1024]; delete [] p; // Make sure that our allocator was invoked. 
CHECK(a.invoked_); } #if 0 // could port this to various OSs, but won't bother for now TEST(AddressBits, CpuVirtualBits) { // Check that kAddressBits is as least as large as either the number of bits // in a pointer or as the number of virtual bits handled by the processor. // To be effective this test must be run on each processor model. const int kPointerBits = 8 * sizeof(void*); const int kImplementedVirtualBits = NumImplementedVirtualBits(); CHECK_GE(kAddressBits, std::min(kImplementedVirtualBits, kPointerBits)); } #endif static void TestBasicRetryFailTest() { // Check with the allocator still works after a failed allocation. // // There is no way to call malloc and guarantee it will fail. malloc takes a // size_t parameter and the C++ standard does not constrain the size of // size_t. For example, consider an implementation where size_t is 32 bits // and pointers are 64 bits. // // It is likely, though, that sizeof(size_t) == sizeof(void*). In that case, // the first allocation here might succeed but the second allocation must // fail. // // If the second allocation succeeds, you will have to rewrite or // disable this test. // The weird parens are to avoid macro-expansion of 'max' on windows. const size_t kHugeSize = (std::numeric_limits<size_t>::max)() / 2; void* p1 = malloc(kHugeSize); void* p2 = malloc(kHugeSize); CHECK(p2 == NULL); if (p1 != NULL) free(p1); char* q = new char[1024]; CHECK(q != NULL); delete [] q; } int main(int argc, char** argv) { TestBasicInvoked(); TestBasicRetryFailTest(); printf("PASS\n"); return 0; }
Unknown
3D
mcellteam/mcell
libs/gperftools/src/tests/stacktrace_unittest.cc
.cc
7,845
195
// Copyright (c) 2005, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "config_for_unittests.h" #ifdef HAVE_EXECINFO_H #include <execinfo.h> #endif #include <stdio.h> #include <stdlib.h> #include "base/commandlineflags.h" #include "base/logging.h" #include <gperftools/stacktrace.h> namespace { // Obtain a backtrace, verify that the expected callers are present in the // backtrace, and maybe print the backtrace to stdout. 
// The sequence of functions whose return addresses we expect to see in the // backtrace. const int BACKTRACE_STEPS = 6; struct AddressRange { const void *start, *end; }; // Expected function [start,end] range. AddressRange expected_range[BACKTRACE_STEPS]; #if __GNUC__ // Using GCC extension: address of a label can be taken with '&&label'. // Start should be a label somewhere before recursive call, end somewhere // after it. #define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \ do { \ (prange)->start = &&start_label; \ (prange)->end = &&end_label; \ CHECK_LT((prange)->start, (prange)->end); \ } while (0) // This macro expands into "unmovable" code (opaque to GCC), and that // prevents GCC from moving a_label up or down in the code. // Without it, there is no code following the 'end' label, and GCC // (4.3.1, 4.4.0) thinks it safe to assign &&end an address that is before // the recursive call. #define DECLARE_ADDRESS_LABEL(a_label) \ a_label: do { __asm__ __volatile__(""); } while (0) // Gcc 4.4.0 may split function into multiple chunks, and the chunk // performing recursive call may end up later in the code then the return // instruction (this actually happens with FDO). // Adjust function range from __builtin_return_address. #define ADJUST_ADDRESS_RANGE_FROM_RA(prange) \ do { \ void *ra = __builtin_return_address(0); \ CHECK_LT((prange)->start, ra); \ if (ra > (prange)->end) { \ printf("Adjusting range from %p..%p to %p..%p\n", \ (prange)->start, (prange)->end, \ (prange)->start, ra); \ (prange)->end = ra; \ } \ } while (0) #else // Assume the Check* functions below are not longer than 256 bytes. 
#define INIT_ADDRESS_RANGE(fn, start_label, end_label, prange) \ do { \ (prange)->start = reinterpret_cast<const void *>(&fn); \ (prange)->end = reinterpret_cast<const char *>(&fn) + 256; \ } while (0) #define DECLARE_ADDRESS_LABEL(a_label) do { } while (0) #define ADJUST_ADDRESS_RANGE_FROM_RA(prange) do { } while (0) #endif // __GNUC__ //-----------------------------------------------------------------------// void CheckRetAddrIsInFunction(void *ret_addr, const AddressRange &range) { CHECK_GE(ret_addr, range.start); CHECK_LE(ret_addr, range.end); } //-----------------------------------------------------------------------// void ATTRIBUTE_NOINLINE CheckStackTrace(int); void ATTRIBUTE_NOINLINE CheckStackTraceLeaf(void) { const int STACK_LEN = 10; void *stack[STACK_LEN]; int size; ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[1]); INIT_ADDRESS_RANGE(CheckStackTraceLeaf, start, end, &expected_range[0]); DECLARE_ADDRESS_LABEL(start); size = GetStackTrace(stack, STACK_LEN, 0); printf("Obtained %d stack frames.\n", size); CHECK_GE(size, 1); CHECK_LE(size, STACK_LEN); #ifdef HAVE_EXECINFO_H { char **strings = backtrace_symbols(stack, size); printf("Obtained %d stack frames.\n", size); for (int i = 0; i < size; i++) printf("%s %p\n", strings[i], stack[i]); printf("CheckStackTrace() addr: %p\n", &CheckStackTrace); free(strings); } #endif for (int i = 0; i < BACKTRACE_STEPS; i++) { printf("Backtrace %d: expected: %p..%p actual: %p ... ", i, expected_range[i].start, expected_range[i].end, stack[i]); fflush(stdout); CheckRetAddrIsInFunction(stack[i], expected_range[i]); printf("OK\n"); } DECLARE_ADDRESS_LABEL(end); } //-----------------------------------------------------------------------// /* Dummy functions to make the backtrace more interesting. 
*/
// Each CheckStackTraceN records its own code range into expected_range and,
// on GCC, widens its caller's range via __builtin_return_address
// (ADJUST_ADDRESS_RANGE_FROM_RA) before recursing one level deeper;
// CheckStackTraceLeaf then validates the whole chain.  ATTRIBUTE_NOINLINE
// keeps every level as a distinct stack frame.  The 'for (j = i; ...)'
// loops make the recursive call sit inside real control flow rather than
// being a single tail call.
void ATTRIBUTE_NOINLINE CheckStackTrace4(int i) {
  ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[2]);
  INIT_ADDRESS_RANGE(CheckStackTrace4, start, end, &expected_range[1]);
  DECLARE_ADDRESS_LABEL(start);
  for (int j = i; j >= 0; j--)
    CheckStackTraceLeaf();
  DECLARE_ADDRESS_LABEL(end);
}
void ATTRIBUTE_NOINLINE CheckStackTrace3(int i) {
  ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[3]);
  INIT_ADDRESS_RANGE(CheckStackTrace3, start, end, &expected_range[2]);
  DECLARE_ADDRESS_LABEL(start);
  for (int j = i; j >= 0; j--)
    CheckStackTrace4(j);
  DECLARE_ADDRESS_LABEL(end);
}
void ATTRIBUTE_NOINLINE CheckStackTrace2(int i) {
  ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[4]);
  INIT_ADDRESS_RANGE(CheckStackTrace2, start, end, &expected_range[3]);
  DECLARE_ADDRESS_LABEL(start);
  for (int j = i; j >= 0; j--)
    CheckStackTrace3(j);
  DECLARE_ADDRESS_LABEL(end);
}
void ATTRIBUTE_NOINLINE CheckStackTrace1(int i) {
  ADJUST_ADDRESS_RANGE_FROM_RA(&expected_range[5]);
  INIT_ADDRESS_RANGE(CheckStackTrace1, start, end, &expected_range[4]);
  DECLARE_ADDRESS_LABEL(start);
  for (int j = i; j >= 0; j--)
    CheckStackTrace2(j);
  DECLARE_ADDRESS_LABEL(end);
}
// Outermost checked frame: records expected_range[5] but performs no
// RA-based adjustment, since its caller (main) is not part of the check.
void ATTRIBUTE_NOINLINE CheckStackTrace(int i) {
  INIT_ADDRESS_RANGE(CheckStackTrace, start, end, &expected_range[5]);
  DECLARE_ADDRESS_LABEL(start);
  for (int j = i; j >= 0; j--)
    CheckStackTrace1(j);
  DECLARE_ADDRESS_LABEL(end);
}

}  // namespace

//-----------------------------------------------------------------------//

// Drives one pass through the six-level call chain; prints PASS only if
// every CHECK inside the chain succeeded (a failed CHECK aborts).
int main(int argc, char ** argv) {
  CheckStackTrace(0);
  printf("PASS\n");
  return 0;
}
Unknown