/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Barrier_h
#define gc_Barrier_h

#include "NamespaceImports.h"

#include "gc/Heap.h"
#include "gc/StoreBuffer.h"
#include "js/HeapAPI.h"
#include "js/Id.h"
#include "js/RootingAPI.h"
#include "js/Value.h"

/*
 * A write barrier is a mechanism used by incremental or generational GCs to
 * ensure that every value that needs to be marked is marked. In general, the
 * write barrier should be invoked whenever a write can cause the set of things
 * traced through by the GC to change. This includes:
 *   - writes to object properties
 *   - writes to array slots
 *   - writes to fields like JSObject::shape_ that we trace through
 *   - writes to fields in private data
 *   - writes to non-markable fields like JSObject::private that point to
 *     markable data
 * The last category is the trickiest. Even though the private pointer does not
 * point to a GC thing, changing the private pointer may change the set of
 * objects that are traced by the GC. Therefore it needs a write barrier.
 *
 * Every barriered write should have the following form:
 *
 *     <pre-barrier>
 *     obj->field = value; // do the actual write
 *     <post-barrier>
 *
 * The pre-barrier is used for incremental GC and the post-barrier is for
 * generational GC.
 *
 * PRE-BARRIER
 *
 * To understand the pre-barrier, let's consider how incremental GC works. The
 * GC itself is divided into "slices". Between each slice, JS code is allowed
 * to run. Each slice should be short so that the user doesn't notice the
 * interruptions. In our GC, the structure of the slices is as follows:
 *
 * 1. ... JS work, which leads to a request to do GC ...
 * 2. [first GC slice, which performs all root marking and possibly more marking]
 * 3. ... more JS work is allowed to run ...
 * 4. [GC mark slice, which runs entirely in drainMarkStack]
 * 5. ... more JS work ...
 * 6. [GC mark slice, which runs entirely in drainMarkStack]
 * 7. ... more JS work ...
 * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
 * 9. ... JS continues uninterrupted now that the GC is finished ...
 *
 * Of course, there may be a different number of slices depending on how much
 * marking is to be done.
 *
 * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
 * might change the heap in a way that causes the GC to collect an object that
 * is actually reachable. The write barrier prevents this from happening. We
 * use a variant of incremental GC called "snapshot at the beginning." This
 * approach guarantees the invariant that if an object is reachable in step 2,
 * then we will mark it eventually. The name comes from the idea that we take
 * a theoretical "snapshot" of all reachable objects in step 2; all objects in
 * that snapshot should eventually be marked. (Note that the write barrier
 * verifier code takes an actual snapshot.)
 *
 * The basic correctness invariant of a snapshot-at-the-beginning collector is
 * that any object reachable at the end of the GC (step 9) must either:
 *   (1) have been reachable at the beginning (step 2) and thus in the
 *       snapshot, or
 *   (2) have been newly allocated, in steps 3, 5, or 7.
 * To deal with case (2), any objects allocated during an incremental GC are
 * automatically marked black.
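 *
 * As a concrete illustration of the hazard (a sketch; the object names are
 * invented for this example), suppose that in step 3 the JS code does:
 *
 *     blackObj->field = whiteObj->field;  // copy a reference out
 *     whiteObj->field = nullptr;          // then destroy the original path
 *
 * The referent was reachable when the snapshot was taken (via whiteObj), but
 * without a barrier the marker might never visit it: blackObj is already
 * marked and will not be rescanned, and the only other path to the referent
 * is gone. The pre-barrier on the second write marks the overwritten value,
 * which preserves the snapshot invariant.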
 *
 * This strategy is actually somewhat conservative: if an object becomes
 * unreachable between steps 2 and 8, it would be safe to collect it. We
 * won't, mainly for simplicity. (Also, note that the snapshot is entirely
 * theoretical. We don't actually do anything special in step 2 that we
 * wouldn't do in a non-incremental GC.)
 *
 * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
 * write "obj->field = value". Let the prior value of obj->field be value0.
 * Since value0 may have been what obj->field contained in step 2, when the
 * snapshot was taken, the barrier marks value0. Note that it only does this
 * if we're in the middle of an incremental GC. Since this is rare, the cost
 * of the write barrier is usually just an extra branch.
 *
 * In practice, we implement the pre-barrier differently based on the type of
 * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
 * a JSObject*. It takes value0 as a parameter.
 *
 * POST-BARRIER
 *
 * For generational GC, we want to be able to quickly collect the nursery in a
 * minor collection. Part of the way this is achieved is to only mark the
 * nursery itself; tenured things, which may form the majority of the heap,
 * are not traced through or marked. This leads to the problem of what to do
 * about tenured objects that have pointers into the nursery: if such things
 * are not marked, they may be discarded while there are still live objects
 * which reference them. The solution is to maintain information about these
 * pointers, and mark their targets when we start a minor collection.
 *
 * The pointers can be thought of as edges in the object graph, and the set of
 * edges from the tenured generation into the nursery is known as the
 * remembered set. Post-barriers are used to track this remembered set.
 *
 * Whenever a slot which could contain such a pointer is written, we use a
 * write barrier to check if the edge created is in the remembered set, and if
 * so we insert it into the store buffer, which is the collector's
 * representation of the remembered set. This means that when we come to do a
 * minor collection we can examine the contents of the store buffer and mark
 * any edge targets that are in the nursery.
 *
 * IMPLEMENTATION DETAILS
 *
 * Since it would be awkward to change every write to memory into a function
 * call, this file contains a bunch of C++ classes and templates that use
 * operator overloading to take care of barriers automatically. In many cases,
 * all that's necessary to make some field be barriered is to replace
 *     Type* field;
 * with
 *     HeapPtr<Type> field;
 * There are also special classes HeapValue and HeapId, which barrier js::Value
 * and jsid, respectively.
 *
 * One additional note: not all object writes need to be pre-barriered. Writes
 * to newly allocated objects do not need a pre-barrier. In these cases, we
 * use the "obj->field.init(value)" method instead of "obj->field = value".
 * We use the init naming idiom in many places to signify that a field is
 * being assigned for the first time.
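 *
 * For example (a sketch; MyClass and its field are hypothetical), a
 * GC-lifetime class holding a barriered object pointer might look like:
 *
 *     class MyClass {
 *         HeapPtr<JSObject*> obj_;
 *       public:
 *         void initObject(JSObject* obj) {
 *             obj_.init(obj);  // first write: no pre-barrier required
 *         }
 *         void setObject(JSObject* obj) {
 *             obj_ = obj;      // operator= fires the pre- and post-barriers
 *         }
 *     };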
 *
 * This file implements four classes, illustrated here:
 *
 * BarrieredBase             base class of all barriers
 *  |  |
 *  | WriteBarrieredBase     base class which provides common write operations
 *  |  |  |  |  |
 *  |  |  |  | PreBarriered  provides pre-barriers only
 *  |  |  |  |
 *  |  |  | HeapPtr          provides pre- and post-barriers
 *  |  |  |
 *  |  | RelocatablePtr      provides pre- and post-barriers and is relocatable
 *  |  |
 *  | HeapSlot               similar to HeapPtr, but tailored to slots storage
 *  |
 * ReadBarrieredBase         base class which provides common read operations
 *  |
 * ReadBarriered             provides read barriers only
 *
 * The barrier logic itself lives on T::writeBarrier.*, reached via:
 *
 * WriteBarrieredBase<T>::pre
 *  -> InternalGCMethods<T*>::preBarrier
 *      -> T::writeBarrierPre
 *  -> InternalGCMethods<Value>::preBarrier
 *  -> InternalGCMethods<jsid>::preBarrier
 *      -> InternalGCMethods<Value>::preBarrier
 *          -> T::writeBarrierPre
 *
 * HeapPtr<T>::post and RelocatablePtr<T>::post
 *  -> InternalGCMethods<T*>::postBarrier
 *      -> T::writeBarrierPost
 *  -> InternalGCMethods<Value>::postBarrier
 *      -> StoreBuffer::put
 *
 * These classes are designed to be used by the internals of the JS engine.
 * Barriers designed to be used externally are provided in js/RootingAPI.h.
 * These external barriers call into the same post-barrier implementations at
 * InternalGCMethods::post via an indirect call to Heap(.+)Barrier.
 */

class JSAtom;
struct JSCompartment;
class JSFlatString;
class JSLinearString;

namespace JS {
class Symbol;
} // namespace JS

namespace js {

class AccessorShape;
class ArrayObject;
class ArgumentsObject;
class ArrayBufferObjectMaybeShared;
class ArrayBufferObject;
class ArrayBufferViewObject;
class SharedArrayBufferObject;
class BaseShape;
class DebugScopeObject;
class GlobalObject;
class LazyScript;
class ModuleEnvironmentObject;
class ModuleNamespaceObject;
class NativeObject;
class NestedScopeObject;
class PlainObject;
class PropertyName;
class SavedFrame;
class ScopeObject;
class ScriptSourceObject;
class Shape;
class UnownedBaseShape;
class ObjectGroup;

namespace jit {
class JitCode;
} // namespace jit

#ifdef DEBUG
// Barriers can't be triggered during backend Ion compilation, which may run on
// a helper thread.
bool CurrentThreadIsIonCompiling();
bool CurrentThreadIsIonCompilingSafeForMinorGC();
bool CurrentThreadIsGCSweeping();
bool CurrentThreadCanSkipPostBarrier(bool inNursery);
#endif

namespace gc {

// Marking.h depends on these barrier definitions, so we need a separate
// entry point for marking to implement the pre-barrier.
void MarkValueForBarrier(JSTracer* trc, Value* v, const char* name);
void MarkIdForBarrier(JSTracer* trc, jsid* idp, const char* name);

} // namespace gc

template <typename T>
struct InternalGCMethods {};

template <typename T>
struct InternalGCMethods<T*>
{
    static bool isMarkable(T* v) { return v != nullptr; }
    static bool isMarkableTaggedPointer(T* v) { return !IsNullTaggedPointer(v); }

    static void preBarrier(T* v) { T::writeBarrierPre(v); }

    static void postBarrier(T** vp, T* prev, T* next) { T::writeBarrierPost(vp, prev, next); }

    static void readBarrier(T* v) { T::readBarrier(v); }

    static bool isInsideNursery(T* v) { return IsInsideNursery(v); }
};

template <typename S> struct PreBarrierFunctor : VoidDefaultAdaptor<S> {
    template <typename T> void operator()(T* t);
};

template <typename S> struct ReadBarrierFunctor : public VoidDefaultAdaptor<S> {
    template <typename T> void operator()(T* t);
};

template <>
struct InternalGCMethods<Value>
{
    static bool isMarkable(Value v) { return v.isMarkable(); }
    static bool isMarkableTaggedPointer(Value v) { return isMarkable(v); }

    static void preBarrier(Value v) {
        DispatchTyped(PreBarrierFunctor<Value>(), v);
    }

    static void postBarrier(Value* vp, const Value& prev, const Value& next) {
        MOZ_ASSERT(!CurrentThreadIsIonCompiling());
        MOZ_ASSERT(vp);

        // If the target needs an entry, add it.
        js::gc::StoreBuffer* sb;
        if (next.isObject() && (sb = reinterpret_cast<gc::Cell*>(&next.toObject())->storeBuffer())) {
            // If we know that the prev has already inserted an entry, we can
            // skip doing the lookup to add the new entry. Note that we cannot
            // safely assert the presence of the entry because it may have been
            // added via a different store buffer.
            if (prev.isObject() && reinterpret_cast<gc::Cell*>(&prev.toObject())->storeBuffer())
                return;
            sb->putValue(vp);
            return;
        }
        // Remove the prev entry if the new value does not need it.
        if (prev.isObject() && (sb = reinterpret_cast<gc::Cell*>(&prev.toObject())->storeBuffer()))
            sb->unputValue(vp);
    }

    static void readBarrier(const Value& v) {
        DispatchTyped(ReadBarrierFunctor<Value>(), v);
    }

    static bool isInsideNursery(const Value& v) {
        return v.isMarkable() && IsInsideNursery(v.toGCThing());
    }
};

template <>
struct InternalGCMethods<jsid>
{
    static bool isMarkable(jsid id) { return JSID_IS_STRING(id) || JSID_IS_SYMBOL(id); }
    static bool isMarkableTaggedPointer(jsid id) { return isMarkable(id); }

    static void preBarrier(jsid id) { DispatchTyped(PreBarrierFunctor<jsid>(), id); }
    static void postBarrier(jsid* idp, jsid prev, jsid next) {}

    static bool isInsideNursery(jsid id) { return false; }
};

// Barrier classes can use Mixins to add methods to a set of barrier
// instantiations, to make the barriered thing look and feel more like the
// thing itself.
template <typename T>
class BarrieredBaseMixins {};

// Base class of all barrier types.
//
// This is marked non-memmovable since post barriers added by derived classes
// can add pointers to class instances to the store buffer.
template <typename T>
class MOZ_NON_MEMMOVABLE BarrieredBase : public BarrieredBaseMixins<T>
{
  protected:
    // BarrieredBase is not directly instantiable.
    explicit BarrieredBase(T v) : value(v) {
#ifdef DEBUG
        assertTypeConstraints();
#endif
    }

    // Storage for all barrier classes. |value| must be a GC thing reference
    // type: either a direct pointer to a GC thing or a supported tagged
    // pointer that can reference GC things, such as JS::Value or jsid. Nested
    // barrier types are NOT supported. See assertTypeConstraints.
    T value;

  public:
    // Note: this is public because C++ cannot friend to a specific template instantiation.
    // Friending to the generic template leads to a number of unintended consequences, including
    // template resolution ambiguity and a circular dependency with Tracing.h.
    T* unsafeUnbarrieredForTracing() { return &value; }

  private:
#ifdef DEBUG
    // Static type assertions about T must be moved out of line to avoid
    // circular dependencies between Barrier classes and GC memory definitions.
    void assertTypeConstraints() const;
#endif
};

// Base class for barriered pointer types that intercept only writes.
template <class T>
class WriteBarrieredBase : public BarrieredBase<T>
{
  protected:
    // WriteBarrieredBase is not directly instantiable.
    explicit WriteBarrieredBase(T v) : BarrieredBase<T>(v) {}

  public:
    DECLARE_POINTER_COMPARISON_OPS(T);
    DECLARE_POINTER_CONSTREF_OPS(T);

    // Use this if the automatic coercion to T isn't working.
    const T& get() const { return this->value; }

    // Use this if you want to change the value without invoking barriers.
    // Obviously this is dangerous unless you know the barrier is not needed.
    void unsafeSet(T v) { this->value = v; }

    // For users who need to manually barrier the raw types.
    static void writeBarrierPre(const T& v) { InternalGCMethods<T>::preBarrier(v); }

  protected:
    void pre() { InternalGCMethods<T>::preBarrier(this->value); }
    void post(T prev, T next) { InternalGCMethods<T>::postBarrier(&this->value, prev, next); }
};

/*
 * PreBarriered only automatically handles pre-barriers. Post-barriers must be
 * manually implemented when using this class. HeapPtr and RelocatablePtr
 * should be used in all cases that do not require explicit low-level control
 * of moving behavior, e.g. for HashMap keys.
 */
template <class T>
class PreBarriered : public WriteBarrieredBase<T>
{
  public:
    PreBarriered() : WriteBarrieredBase<T>(GCMethods<T>::initial()) {}
    /*
     * Allow implicit construction for use in generic contexts, such as
     * DebuggerWeakMap::markKeys.
     */
    MOZ_IMPLICIT PreBarriered(T v) : WriteBarrieredBase<T>(v) {}
    explicit PreBarriered(const PreBarriered<T>& v) : WriteBarrieredBase<T>(v.value) {}
    ~PreBarriered() { this->pre(); }

    void init(T v) {
        this->value = v;
    }

    /* Use to set the pointer to nullptr. */
    void clear() {
        this->pre();
        this->value = nullptr;
    }

    DECLARE_POINTER_ASSIGN_OPS(PreBarriered, T);

  private:
    void set(const T& v) {
        this->pre();
        this->value = v;
    }
};

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * It must only be stored in memory that has GC lifetime. HeapPtr must not be
 * used in contexts where it may be implicitly moved or deleted, e.g. most
 * containers.
 *
 * Not to be confused with JS::Heap<T>. This is a different class from the
 * external interface and implements substantially different semantics.
 *
 * The post-barriers implemented by this class are faster than those
 * implemented by RelocatablePtr<T> or JS::Heap<T> at the cost of not
 * automatically handling deletion or movement.
 */
template <class T>
class HeapPtr : public WriteBarrieredBase<T>
{
  public:
    HeapPtr() : WriteBarrieredBase<T>(GCMethods<T>::initial()) {}
    explicit HeapPtr(T v) : WriteBarrieredBase<T>(v) {
        this->post(GCMethods<T>::initial(), v);
    }
    explicit HeapPtr(const HeapPtr<T>& v) : WriteBarrieredBase<T>(v) {
        this->post(GCMethods<T>::initial(), v);
    }
#ifdef DEBUG
    ~HeapPtr() {
        // No prebarrier necessary as this only happens when we are sweeping
        // or after we have just collected the nursery.
        bool inNursery = InternalGCMethods<T>::isInsideNursery(this->value);
        MOZ_ASSERT(CurrentThreadIsGCSweeping() || CurrentThreadCanSkipPostBarrier(inNursery));
        Poison(this, JS_FREED_HEAP_PTR_PATTERN, sizeof(*this));
    }
#endif

    void init(T v) {
        this->value = v;
        this->post(GCMethods<T>::initial(), v);
    }

    DECLARE_POINTER_ASSIGN_OPS(HeapPtr, T);

  private:
    void set(const T& v) {
        this->pre();
        T tmp = this->value;
        this->value = v;
        this->post(tmp, this->value);
    }

    /*
     * Unlike RelocatablePtr<T>, HeapPtr<T> must be managed with GC lifetimes.
     * Specifically, the memory used by the pointer itself must be live until
     * at least the next minor GC. For that reason, move semantics are invalid
     * and are deleted here. Please note that not all containers support move
     * semantics, so this does not completely prevent invalid uses.
     */
    HeapPtr(HeapPtr<T>&&) = delete;
    HeapPtr<T>& operator=(HeapPtr<T>&&) = delete;
};

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * Unlike HeapPtr<T>, it can be used in memory that is not managed by the GC,
 * i.e. in C++ containers. It is, however, somewhat slower, so it should only
 * be used in contexts where this ability is necessary.
 */
template <class T>
class RelocatablePtr : public WriteBarrieredBase<T>
{
  public:
    RelocatablePtr() : WriteBarrieredBase<T>(GCMethods<T>::initial()) {}

    // Implicitly adding barriers is a reasonable default.
    MOZ_IMPLICIT RelocatablePtr(const T& v) : WriteBarrieredBase<T>(v) {
        this->post(GCMethods<T>::initial(), this->value);
    }

    /*
     * For RelocatablePtr, move semantics are equivalent to copy semantics. In
     * C++, a copy constructor taking const-ref is the way to get a single
     * function that will be used for both lvalue and rvalue copies, so we can
     * simply omit the rvalue variant.
     */
    MOZ_IMPLICIT RelocatablePtr(const RelocatablePtr<T>& v) : WriteBarrieredBase<T>(v) {
        this->post(GCMethods<T>::initial(), this->value);
    }

    ~RelocatablePtr() {
        this->pre();
        this->post(this->value, GCMethods<T>::initial());
    }

    void init(T v) {
        this->value = v;
        this->post(GCMethods<T>::initial(), this->value);
    }

    DECLARE_POINTER_ASSIGN_OPS(RelocatablePtr, T);

    /* Make this friend so it can access pre() and post(). */
    template <class T1, class T2>
    friend inline void
    BarrieredSetPair(Zone* zone,
                     RelocatablePtr<T1*>& v1, T1* val1,
                     RelocatablePtr<T2*>& v2, T2* val2);

  protected:
    void set(const T& v) {
        this->pre();
        postBarrieredSet(v);
    }

    void postBarrieredSet(const T& v) {
        T tmp = this->value;
        this->value = v;
        this->post(tmp, this->value);
    }
};

// Base class for barriered pointer types that intercept reads and writes.
template <typename T>
class ReadBarrieredBase : public BarrieredBase<T>
{
  protected:
    // ReadBarrieredBase is not directly instantiable.
    explicit ReadBarrieredBase(T v) : BarrieredBase<T>(v) {}

  protected:
    void read() const { InternalGCMethods<T>::readBarrier(this->value); }
    void post(T prev, T next) { InternalGCMethods<T>::postBarrier(&this->value, prev, next); }
};

// Incremental GC requires that weak pointers have read barriers. This is
// mostly an issue for empty shapes stored in JSCompartment. The problem
// happens when, during an incremental GC, some JS code stores one of the
// compartment's empty shapes into an object already marked black. Normally,
// this would not be a problem, because the empty shape would have been part
// of the initial snapshot when the GC started. However, since this is a weak
// pointer, it isn't. So we may collect the empty shape even though a live
// object points to it. To fix this, we mark these empty shapes black whenever
// they get read out.
//
// Note that this class also has post-barriers, so it is safe to use with
// nursery pointers.
// However, when used as a hashtable key, care must still be
// taken to insert manual post-barriers on the table for rekeying if the key
// is based in any way on the address of the object.
template <typename T>
class ReadBarriered : public ReadBarrieredBase<T>
{
  public:
    ReadBarriered() : ReadBarrieredBase<T>(GCMethods<T>::initial()) {}

    // It is okay to add barriers implicitly.
    MOZ_IMPLICIT ReadBarriered(const T& v) : ReadBarrieredBase<T>(v) {
        this->post(GCMethods<T>::initial(), v);
    }

    // Copy is creating a new edge, so we must read barrier the source edge.
    explicit ReadBarriered(const ReadBarriered& v) : ReadBarrieredBase<T>(v) {
        this->post(GCMethods<T>::initial(), v.get());
    }

    // Move retains the lifetime status of the source edge, so does not fire
    // the read barrier of the defunct edge.
    ReadBarriered(ReadBarriered&& v)
      : ReadBarrieredBase<T>(mozilla::Forward<ReadBarriered<T>>(v))
    {
        this->post(GCMethods<T>::initial(), v.value);
    }

    ~ReadBarriered() {
        this->post(this->value, GCMethods<T>::initial());
    }

    ReadBarriered& operator=(const ReadBarriered& v) {
        T prior = this->value;
        this->value = v.value;
        this->post(prior, v.value);
        return *this;
    }

    const T get() const {
        if (!InternalGCMethods<T>::isMarkable(this->value))
            return GCMethods<T>::initial();
        this->read();
        return this->value;
    }

    const T unbarrieredGet() const {
        return this->value;
    }

    explicit operator bool() const {
        return bool(this->value);
    }

    operator const T() const { return get(); }

    const T operator->() const { return get(); }

    T* unsafeGet() { return &this->value; }
    T const* unsafeGet() const { return &this->value; }

    void set(const T& v) {
        T tmp = this->value;
        this->value = v;
        this->post(tmp, v);
    }
};

// A WeakRef pointer does not hold its target live and is automatically nulled
// out when the GC discovers that it is not reachable from any other path.
template <typename T>
using WeakRef = ReadBarriered<T>;

// Add Value operations to all Barrier types. Note, this must be defined before
// HeapSlot for HeapSlot's base to get these operations.
template <>
class BarrieredBaseMixins<JS::Value> : public ValueOperations<WriteBarrieredBase<JS::Value>>
{};

// A pre- and post-barriered Value that is specialized to be aware that it
// resides in a slots or elements vector. This allows it to be relocated in
// memory, but with substantially less overhead than a RelocatablePtr.
class HeapSlot : public WriteBarrieredBase<Value>
{
  public:
    enum Kind {
        Slot = 0,
        Element = 1
    };

    explicit HeapSlot() = delete;

    explicit HeapSlot(NativeObject* obj, Kind kind, uint32_t slot, const Value& v)
      : WriteBarrieredBase<Value>(v)
    {
        post(obj, kind, slot, v);
    }

    explicit HeapSlot(NativeObject* obj, Kind kind, uint32_t slot, const HeapSlot& s)
      : WriteBarrieredBase<Value>(s.value)
    {
        post(obj, kind, slot, s);
    }

    ~HeapSlot() {
        pre();
    }

    void init(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
        value = v;
        post(owner, kind, slot, v);
    }

#ifdef DEBUG
    bool preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot);
    bool preconditionForWriteBarrierPost(NativeObject* obj, Kind kind, uint32_t slot,
                                         Value target) const;
#endif

    void set(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
        MOZ_ASSERT(preconditionForSet(owner, kind, slot));
        pre();
        value = v;
        post(owner, kind, slot, v);
    }

    /* For users who need to manually barrier the raw types.
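     *
     * For example (a sketch; |owner|, |elems|, |i|, and |v| are hypothetical),
     * code that stores into an element vector directly rather than through
     * HeapSlot::set can re-establish the post-barrier by hand:
     *
     *     elems[i].unsafeSet(v); // raw write: fires no barriers
     *     HeapSlot::writeBarrierPost(owner, HeapSlot::Element, i, v);
     *
     * Any required pre-barrier on the overwritten value must then be handled
     * separately (see WriteBarrieredBase::writeBarrierPre).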
     */
    static void writeBarrierPost(NativeObject* owner, Kind kind, uint32_t slot,
                                 const Value& target)
    {
        reinterpret_cast<HeapSlot*>(const_cast<Value*>(&target))->post(owner, kind, slot, target);
    }

  private:
    void post(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
        MOZ_ASSERT(preconditionForWriteBarrierPost(owner, kind, slot, target));
        if (this->value.isObject()) {
            gc::Cell* cell = reinterpret_cast<gc::Cell*>(&this->value.toObject());
            if (cell->storeBuffer())
                cell->storeBuffer()->putSlot(owner, kind, slot, 1);
        }
    }
};

class HeapSlotArray
{
    HeapSlot* array;

    // Whether writes may be performed to the slots in this array. This helps
    // to control how object elements which may be copy on write are used.
#ifdef DEBUG
    bool allowWrite_;
#endif

  public:
    explicit HeapSlotArray(HeapSlot* array, bool allowWrite)
      : array(array)
#ifdef DEBUG
      , allowWrite_(allowWrite)
#endif
    {}

    operator const Value*() const {
        JS_STATIC_ASSERT(sizeof(HeapPtr<Value>) == sizeof(Value));
        JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
        return reinterpret_cast<const Value*>(array);
    }
    operator HeapSlot*() const { MOZ_ASSERT(allowWrite()); return array; }

    HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset, allowWrite()); }
    HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset, allowWrite()); }

  private:
    bool allowWrite() const {
#ifdef DEBUG
        return allowWrite_;
#else
        return true;
#endif
    }
};

/*
 * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
 * barriers with only one branch to check if we're in an incremental GC.
 */
template <class T1, class T2>
static inline void
BarrieredSetPair(Zone* zone,
                 RelocatablePtr<T1*>& v1, T1* val1,
                 RelocatablePtr<T2*>& v2, T2* val2)
{
    if (T1::needWriteBarrierPre(zone)) {
        v1.pre();
        v2.pre();
    }
    v1.postBarrieredSet(val1);
    v2.postBarrieredSet(val2);
}

/*
 * ImmutableTenuredPtr is designed for one very narrow case: replacing
 * immutable raw pointers to GC-managed things, implicitly converting to a
 * handle type for ease of use. Pointers encapsulated by this type must:
 *
 *   be immutable (no incremental write barriers),
 *   never point into the nursery (no generational write barriers), and
 *   be traced via MarkRuntime (we use fromMarkedLocation).
 *
 * In short: you *really* need to know what you're doing before you use this
 * class!
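 *
 * A usage sketch (assuming |name| is a tenured pointer with runtime
 * lifetime):
 *
 *     ImmutableTenuredPtr<PropertyName*> cached;
 *     cached.init(name);                 // asserts name->isTenured()
 *     Handle<PropertyName*> h = cached;  // implicit conversion to a handle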
 */
template <typename T>
class ImmutableTenuredPtr
{
    T value;

  public:
    operator T() const { return value; }
    T operator->() const { return value; }

    operator Handle<T>() const {
        return Handle<T>::fromMarkedLocation(&value);
    }

    void init(T ptr) {
        MOZ_ASSERT(ptr->isTenured());
        value = ptr;
    }

    T get() const { return value; }
    const T* address() { return &value; }
};

template <typename T>
struct MovableCellHasher<PreBarriered<T>>
{
    using Key = PreBarriered<T>;
    using Lookup = T;

    static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
    static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <typename T>
struct MovableCellHasher<RelocatablePtr<T>>
{
    using Key = RelocatablePtr<T>;
    using Lookup = T;

    static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
    static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <typename T>
struct MovableCellHasher<ReadBarriered<T>>
{
    using Key = ReadBarriered<T>;
    using Lookup = T;

    static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
    static bool match(const Key& k, const Lookup& l) {
        return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
    }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

/* Useful for hashtables with a HeapPtr as key. */
template <class T>
struct HeapPtrHasher
{
    typedef HeapPtr<T> Key;
    typedef T Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
    static bool match(const Key& k, Lookup l) { return k.get() == l; }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

/* Specialized hashing policy for HeapPtrs. */
template <class T>
struct DefaultHasher<HeapPtr<T>> : HeapPtrHasher<T> {};

template <class T>
struct PreBarrieredHasher
{
    typedef PreBarriered<T> Key;
    typedef T Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
    static bool match(const Key& k, Lookup l) { return k.get() == l; }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <class T>
struct DefaultHasher<PreBarriered<T>> : PreBarrieredHasher<T> {};

/* Useful for hashtables with a ReadBarriered as key. */
template <class T>
struct ReadBarrieredHasher
{
    typedef ReadBarriered<T> Key;
    typedef T Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
    static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
    static void rekey(Key& k, const Key& newKey) { k.set(newKey.unbarrieredGet()); }
};

/* Specialized hashing policy for ReadBarriereds.
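 *
 * With this specialization in place, a table can be declared without naming
 * the policy explicitly (a sketch):
 *
 *     HashSet<ReadBarriered<JSObject*>> objects;
 *
 * Lookups take a raw JSObject* and compare against the unbarriered value, so
 * merely probing the table does not trigger read barriers.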
 */
template <class T>
struct DefaultHasher<ReadBarriered<T>> : ReadBarrieredHasher<T> {};

class ArrayObject;
class ArrayBufferObject;
class NestedScopeObject;
class DebugScopeObject;
class GlobalObject;
class ScriptSourceObject;
class Shape;
class BaseShape;
class UnownedBaseShape;

namespace jit {
class JitCode;
} // namespace jit

typedef PreBarriered<JSObject*> PreBarrieredObject;
typedef PreBarriered<JSScript*> PreBarrieredScript;
typedef PreBarriered<jit::JitCode*> PreBarrieredJitCode;
typedef PreBarriered<JSString*> PreBarrieredString;
typedef PreBarriered<JSAtom*> PreBarrieredAtom;

typedef RelocatablePtr<JSObject*> RelocatablePtrObject;
typedef RelocatablePtr<JSFunction*> RelocatablePtrFunction;
typedef RelocatablePtr<PlainObject*> RelocatablePtrPlainObject;
typedef RelocatablePtr<JSScript*> RelocatablePtrScript;
typedef RelocatablePtr<NativeObject*> RelocatablePtrNativeObject;
typedef RelocatablePtr<NestedScopeObject*> RelocatablePtrNestedScopeObject;
typedef RelocatablePtr<Shape*> RelocatablePtrShape;
typedef RelocatablePtr<ObjectGroup*> RelocatablePtrObjectGroup;
typedef RelocatablePtr<jit::JitCode*> RelocatablePtrJitCode;
typedef RelocatablePtr<JSLinearString*> RelocatablePtrLinearString;
typedef RelocatablePtr<JSString*> RelocatablePtrString;
typedef RelocatablePtr<JSAtom*> RelocatablePtrAtom;
typedef RelocatablePtr<ArrayBufferObjectMaybeShared*> RelocatablePtrArrayBufferObjectMaybeShared;

typedef HeapPtr<NativeObject*> HeapPtrNativeObject;
typedef HeapPtr<ArrayObject*> HeapPtrArrayObject;
typedef HeapPtr<ArrayBufferObjectMaybeShared*> HeapPtrArrayBufferObjectMaybeShared;
typedef HeapPtr<ArrayBufferObject*> HeapPtrArrayBufferObject;
typedef HeapPtr<BaseShape*> HeapPtrBaseShape;
typedef HeapPtr<JSAtom*> HeapPtrAtom;
typedef HeapPtr<JSFlatString*> HeapPtrFlatString;
typedef HeapPtr<JSFunction*> HeapPtrFunction;
typedef HeapPtr<JSLinearString*> HeapPtrLinearString;
typedef HeapPtr<JSObject*> HeapPtrObject;
typedef HeapPtr<JSScript*> HeapPtrScript;
typedef HeapPtr<JSString*> HeapPtrString;
typedef HeapPtr<ModuleEnvironmentObject*> HeapPtrModuleEnvironmentObject;
typedef HeapPtr<ModuleNamespaceObject*> HeapPtrModuleNamespaceObject;
typedef HeapPtr<PlainObject*> HeapPtrPlainObject;
typedef HeapPtr<PropertyName*> HeapPtrPropertyName;
typedef HeapPtr<Shape*> HeapPtrShape;
typedef HeapPtr<UnownedBaseShape*> HeapPtrUnownedBaseShape;
typedef HeapPtr<jit::JitCode*> HeapPtrJitCode;
typedef HeapPtr<ObjectGroup*> HeapPtrObjectGroup;

typedef PreBarriered<Value> PreBarrieredValue;
typedef RelocatablePtr<Value> RelocatableValue;
typedef HeapPtr<Value> HeapValue;

typedef PreBarriered<jsid> PreBarrieredId;
typedef RelocatablePtr<jsid> RelocatableId;
typedef HeapPtr<jsid> HeapId;

typedef ImmutableTenuredPtr<PropertyName*> ImmutablePropertyNamePtr;
typedef ImmutableTenuredPtr<JS::Symbol*> ImmutableSymbolPtr;

typedef ReadBarriered<DebugScopeObject*> ReadBarrieredDebugScopeObject;
typedef ReadBarriered<GlobalObject*> ReadBarrieredGlobalObject;
typedef ReadBarriered<JSObject*> ReadBarrieredObject;
typedef ReadBarriered<JSScript*> ReadBarrieredScript;
typedef ReadBarriered<ScriptSourceObject*> ReadBarrieredScriptSourceObject;
typedef ReadBarriered<Shape*> ReadBarrieredShape;
typedef ReadBarriered<jit::JitCode*> ReadBarrieredJitCode;
typedef ReadBarriered<ObjectGroup*> ReadBarrieredObjectGroup;
typedef ReadBarriered<JS::Symbol*> ReadBarrieredSymbol;
typedef ReadBarriered<Value> ReadBarrieredValue;

} /* namespace js */

#endif /* gc_Barrier_h */