1/*
2 * Copyright (C) 2012-2018 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#pragma once
27
28#include "AllocationFailureMode.h"
29#include "BlockDirectoryBits.h"
30#include "CellAttributes.h"
31#include "FreeList.h"
32#include "LocalAllocator.h"
33#include "MarkedBlock.h"
34#include <wtf/DataLog.h>
35#include <wtf/FastBitVector.h>
36#include <wtf/MonotonicTime.h>
37#include <wtf/SharedTask.h>
38#include <wtf/Vector.h>
39
40namespace WTF {
41class SimpleStats;
42}
43
44namespace JSC {
45
46class GCDeferralContext;
47class Heap;
48class IsoCellSet;
49class MarkedSpace;
50class LLIntOffsetsExtractor;
51
52DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(BlockDirectory);
53
// A BlockDirectory owns and tracks the set of MarkedBlocks that hold cells of one
// size (m_cellSize) and one set of CellAttributes (m_attributes). Per-block state
// flags (empty, unswept, and the rest of the FOR_EACH_BLOCK_DIRECTORY_BIT set) live
// in the m_bits bitvectors, indexed by each block's index in m_blocks. LocalAllocators
// draw blocks to allocate out of via findBlockForAllocation()/tryAllocateBlock().
class BlockDirectory {
    WTF_MAKE_NONCOPYABLE(BlockDirectory);
    WTF_MAKE_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(BlockDirectory);

    // LLIntOffsetsExtractor reads private field offsets, so member layout is part of
    // this class's contract — do not reorder members casually.
    friend class LLIntOffsetsExtractor;

public:
    BlockDirectory(size_t cellSize);
    ~BlockDirectory();
    void setSubspace(Subspace*);
    void lastChanceToFinalize();
    // Allocation/GC phase transitions. NOTE(review): the required call ordering is
    // enforced by the callers (Heap/MarkedSpace), not visible in this header — confirm
    // there before changing any of these.
    void prepareForAllocation();
    void stopAllocating();
    void stopAllocatingForGood();
    void resumeAllocating();
    void beginMarkingForFullCollection();
    void endMarking();
    void snapshotUnsweptForEdenCollection();
    void snapshotUnsweptForFullCollection();
    void sweep();
    void shrink();
    void assertNoUnswept();
    // Note: the cell size is stored as unsigned (see m_cellSize); returning size_t is a
    // widening conversion.
    size_t cellSize() const { return m_cellSize; }
    const CellAttributes& attributes() const { return m_attributes; }
    bool needsDestruction() const { return m_attributes.destruction == NeedsDestruction; }
    DestructionMode destruction() const { return m_attributes.destruction; }
    HeapCell::Kind cellKind() const { return m_attributes.cellKind; }

    // Returns whether `target` points at a cell that is currently on a free list (i.e.
    // handed out for allocation rather than live).
    bool isFreeListedCell(const void* target);

    // Iterate every block, or only the blocks not flagged empty, respectively.
    template<typename Functor> void forEachBlock(const Functor&);
    template<typename Functor> void forEachNotEmptyBlock(const Functor&);

    // Produces a shared task from which parallel GC helper threads can repeatedly pull
    // not-empty blocks until the supply is exhausted (the task returns null).
    RefPtr<SharedTask<MarkedBlock::Handle*()>> parallelNotEmptyBlockSource();

    void addBlock(MarkedBlock::Handle*);
    enum class WillDeleteBlock { No, Yes };
    // If WillDeleteBlock::Yes is passed then the block will be left in an invalid state. We do this, however, to avoid potentially paging in / decompressing old blocks to update their handle just before freeing them.
    void removeBlock(MarkedBlock::Handle*, WillDeleteBlock = WillDeleteBlock::No);

    void updatePercentageOfPagedOutPages(WTF::SimpleStats&);

    // Lock guarding m_bits; see the comment on m_bits below. WTF_RETURNS_LOCK feeds
    // Clang thread-safety analysis.
    Lock& bitvectorLock() WTF_RETURNS_LOCK(m_bitvectorLock) { return m_bitvectorLock; }

    // Generates a pair of getter/setter accessors (by raw block index and by block
    // handle) for each bitvector declared in FOR_EACH_BLOCK_DIRECTORY_BIT. The
    // AbstractLocker parameter attests that the caller holds the appropriate lock
    // (bitvectorLock(), per the m_bits comment below).
#define BLOCK_DIRECTORY_BIT_ACCESSORS(lowerBitName, capitalBitName) \
    bool is ## capitalBitName(const AbstractLocker&, size_t index) const { return m_bits.is ## capitalBitName(index); } \
    bool is ## capitalBitName(const AbstractLocker& locker, MarkedBlock::Handle* block) const { return is ## capitalBitName(locker, block->index()); } \
    void setIs ## capitalBitName(const AbstractLocker&, size_t index, bool value) { m_bits.setIs ## capitalBitName(index, value); } \
    void setIs ## capitalBitName(const AbstractLocker& locker, MarkedBlock::Handle* block, bool value) { setIs ## capitalBitName(locker, block->index(), value); }
    FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_ACCESSORS)
#undef BLOCK_DIRECTORY_BIT_ACCESSORS

    // Invokes func once per bitvector in m_bits. The AbstractLocker attests the caller
    // holds the lock protecting the bitvectors.
    template<typename Func>
    void forEachBitVector(const AbstractLocker&, const Func& func)
    {
#define BLOCK_DIRECTORY_BIT_CALLBACK(lowerBitName, capitalBitName) \
        func(m_bits.lowerBitName());
        FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_CALLBACK);
#undef BLOCK_DIRECTORY_BIT_CALLBACK
    }

    // Like forEachBitVector, but also passes the bitvector's name (used by dumpBits).
    template<typename Func>
    void forEachBitVectorWithName(const AbstractLocker&, const Func& func)
    {
#define BLOCK_DIRECTORY_BIT_CALLBACK(lowerBitName, capitalBitName) \
        func(m_bits.lowerBitName(), #capitalBitName);
        FOR_EACH_BLOCK_DIRECTORY_BIT(BLOCK_DIRECTORY_BIT_CALLBACK);
#undef BLOCK_DIRECTORY_BIT_CALLBACK
    }

    // This directory participates in three singly-linked lists: the global directory
    // list, its Subspace's list, and its AlignedMemoryAllocator's list.
    BlockDirectory* nextDirectory() const { return m_nextDirectory; }
    BlockDirectory* nextDirectoryInSubspace() const { return m_nextDirectoryInSubspace; }
    BlockDirectory* nextDirectoryInAlignedMemoryAllocator() const { return m_nextDirectoryInAlignedMemoryAllocator; }

    void setNextDirectory(BlockDirectory* directory) { m_nextDirectory = directory; }
    void setNextDirectoryInSubspace(BlockDirectory* directory) { m_nextDirectoryInSubspace = directory; }
    void setNextDirectoryInAlignedMemoryAllocator(BlockDirectory* directory) { m_nextDirectoryInAlignedMemoryAllocator = directory; }

    // Scans from m_emptyCursor for an empty block; null result semantics are defined in
    // the implementation — presumably "none available". TODO confirm at the call sites.
    MarkedBlock::Handle* findEmptyBlockToSteal();

    // Scans from m_unsweptCursor for the next incremental-sweep candidate.
    MarkedBlock::Handle* findBlockToSweep();

    Subspace* subspace() const { return m_subspace; }
    MarkedSpace& markedSpace() const;

    void dump(PrintStream&) const;
    void dumpBits(PrintStream& = WTF::dataFile());

private:
    friend class IsoCellSet;
    friend class LocalAllocator;
    friend class LocalSideAllocator;
    friend class MarkedBlock;

    MarkedBlock::Handle* findBlockForAllocation(LocalAllocator&);

    // Attempts to allocate a fresh block from the Heap; may fail (returns null).
    MarkedBlock::Handle* tryAllocateBlock(Heap&);

    // All live blocks, indexed by MarkedBlock::Handle::index(). Indices of removed
    // blocks are recycled via m_freeBlockIndices.
    Vector<MarkedBlock::Handle*> m_blocks;
    Vector<unsigned> m_freeBlockIndices;

    // Mutator uses this to guard resizing the bitvectors. Those things in the GC that may run
    // concurrently to the mutator must lock this when accessing the bitvectors.
    BlockDirectoryBits m_bits;
    Lock m_bitvectorLock;
    Lock m_localAllocatorsLock;
    CellAttributes m_attributes;

    // Fixed at construction; the size_t constructor argument is narrowed to unsigned.
    unsigned m_cellSize;

    // After you do something to a block based on one of these cursors, you clear the bit in the
    // corresponding bitvector and leave the cursor where it was. We can use unsigned instead of size_t since
    // this number is bound by capacity of Vector m_blocks, which must be within unsigned.
    unsigned m_emptyCursor { 0 };
    unsigned m_unsweptCursor { 0 }; // Points to the next block that is a candidate for incremental sweeping.

    // FIXME: All of these should probably be references.
    // https://bugs.webkit.org/show_bug.cgi?id=166988
    Subspace* m_subspace { nullptr };
    BlockDirectory* m_nextDirectory { nullptr };
    BlockDirectory* m_nextDirectoryInSubspace { nullptr };
    BlockDirectory* m_nextDirectoryInAlignedMemoryAllocator { nullptr };

    // LocalAllocators currently attached to this directory; guarded by
    // m_localAllocatorsLock (NOTE(review): inferred from the name — confirm in the .cpp).
    SentinelLinkedList<LocalAllocator, BasicRawSentinelNode<LocalAllocator>> m_localAllocators;
};
179
180} // namespace JSC
181