author     Bo Liu <boliu@google.com>   2014-07-15 13:59:00 -0700
committer  Bo Liu <boliu@google.com>   2014-07-15 13:59:00 -0700
commit     39a71c99d271fe9991f3fbca7c559eb4ca5a8239 (patch)
tree       4df3130b69446b1884e9f6ec4bb81b8be8f889a8
parent     70aa50bc4b052ffa5637e27e5d77126e3059fd06 (diff)
parent     0ce8f0b69763279bcd71f5766eeab4b1cf1eb72c (diff)
Merge v8 from https://chromium.googlesource.com/a/external/v8.git at 0ce8f0b69763279bcd71f5766eeab4b1cf1eb72c
This commit was generated by merge_from_chromium.py.

Change-Id: I6e59aeb166539603e2eb5cdab4084bc7fd1b8c74
-rw-r--r--  src/codegen.cc                           |  2
-rw-r--r--  src/compiler.cc                          |  6
-rw-r--r--  src/global-handles.cc                    | 18
-rw-r--r--  src/global-handles.h                     |  6
-rw-r--r--  src/heap.cc                              | 67
-rw-r--r--  src/heap.h                               | 19
-rw-r--r--  src/hydrogen.cc                          |  6
-rw-r--r--  src/objects.h                            |  2
-rw-r--r--  src/version.cc                           |  2
-rw-r--r--  test/cctest/cctest.status                |  3
-rw-r--r--  test/mjsunit/debug-compile-optimized.js  | 18
-rw-r--r--  test/mjsunit/regress/regress-386034.js   | 19
12 files changed, 116 insertions(+), 52 deletions(-)
diff --git a/src/codegen.cc b/src/codegen.cc
index 753d522d6..c039e40c9 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -151,7 +151,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
isolate->factory()->NewCode(desc, flags, masm->CodeObject(),
false, is_crankshafted,
info->prologue_offset(),
- info->is_debug());
+ info->is_debug() && !is_crankshafted);
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
diff --git a/src/compiler.cc b/src/compiler.cc
index 42fcc7840..0d3f146ab 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -297,8 +297,10 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// generated code for this from the shared function object.
if (FLAG_always_full_compiler) return AbortOptimization();
- // Do not use crankshaft if compiling for debugging.
- if (info()->is_debug()) return AbortOptimization(kDebuggerIsActive);
+ // Do not use crankshaft if we need to be able to set break points.
+ if (isolate()->DebuggerHasBreakPoints()) {
+ return AbortOptimization(kDebuggerHasBreakPoints);
+ }
// Limit the number of times we re-compile a function with
// the optimizing compiler.
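
Taken together with the codegen.cc hunk above, this narrows the policy: Crankshaft is now ruled out only while actual break points are set, not whenever the debugger is merely attached, and optimized code never carries debug instrumentation. Below is a minimal standalone sketch of the combined decision; the names are illustrative, not V8's real API.

#include <cstdio>

// Hypothetical model: only live break points block optimization.
struct DebugState {
  bool debugger_attached;
  bool has_break_points;
};

bool CanUseCrankshaft(const DebugState& s) {
  // Old rule: !s.debugger_attached. New rule: check break points only.
  return !s.has_break_points;
}

int main() {
  std::printf("attached, no breaks -> %d\n", CanUseCrankshaft({true, false}));  // 1
  std::printf("break points set    -> %d\n", CanUseCrankshaft({true, true}));   // 0
  return 0;
}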
diff --git a/src/global-handles.cc b/src/global-handles.cc
index ba19fe167..a5ae2d562 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -611,21 +611,21 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
}
-bool GlobalHandles::PostGarbageCollectionProcessing(
+int GlobalHandles::PostGarbageCollectionProcessing(
GarbageCollector collector, GCTracer* tracer) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
- bool next_gc_likely_to_collect_more = false;
+ int freed_nodes = 0;
if (collector == SCAVENGER) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if (!node->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
- // the next_gc_likely_to_collect_more.
+ // the freed_nodes.
continue;
}
// Skip dependent handles. Their weak callbacks might expect to be
@@ -641,29 +641,29 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
// PostGarbageCollection processing. The current node might
// have been deleted in that round, so we need to bail out (or
// restart the processing).
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
}
if (!node->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
+ freed_nodes++;
}
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (!it.node()->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
- // the next_gc_likely_to_collect_more.
+ // the freed_nodes.
continue;
}
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
}
if (!it.node()->IsRetainer()) {
- next_gc_likely_to_collect_more = true;
+ freed_nodes++;
}
}
}
@@ -686,7 +686,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
}
new_space_nodes_.Rewind(last);
- return next_gc_likely_to_collect_more;
+ return freed_nodes;
}
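
The signature change above replaces a coarse boolean ("the next major GC is likely to collect more") with an exact count of freed handles, which heap.cc below feeds into its growth heuristic. A hypothetical standalone model of the counting contract, not V8's actual types:

#include <vector>

// A node that was a retainer going into post-GC processing but is no
// longer one afterwards counts as freed; nodes that were already free
// are skipped, mirroring the "Free nodes do not have weak callbacks"
// comment in the hunk above.
struct Node {
  bool was_retainer;  // before weak callbacks ran
  bool is_retainer;   // after weak callbacks ran
};

int CountFreedNodes(const std::vector<Node>& nodes) {
  int freed_nodes = 0;
  for (const Node& node : nodes) {
    if (!node.was_retainer) continue;      // already free: no callback
    if (!node.is_retainer) ++freed_nodes;  // released by its callback
  }
  return freed_nodes;
}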
diff --git a/src/global-handles.h b/src/global-handles.h
index a8631f07a..2f5afc934 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -155,9 +155,9 @@ class GlobalHandles {
static bool IsWeak(Object** location);
// Process pending weak handles.
- // Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector,
- GCTracer* tracer);
+ // Returns the number of freed nodes.
+ int PostGarbageCollectionProcessing(GarbageCollector collector,
+ GCTracer* tracer);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
diff --git a/src/heap.cc b/src/heap.cc
index 7260e7a27..513757085 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -60,7 +60,6 @@ Heap::Heap()
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
maximum_committed_(0),
- old_space_growing_factor_(4),
survived_since_last_expansion_(0),
sweep_generation_(0),
always_allocate_scope_depth_(0),
@@ -87,7 +86,6 @@ Heap::Heap()
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
- size_of_old_gen_at_last_old_space_gc_(0),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
@@ -1053,7 +1051,7 @@ bool Heap::PerformGarbageCollection(
GarbageCollector collector,
GCTracer* tracer,
const v8::GCCallbackFlags gc_callback_flags) {
- bool next_gc_likely_to_collect_more = false;
+ int freed_global_handles = 0;
if (collector != SCAVENGER) {
PROFILE(isolate_, CodeMovingGCEvent());
@@ -1093,12 +1091,11 @@ bool Heap::PerformGarbageCollection(
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
sweep_generation_++;
-
- size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
+ // Temporarily set the limit for the case when PostGarbageCollectionProcessing
+ // allocates and triggers GC. The real limit is set after
+ // PostGarbageCollectionProcessing.
old_generation_allocation_limit_ =
- OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
+ OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
@@ -1117,7 +1114,7 @@ bool Heap::PerformGarbageCollection(
gc_post_processing_depth_++;
{ AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- next_gc_likely_to_collect_more =
+ freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, tracer);
}
@@ -1132,6 +1129,9 @@ bool Heap::PerformGarbageCollection(
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
+ old_generation_allocation_limit_ =
+ OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+ freed_global_handles);
}
{ GCCallbacksScope scope(this);
@@ -1150,7 +1150,7 @@ bool Heap::PerformGarbageCollection(
}
#endif
- return next_gc_likely_to_collect_more;
+ return freed_global_handles > 0;
}
@@ -4965,12 +4965,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size,
code_range_size_ = code_range_size * MB;
- // We set the old generation growing factor to 2 to grow the heap slower on
- // memory-constrained devices.
- if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
- old_space_growing_factor_ = 2;
- }
-
configured_ = true;
return true;
}
@@ -5039,6 +5033,47 @@ int64_t Heap::PromotedExternalMemorySize() {
}
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles) {
+ const int kMaxHandles = 1000;
+ const int kMinHandles = 100;
+ double min_factor = 1.1;
+ double max_factor = 4;
+ // We set the old generation growing factor to 2 to grow the heap slower on
+ // memory-constrained devices.
+ if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+ max_factor = 2;
+ }
+ // If there are many freed global handles, then the next full GC will
+ // likely collect a lot of garbage. Choose the heap growing factor
+ // depending on freed global handles.
+ // TODO(ulan, hpayer): Take into account mutator utilization.
+ double factor;
+ if (freed_global_handles <= kMinHandles) {
+ factor = max_factor;
+ } else if (freed_global_handles >= kMaxHandles) {
+ factor = min_factor;
+ } else {
+ // Compute factor using linear interpolation between points
+ // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+ factor = max_factor -
+ (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+ (kMaxHandles - kMinHandles);
+ }
+
+ if (FLAG_stress_compaction ||
+ mark_compact_collector()->reduce_memory_footprint_) {
+ factor = min_factor;
+ }
+
+ intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+ limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+ limit += new_space_.Capacity();
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
+}
+
+
void Heap::EnableInlineAllocation() {
if (!inline_allocation_disabled_) return;
inline_allocation_disabled_ = false;
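
The new Heap::OldGenerationAllocationLimit interpolates the growth factor linearly between (kMinHandles, max_factor) and (kMaxHandles, min_factor): many freed global handles suggest the next full GC will reclaim a lot anyway, so the heap is grown less aggressively. A self-contained sketch of just that interpolation, reusing the constants from the hunk above (the function below is a standalone illustration, not V8's declaration):

#include <cstdio>

// <= kMinHandles freed -> grow by max_factor; >= kMaxHandles freed ->
// grow by min_factor; in between, blend linearly between the two.
double GrowingFactor(int freed_global_handles,
                     double min_factor = 1.1, double max_factor = 4.0) {
  const int kMinHandles = 100;
  const int kMaxHandles = 1000;
  if (freed_global_handles <= kMinHandles) return max_factor;
  if (freed_global_handles >= kMaxHandles) return min_factor;
  return max_factor -
         (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
             (kMaxHandles - kMinHandles);
}

int main() {
  for (int freed : {0, 100, 550, 1000, 5000}) {
    std::printf("freed=%4d -> factor=%.2f\n", freed, GrowingFactor(freed));
  }
  // freed=550 is halfway, so the factor lands midway: (4.0 + 1.1) / 2 = 2.55.
  return 0;
}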
diff --git a/src/heap.h b/src/heap.h
index 97de93eab..b5f42b431 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1085,15 +1085,8 @@ class Heap {
static const int kMaxExecutableSizeHugeMemoryDevice =
700 * kPointerMultiplier;
- intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
- intptr_t limit = FLAG_stress_compaction
- ? old_gen_size + old_gen_size / 10
- : old_gen_size * old_space_growing_factor_;
- limit = Max(limit, kMinimumOldGenerationAllocationLimit);
- limit += new_space_.Capacity();
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
- }
+ intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles);
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1513,11 +1506,6 @@ class Heap {
intptr_t max_executable_size_;
intptr_t maximum_committed_;
- // The old space growing factor is used in the old space heap growing
- // strategy. The new old space size is the current old space size times
- // old_space_growing_factor_.
- int old_space_growing_factor_;
-
// For keeping track of how much data has survived
// scavenge since last new space expansion.
int survived_since_last_expansion_;
@@ -1586,9 +1574,6 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
- // Used to adjust the limits that control the timing of the next GC.
- intptr_t size_of_old_gen_at_last_old_space_gc_;
-
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 8be2c6717..6e5ea741b 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -8563,10 +8563,12 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
HValue* function = Pop(); // f
Drop(1); // apply
+ HValue* checked_function = AddCheckMap(function, function_map);
+
if (function_state()->outer() == NULL) {
HInstruction* elements = Add<HArgumentsElements>(false);
HInstruction* length = Add<HArgumentsLength>(elements);
- HValue* wrapped_receiver = BuildWrapReceiver(receiver, function);
+ HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
HInstruction* result = New<HApplyArguments>(function,
wrapped_receiver,
length,
@@ -8582,7 +8584,7 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
const ZoneList<HValue*>* arguments_values = args->arguments_values();
int arguments_count = arguments_values->length();
Push(function);
- Push(BuildWrapReceiver(receiver, function));
+ Push(BuildWrapReceiver(receiver, checked_function));
for (int i = 1; i < arguments_count; i++) {
Push(arguments_values->at(i));
}
diff --git a/src/objects.h b/src/objects.h
index 46661b681..73566d885 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1036,7 +1036,7 @@ template <class C> inline bool Is(Object* obj);
V(kCopyBuffersOverlap, "Copy buffers overlap") \
V(kCouldNotGenerateZero, "Could not generate +0.0") \
V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
- V(kDebuggerIsActive, "Debugger is active") \
+ V(kDebuggerHasBreakPoints, "Debugger has break points") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
diff --git a/src/version.cc b/src/version.cc
index cc52a0df7..1f2191728 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 27
#define BUILD_NUMBER 34
-#define PATCH_LEVEL 3
+#define PATCH_LEVEL 6
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index f4fadb320..3dd25abd1 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -75,6 +75,9 @@
# BUG(3287). (test-cpu-profiler/SampleWhenFrameIsNotSetup)
'test-cpu-profiler/*': [PASS, FLAKY],
+ # BUG(crbug/386492). This will be fixed by r22029.
+ 'test-debug/ThreadedDebugging': [PASS, FAIL],
+
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
diff --git a/test/mjsunit/debug-compile-optimized.js b/test/mjsunit/debug-compile-optimized.js
new file mode 100644
index 000000000..468605aba
--- /dev/null
+++ b/test/mjsunit/debug-compile-optimized.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --crankshaft
+
+Debug = debug.Debug;
+
+Debug.setListener(function() {});
+
+function f() {}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+assertOptimized(f);
+
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-386034.js b/test/mjsunit/regress/regress-386034.js
new file mode 100644
index 000000000..d770ce91b
--- /dev/null
+++ b/test/mjsunit/regress/regress-386034.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ var v = x;
+ for (i = 0; i < 1; i++) {
+ v.apply(this, arguments);
+ }
+}
+
+function g() {}
+
+f(g);
+f(g);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(function() { f('----'); }, TypeError);