deps: upgrade v8 to 4.5.92

This commit includes two fix-ups to src/node_contextify.cc and
lib/module.js to force-load the debugger when necessary.

PR-URL: https://github.com/nodejs/io.js/pull/2091
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Author: Ben Noordhuis
Date: 2015-07-02 16:32:19 +02:00
Committed-By: Ali Ijaz Sheikh
Parent: a5745aa151
Commit: 41e63fb088
Changed files: 1238 (+81934, -41207)

deps/v8/.gitignore (vendored, 2 changes)

@@ -60,6 +60,8 @@ shell_g
 /test/promises-aplus/promises-tests
 /test/promises-aplus/promises-tests.tar.gz
 /test/promises-aplus/sinon
+/test/simdjs/ecmascript_simd*
+/test/simdjs/data*
 /test/test262/data
 /test/test262/data.old
 /test/test262/tc39-test262-*

deps/v8/BUILD.gn (vendored, 69 changes)

@ -52,7 +52,7 @@ config("internal_config") {
include_dirs = [ "." ] include_dirs = [ "." ]
if (component_mode == "shared_library") { if (is_component_build) {
defines = [ defines = [
"V8_SHARED", "V8_SHARED",
"BUILDING_V8_SHARED", "BUILDING_V8_SHARED",
@ -204,6 +204,7 @@ action("js2c") {
"src/macros.py", "src/macros.py",
"src/messages.h", "src/messages.h",
"src/runtime.js", "src/runtime.js",
"src/prologue.js",
"src/v8natives.js", "src/v8natives.js",
"src/symbol.js", "src/symbol.js",
"src/array.js", "src/array.js",
@ -215,6 +216,7 @@ action("js2c") {
"src/regexp.js", "src/regexp.js",
"src/arraybuffer.js", "src/arraybuffer.js",
"src/typedarray.js", "src/typedarray.js",
"src/iterator-prototype.js",
"src/generator.js", "src/generator.js",
"src/object-observe.js", "src/object-observe.js",
"src/collection.js", "src/collection.js",
@ -267,6 +269,7 @@ action("js2c_experimental") {
"src/messages.h", "src/messages.h",
"src/proxy.js", "src/proxy.js",
"src/generator.js", "src/generator.js",
"src/harmony-atomics.js",
"src/harmony-array.js", "src/harmony-array.js",
"src/harmony-array-includes.js", "src/harmony-array-includes.js",
"src/harmony-typedarray.js", "src/harmony-typedarray.js",
@ -274,7 +277,8 @@ action("js2c_experimental") {
"src/harmony-regexp.js", "src/harmony-regexp.js",
"src/harmony-reflect.js", "src/harmony-reflect.js",
"src/harmony-spread.js", "src/harmony-spread.js",
"src/harmony-object.js" "src/harmony-object.js",
"src/harmony-sharedarraybuffer.js"
] ]
outputs = [ outputs = [
@ -474,9 +478,13 @@ source_set("v8_snapshot") {
":js2c", ":js2c",
":js2c_experimental", ":js2c_experimental",
":js2c_extras", ":js2c_extras",
":run_mksnapshot",
":v8_base", ":v8_base",
] ]
public_deps = [
# This should be public so downstream targets can declare the snapshot
# output file as their inputs.
":run_mksnapshot",
]
sources = [ sources = [
"$target_gen_dir/libraries.cc", "$target_gen_dir/libraries.cc",
@ -502,9 +510,11 @@ if (v8_use_external_startup_data) {
":js2c", ":js2c",
":js2c_experimental", ":js2c_experimental",
":js2c_extras", ":js2c_extras",
":run_mksnapshot",
":v8_base", ":v8_base",
]
public_deps = [
":natives_blob", ":natives_blob",
":run_mksnapshot",
] ]
sources = [ sources = [
@ -526,6 +536,14 @@ source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [ sources = [
"include/v8-debug.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
"include/v8-util.h",
"include/v8-version.h",
"include/v8.h",
"include/v8config.h",
"src/accessors.cc", "src/accessors.cc",
"src/accessors.h", "src/accessors.h",
"src/allocation.cc", "src/allocation.cc",
@ -544,6 +562,8 @@ source_set("v8_base") {
"src/assembler.h", "src/assembler.h",
"src/assert-scope.h", "src/assert-scope.h",
"src/assert-scope.cc", "src/assert-scope.cc",
"src/ast-literal-reindexer.cc",
"src/ast-literal-reindexer.h",
"src/ast-numbering.cc", "src/ast-numbering.cc",
"src/ast-numbering.h", "src/ast-numbering.h",
"src/ast-value-factory.cc", "src/ast-value-factory.cc",
@ -602,6 +622,8 @@ source_set("v8_base") {
"src/compiler/basic-block-instrumentor.h", "src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc", "src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h", "src/compiler/change-lowering.h",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h", "src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc", "src/compiler/code-generator.cc",
"src/compiler/code-generator.h", "src/compiler/code-generator.h",
@ -617,8 +639,8 @@ source_set("v8_base") {
"src/compiler/control-equivalence.h", "src/compiler/control-equivalence.h",
"src/compiler/control-flow-optimizer.cc", "src/compiler/control-flow-optimizer.cc",
"src/compiler/control-flow-optimizer.h", "src/compiler/control-flow-optimizer.h",
"src/compiler/control-reducer.cc", "src/compiler/dead-code-elimination.cc",
"src/compiler/control-reducer.h", "src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h", "src/compiler/diamond.h",
"src/compiler/frame.h", "src/compiler/frame.h",
"src/compiler/frame-elider.cc", "src/compiler/frame-elider.cc",
@ -632,10 +654,14 @@ source_set("v8_base") {
"src/compiler/graph-reducer.h", "src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc", "src/compiler/graph-replay.cc",
"src/compiler/graph-replay.h", "src/compiler/graph-replay.h",
"src/compiler/graph-trimmer.cc",
"src/compiler/graph-trimmer.h",
"src/compiler/graph-visualizer.cc", "src/compiler/graph-visualizer.cc",
"src/compiler/graph-visualizer.h", "src/compiler/graph-visualizer.h",
"src/compiler/graph.cc", "src/compiler/graph.cc",
"src/compiler/graph.h", "src/compiler/graph.h",
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h", "src/compiler/instruction-codes.h",
"src/compiler/instruction-selector-impl.h", "src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc", "src/compiler/instruction-selector.cc",
@ -703,8 +729,6 @@ source_set("v8_base") {
"src/compiler/pipeline.h", "src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc", "src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h", "src/compiler/pipeline-statistics.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc", "src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h", "src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc", "src/compiler/register-allocator-verifier.cc",
@ -774,6 +798,7 @@ source_set("v8_base") {
"src/elements.h", "src/elements.h",
"src/execution.cc", "src/execution.cc",
"src/execution.h", "src/execution.h",
"src/expression-classifier.h",
"src/extensions/externalize-string-extension.cc", "src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h", "src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc", "src/extensions/free-buffer-extension.cc",
@ -958,12 +983,11 @@ source_set("v8_base") {
"src/optimizing-compile-dispatcher.h", "src/optimizing-compile-dispatcher.h",
"src/ostreams.cc", "src/ostreams.cc",
"src/ostreams.h", "src/ostreams.h",
"src/pattern-rewriter.cc",
"src/parser.cc", "src/parser.cc",
"src/parser.h", "src/parser.h",
"src/pending-compilation-error-handler.cc", "src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h", "src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/preparse-data-format.h", "src/preparse-data-format.h",
"src/preparse-data.cc", "src/preparse-data.cc",
"src/preparse-data.h", "src/preparse-data.h",
@ -992,11 +1016,13 @@ source_set("v8_base") {
"src/runtime-profiler.cc", "src/runtime-profiler.cc",
"src/runtime-profiler.h", "src/runtime-profiler.h",
"src/runtime/runtime-array.cc", "src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-classes.cc", "src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc", "src/runtime/runtime-collections.cc",
"src/runtime/runtime-compiler.cc", "src/runtime/runtime-compiler.cc",
"src/runtime/runtime-date.cc", "src/runtime/runtime-date.cc",
"src/runtime/runtime-debug.cc", "src/runtime/runtime-debug.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc", "src/runtime/runtime-function.cc",
"src/runtime/runtime-generator.cc", "src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc", "src/runtime/runtime-i18n.cc",
@ -1032,6 +1058,7 @@ source_set("v8_base") {
"src/scopes.cc", "src/scopes.cc",
"src/scopes.h", "src/scopes.h",
"src/signature.h", "src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h", "src/small-pointer-list.h",
"src/smart-pointers.h", "src/smart-pointers.h",
"src/snapshot/natives.h", "src/snapshot/natives.h",
@ -1040,6 +1067,8 @@ source_set("v8_base") {
"src/snapshot/snapshot-common.cc", "src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc", "src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h", "src/snapshot/snapshot-source-sink.h",
"src/splay-tree.h",
"src/splay-tree-inl.h",
"src/snapshot/snapshot.h", "src/snapshot/snapshot.h",
"src/string-builder.cc", "src/string-builder.cc",
"src/string-builder.h", "src/string-builder.h",
@ -1089,6 +1118,8 @@ source_set("v8_base") {
"src/vm-state.h", "src/vm-state.h",
"src/zone.cc", "src/zone.cc",
"src/zone.h", "src/zone.h",
"src/zone-allocator.h",
"src/zone-containers.h",
"src/third_party/fdlibm/fdlibm.cc", "src/third_party/fdlibm/fdlibm.cc",
"src/third_party/fdlibm/fdlibm.h", "src/third_party/fdlibm/fdlibm.h",
] ]
@ -1201,6 +1232,7 @@ source_set("v8_base") {
"src/arm/regexp-macro-assembler-arm.cc", "src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h", "src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc", "src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc", "src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h", "src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc", "src/compiler/arm/instruction-selector-arm.cc",
@ -1295,6 +1327,7 @@ source_set("v8_base") {
"src/mips/regexp-macro-assembler-mips.cc", "src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h", "src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc", "src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/compiler/mips/code-generator-mips.cc", "src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h", "src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-selector-mips.cc", "src/compiler/mips/instruction-selector-mips.cc",
@ -1336,6 +1369,7 @@ source_set("v8_base") {
"src/mips64/regexp-macro-assembler-mips64.cc", "src/mips64/regexp-macro-assembler-mips64.cc",
"src/mips64/regexp-macro-assembler-mips64.h", "src/mips64/regexp-macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc", "src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
"src/ic/mips64/access-compiler-mips64.cc", "src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc", "src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc", "src/ic/mips64/ic-mips64.cc",
@ -1399,6 +1433,8 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_atomicword_compat.h", "src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h", "src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips_gcc.h", "src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_tsan.h", "src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc", "src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h", "src/base/atomicops_internals_x86_gcc.h",
@ -1558,7 +1594,7 @@ if (current_toolchain == snapshot_toolchain) {
# Public targets # Public targets
# #
if (component_mode == "shared_library") { if (is_component_build) {
component("v8") { component("v8") {
sources = [ sources = [
"src/v8dll-main.cc", "src/v8dll-main.cc",
@ -1567,11 +1603,17 @@ if (component_mode == "shared_library") {
if (v8_use_snapshot && v8_use_external_startup_data) { if (v8_use_snapshot && v8_use_external_startup_data) {
deps = [ deps = [
":v8_base", ":v8_base",
]
public_deps = [
":v8_external_snapshot", ":v8_external_snapshot",
] ]
} else if (v8_use_snapshot) { } else if (v8_use_snapshot) {
deps = [ deps = [
":v8_base", ":v8_base",
]
# v8_snapshot should be public so downstream targets can declare the
# snapshot file as their input.
public_deps = [
":v8_snapshot", ":v8_snapshot",
] ]
} else { } else {
@ -1607,6 +1649,8 @@ if (component_mode == "shared_library") {
} else if (v8_use_snapshot) { } else if (v8_use_snapshot) {
deps = [ deps = [
":v8_base", ":v8_base",
]
public_deps = [
":v8_snapshot", ":v8_snapshot",
] ]
} else { } else {
@ -1657,9 +1701,10 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
sources += [ "src/d8-windows.cc" ] sources += [ "src/d8-windows.cc" ]
} }
if (component_mode != "shared_library") { if (!is_component_build) {
sources += [ sources += [
"src/d8-debug.cc", "src/d8-debug.cc",
"src/d8-debug.h",
"$target_gen_dir/d8-js.cc", "$target_gen_dir/d8-js.cc",
] ]
} }
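Note the recurring pattern in the BUILD.gn hunks above: snapshot targets move from deps into public_deps. In GN, a target's public_deps are re-exported to its dependents, so a downstream target may declare the snapshot output file among its inputs, while plain deps stay private. A rough sketch of that visibility rule as a toy Python model (the graph below is illustrative only, not GN's real data structures):

    # Toy model of GN dependency visibility; illustrative only, not GN itself.
    def visible_targets(target, graph):
        """Targets whose outputs a dependent of `target` may use:
        `target` itself plus the transitive closure of its public_deps."""
        seen, stack = [], [target]
        while stack:
            name = stack.pop()
            if name in seen:
                continue
            seen.append(name)
            stack.extend(graph.get(name, {}).get('public_deps', []))
        return seen

    graph = {
        # Mirrors the v8_snapshot hunk above: run_mksnapshot is now public.
        'v8_snapshot': {'deps': ['v8_base'], 'public_deps': ['run_mksnapshot']},
    }

    # ['v8_snapshot', 'run_mksnapshot'] -- but not the private 'v8_base'.
    print(visible_targets('v8_snapshot', graph))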

deps/v8/ChangeLog (vendored, 612 changes)

@@ -1,3 +1,615 @@
2015-07-02: Version 4.5.92
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.91
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.90
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.89
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.88
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.87
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.86
Ensure mjsunit tests use dashes not underscores in flags directives
(Chromium issue 505228).
Performance and stability improvements on all platforms.
2015-06-29: Version 4.5.85
Fix flag convention in handle count tests and comment (Chromium issue
505228).
Performance and stability improvements on all platforms.
2015-06-29: Version 4.5.84
Performance and stability improvements on all platforms.
2015-06-27: Version 4.5.83
Performance and stability improvements on all platforms.
2015-06-26: Version 4.5.82
Performance and stability improvements on all platforms.
2015-06-26: Version 4.5.81
Remove obsolete options in ScriptCompiler::CompileOptions (Chromium
issue 399580).
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.80
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.79
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.78
Serializer: clear next link in weak cells (Chromium issue 503552).
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.77
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.76
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.75
Date() should not depend on Date.prototype.toString (issue 4225).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.74
Expose Map/Set methods through the API (issue 3340).
[turbofan] NaN is never truish (issue 4207).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.73
Re-ship Harmony Array/TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.72
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.71
Performance and stability improvements on all platforms.
2015-06-20: Version 4.5.70
Ship Harmony Array/TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-20: Version 4.5.69
Ship arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.68
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.67
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.66
Ship arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.65
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.64
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.63
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.62
Hydrogen object literals: always initialize in-object properties
(Chromium issue 500497).
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.61
Add %TypedArray% to proto chain (issue 4085).
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.60
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.59
[crankshaft] Fix wrong bailout points in for-in loop body (Chromium
issue 500435).
Performance and stability improvements on all platforms.
2015-06-16: Version 4.5.58
Performance and stability improvements on all platforms.
2015-06-16: Version 4.5.57
Inline code generation for %_IsTypedArray (issue 4085).
Allow TypedArrays to be initialized with iterables (issue 4090).
Performance and stability improvements on all platforms.
2015-06-15: Version 4.5.56
Performance and stability improvements on all platforms.
2015-06-15: Version 4.5.55
Performance and stability improvements on all platforms.
2015-06-14: Version 4.5.54
Performance and stability improvements on all platforms.
2015-06-13: Version 4.5.53
Performance and stability improvements on all platforms.
2015-06-12: Version 4.5.52
Map::TryUpdate() must be in sync with Map::Update() (issue 4173).
Add ToObject call in Array.prototype.sort (issue 4125).
In Array.of and Array.from, fall back to DefineOwnProperty (issue 4168).
Performance and stability improvements on all platforms.
2015-06-12: Version 4.5.51
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.50
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.49
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.48
Support rest parameters in arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-10: Version 4.5.47
Implement %TypedArray%.prototype.slice (issue 3578).
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.46
Stage ES6 arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.45
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.44
Performance and stability improvements on all platforms.
2015-06-08: Version 4.5.43
[for-in] Make ForInNext and ForInFilter deal properly with exceptions
(Chromium issue 496331).
Performance and stability improvements on all platforms.
2015-06-08: Version 4.5.42
Performance and stability improvements on all platforms.
2015-06-06: Version 4.5.41
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.40
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.39
Stage ES6 Array and TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.38
Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
478811).
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.37
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.36
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.35
Flatten the Arrays returned and consumed by the v8::Map API (Chromium
issue 478263).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.34
Also allocate small typed arrays on heap when initialized from an array-
like (issue 3996).
Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.33
Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
478811).
Implement %TypedArray%.prototype.{toString,toLocaleString,join} (issue
3578).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.32
Performance and stability improvements on all platforms.
2015-06-02: Version 4.5.31
Performance and stability improvements on all platforms.
2015-06-02: Version 4.5.30
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.29
Reland "Re-enable on-heap typed array allocation" (issue 3996).
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.28
Re-enable on-heap typed array allocation (issue 3996).
Also expose DefineOwnProperty (Chromium issue 475206).
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.27
Performance and stability improvements on all platforms.
2015-05-31: Version 4.5.26
Performance and stability improvements on all platforms.
2015-05-30: Version 4.5.25
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.24
Debugger: consider try-finally scopes not catching wrt debug events
(Chromium issue 492522).
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.23
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.22
Do not eagerly convert exception to string when creating a message
object (Chromium issue 490680).
Performance and stability improvements on all platforms.
2015-05-28: Version 4.5.21
Performance and stability improvements on all platforms.
2015-05-28: Version 4.5.20
Introduce v8::Object::CreateDataProperty (Chromium issue 475206).
Performance and stability improvements on all platforms.
2015-05-27: Version 4.5.19
Performance and stability improvements on all platforms.
2015-05-27: Version 4.5.18
Add {Map,Set}::FromArray to the API (issue 3340).
Add {Map,Set}::AsArray to the API (issue 3340).
Add basic API support for Map & Set (issue 3340).
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.17
Correctly hook up materialized receiver into the evaluation context
chain (Chromium issue 491943).
Implement bookmarks for ExternalStreamingStream (Chromium issue 470930).
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.16
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.15
Performance and stability improvements on all platforms.
2015-05-23: Version 4.5.14
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.13
Remove v8::Private.
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.12
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.11
Performance and stability improvements on all platforms.
2015-05-21: Version 4.5.10
Re-land %TypedArray%.prototype.{map,filter,some} (issue 3578).
Performance and stability improvements on all platforms.
2015-05-21: Version 4.5.9
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.8
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.7
Implement %TypedArray%.{lastI,i}ndexOf (issue 3578).
Implement %TypedArray%.prototype.sort (issue 3578).
Implement %TypedArray%.reverse (issue 3578).
Implement %TypedArray%.prototype.{map,filter,some,reduce,reduceRight}
(issue 3578).
Fix has_pending_exception logic in API's Array::CloneElementAt (issue
4103).
Adding api to get last gc object statistics for chrome://tracing
(Chromium issue 476013).
Fix harmless HGraph verification failure after hoisting inlined bounds
checks (Chromium issue 487608).
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.6
Add TypedArray.from method (issue 3578).
Performance and stability improvements on all platforms.
2015-05-19: Version 4.5.5
ARM64: Propagate notification about aborted compilation from
RegExpEngine to MacroAssembler (Chromium issue 489290).
Performance and stability improvements on all platforms.
2015-05-18: Version 4.5.4
Performance and stability improvements on all platforms.
2015-05-18: Version 4.5.3
Performance and stability improvements on all platforms.
2015-05-17: Version 4.5.2
Performance and stability improvements on all platforms.
2015-05-16: Version 4.5.1
Test that TypedArray methods don't read length (issue 3578).
Implement %TypedArray%.{fill,find,findIndex} (issue 3578).
TypedArray.prototype.copyWithin method (issue 3578).
Provide accessor for object internal properties that doesn't require
debugger to be active (Chromium issue 481845).
Don't create debug context if debug listener is not set (Chromium issue
482290).
Performance and stability improvements on all platforms.
2015-05-13: Version 4.4.65
Deprecate Isolate::New.
Factor out core of Array.forEach and .every, for use in TypedArrays
(issue 3578).
Performance and stability improvements on all platforms.
2015-05-12: Version 4.4.64
Performance and stability improvements on all platforms.
2015-05-11: Version 4.4.63
Let Runtime_GrowArrayElements accept non-Smi numbers as |key| (Chromium

deps/v8/DEPS (vendored, 12 changes)

@@ -8,23 +8,23 @@ vars = {
 deps = {
   "v8/build/gyp":
-    Var("git_url") + "/external/gyp.git" + "@" + "0bb67471bca068996e15b56738fa4824dfa19de0",
+    Var("git_url") + "/external/gyp.git" + "@" + "5122240c5e5c4d8da12c543d82b03d6089eb77c5",
   "v8/third_party/icu":
-    Var("git_url") + "/chromium/deps/icu.git" + "@" + "f8c0e585b0a046d83d72b5d37356cb50d5b2031a",
+    Var("git_url") + "/chromium/deps/icu.git" + "@" + "1b697da5c2c0112e2b70e7e75d3e3d985f464a8f",
   "v8/buildtools":
-    Var("git_url") + "/chromium/buildtools.git" + "@" + "b0ede9c89f9d5fbe5387d961ad4c0ec665b6c821",
+    Var("git_url") + "/chromium/buildtools.git" + "@" + "ecc8e253abac3b6186a97573871a084f4c0ca3ae",
   "v8/testing/gtest":
-    Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
+    Var("git_url") + "/external/googletest.git" + "@" + "23574bf2333f834ff665f894c97bef8a5b33a0a9",
   "v8/testing/gmock":
     Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271",  # from svn revision 501
   "v8/tools/clang":
-    Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5bab78c6ced45a71a8e095a09697ca80492e57e1",
+    Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "73ec8804ed395b0886d6edf82a9f33583f4a7902",
 }

 deps_os = {
   "android": {
     "v8/third_party/android_tools":
-      Var("git_url") + "/android_tools.git" + "@" + "4f723e2a5fa5b7b8a198072ac19b92344be2b271",
+      Var("git_url") + "/android_tools.git" + "@" + "21f4bcbd6cd927e4b4227cfde7d5f13486be1236",
   },
   "win": {
     "v8/third_party/cygwin":

deps/v8/LICENSE (vendored, 7 changes)

@@ -3,12 +3,12 @@ maintained libraries. The externally maintained libraries used by V8
 are:

   - PCRE test suite, located in
-    test/mjsunit/third_party/regexp-pcre.js.  This is based on the
+    test/mjsunit/third_party/regexp-pcre/regexp-pcre.js.  This is based on the
     test suite from PCRE-7.3, which is copyrighted by the University
     of Cambridge and Google, Inc.  The copyright notice and license
     are embedded in regexp-pcre.js.

-  - Layout tests, located in test/mjsunit/third_party.  These are
+  - Layout tests, located in test/mjsunit/third_party/object-keys.  These are
     based on layout tests from webkit.org which are copyrighted by
     Apple Computer, Inc. and released under a 3-clause BSD license.
@@ -26,6 +26,9 @@ are:
 These libraries have their own licenses; we recommend you read them,
 as their terms may differ from the terms below.

+Further license information can be found in LICENSE files located in
+sub-directories.
+
 Copyright 2014, the V8 project authors. All rights reserved.
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are

deps/v8/Makefile (vendored, 21 changes)

@@ -31,9 +31,7 @@ OUTDIR ?= out
 TESTJOBS ?=
 GYPFLAGS ?=
 TESTFLAGS ?=
-ANDROID_NDK_ROOT ?=
 ANDROID_NDK_HOST_ARCH ?=
-ANDROID_TOOLCHAIN ?=
 ANDROID_V8 ?= /data/local/tmp/v8
 NACL_SDK_ROOT ?=
@@ -145,10 +143,14 @@ ifeq ($(i18nsupport), off)
   GYPFLAGS += -Dv8_enable_i18n_support=0
   TESTFLAGS += --noi18n
 endif
-# deprecation_warnings=on
+# deprecationwarnings=on
 ifeq ($(deprecationwarnings), on)
   GYPFLAGS += -Dv8_deprecation_warnings=1
 endif
+# imminentdeprecationwarnings=on
+ifeq ($(imminentdeprecationwarnings), on)
+  GYPFLAGS += -Dv8_imminent_deprecation_warnings=1
+endif
 # asan=on
 ifeq ($(asan), on)
   GYPFLAGS += -Dasan=1 -Dclang=1
@@ -246,7 +248,7 @@ NACL_ARCHES = nacl_ia32 nacl_x64
 GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
            build/shim_headers.gypi build/features.gypi build/standalone.gypi \
            build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
-           build/android.gypi test/cctest/cctest.gyp \
+           test/cctest/cctest.gyp \
            test/unittests/unittests.gyp tools/gyp/v8.gyp \
            tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
            buildtools/third_party/libc++abi/libc++abi.gyp \
@@ -277,7 +279,6 @@ ENVFILE = $(OUTDIR)/environment
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
         $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
         $(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
-        must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN \
         $(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
         must-set-NACL_SDK_ROOT
@@ -311,8 +312,7 @@ native: $(OUTDIR)/Makefile.native
 $(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))

-$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
-                   must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN Makefile.android
+$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
 	@$(MAKE) -f Makefile.android $@ \
 	          ARCH="$(basename $@)" \
 	          MODE="$(subst .,,$(suffix $@))" \
@@ -448,13 +448,6 @@ $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)

-must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
-ifndef ANDROID_NDK_ROOT
-ifndef ANDROID_TOOLCHAIN
-  $(error ANDROID_NDK_ROOT or ANDROID_TOOLCHAIN must be set))
-endif
-endif
-
 # Note that NACL_SDK_ROOT must be set to point to an appropriate
 # Native Client SDK before using this makefile. You can download
 # an SDK here:
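The new imminentdeprecationwarnings switch follows the same pattern as the existing deprecationwarnings one: a make variable set to "on" appends a GYP define. A hedged sketch of that mapping in Python (the dict is an illustration; the authoritative logic is the Makefile hunk above):

    # Sketch of the Makefile's switch-to-GYPFLAGS mapping; illustrative only.
    SWITCH_TO_GYPFLAG = {
        'deprecationwarnings': '-Dv8_deprecation_warnings=1',
        'imminentdeprecationwarnings': '-Dv8_imminent_deprecation_warnings=1',
    }

    def gypflags(switches):
        """Return the GYPFLAGS additions for every switch set to 'on'."""
        return [flag for name, flag in SWITCH_TO_GYPFLAG.items()
                if switches.get(name) == 'on']

    # e.g. `make x64.release imminentdeprecationwarnings=on`:
    print(gypflags({'imminentdeprecationwarnings': 'on'}))
    # ['-Dv8_imminent_deprecation_warnings=1']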

deps/v8/Makefile.android (vendored)

@@ -35,75 +35,28 @@ MODES = release debug
 ANDROID_BUILDS = $(foreach mode,$(MODES), \
                  $(addsuffix .$(mode),$(ANDROID_ARCHES)))

-HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
-ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
-ifeq ($(HOST_OS), linux)
-  TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
-else ifeq ($(HOST_OS), mac)
-  TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
-else
-  $(error Host platform "${HOST_OS}" is not supported)
-endif

 ifeq ($(ARCH), android_arm)
-  DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm android_target_platform=14
-  DEFINES += arm_neon=0 arm_version=7
-  TOOLCHAIN_ARCH = arm-linux-androideabi
-  TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-  TOOLCHAIN_VER = 4.8
+  DEFINES = target_arch=arm v8_target_arch=arm
 else ifeq ($(ARCH), android_arm64)
-  DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=21
-  TOOLCHAIN_ARCH = aarch64-linux-android
-  TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-  TOOLCHAIN_VER = 4.9
+  DEFINES = target_arch=arm64 v8_target_arch=arm64
 else ifeq ($(ARCH), android_mipsel)
-  DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
-  DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
-  TOOLCHAIN_ARCH = mipsel-linux-android
-  TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-  TOOLCHAIN_VER = 4.8
+  DEFINES = target_arch=mipsel v8_target_arch=mipsel
 else ifeq ($(ARCH), android_ia32)
-  DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
-  TOOLCHAIN_ARCH = x86
-  TOOLCHAIN_PREFIX = i686-linux-android
-  TOOLCHAIN_VER = 4.8
+  DEFINES = target_arch=ia32 v8_target_arch=ia32
 else ifeq ($(ARCH), android_x64)
-  DEFINES = target_arch=x64 v8_target_arch=x64 android_target_arch=x86_64 android_target_platform=21
-  TOOLCHAIN_ARCH = x86_64
-  TOOLCHAIN_PREFIX = x86_64-linux-android
-  TOOLCHAIN_VER = 4.9
+  DEFINES = target_arch=x64 v8_target_arch=x64
 else ifeq ($(ARCH), android_x87)
-  DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
-  TOOLCHAIN_ARCH = x86
-  TOOLCHAIN_PREFIX = i686-linux-android
-  TOOLCHAIN_VER = 4.8
+  DEFINES = target_arch=ia32 v8_target_arch=x87
 else
   $(error Target architecture "${ARCH}" is not supported)
 endif

-TOOLCHAIN_PATH = \
-    ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
-ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
-ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
-  $(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
-          check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
-          correctly)
-endif
-# For mksnapshot host generation.
-DEFINES += host_os=${HOST_OS}
+# Common flags.
 DEFINES += OS=android

 .SECONDEXPANSION:
 $(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
 	@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
-	          CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
-	          AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
-	          RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
-	          CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
-	          LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
-	          LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
 	          BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
 	                      python -c "print raw_input().capitalize()") \
 	          builddir="$(shell pwd)/$(OUTDIR)/$@"
@@ -113,9 +66,7 @@ ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
 $(ANDROID_MAKEFILES):
 	GYP_GENERATORS=make-android \
 	GYP_DEFINES="${DEFINES}" \
-	CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
-	CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
 	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
 	build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-	-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
+	-Ibuild/standalone.gypi --depth=. \
 	-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

deps/v8/build/android.gypi (vendored, deleted)

@@ -1,266 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Definitions for building standalone V8 binaries to run on Android.
# This is mostly excerpted from:
# http://src.chromium.org/viewvc/chrome/trunk/src/build/common.gypi
{
'variables': {
# Location of Android NDK.
'variables': {
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
},
'conditions': [
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
'android_stlport_library': 'stlport_static',
}, # variables
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
'cflags': [
'-fomit-frame-pointer',
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
'USE_STLPORT=1',
'_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
# Not supported by Android toolchain.
# Where do these come from? Can't find references in
# any Chromium gyp or gypi file. Maybe they come from
# gyp itself?
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
'-l<(android_stlport_library)',
# Manually link the libgcc.a that the cross compiler uses.
'<!($CC -print-libgcc-file-name)',
'-lc',
'-ldl',
'-lstdc++',
'-lm',
],
'conditions': [
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64") and component!="shared_library"', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
'-Wl,-O1',
'-Wl,--as-needed',
],
}],
], # target_conditions
}, # target_defaults
}

deps/v8/build/features.gypi (vendored)

@@ -59,6 +59,9 @@
   # Enable compiler warnings when using V8_DEPRECATED apis.
   'v8_deprecation_warnings%': 0,

+  # Enable compiler warnings when using V8_DEPRECATE_SOON apis.
+  'v8_imminent_deprecation_warnings%': 0,
+
   # Set to 1 to enable DCHECKs in release builds.
   'dcheck_always_on%': 0,
 },
@@ -88,6 +91,9 @@
       ['v8_deprecation_warnings==1', {
         'defines': ['V8_DEPRECATION_WARNINGS',],
       }],
+      ['v8_imminent_deprecation_warnings==1', {
+        'defines': ['V8_IMMINENT_DEPRECATION_WARNINGS',],
+      }],
       ['v8_enable_i18n_support==1', {
         'defines': ['V8_I18N_SUPPORT',],
       }],
@@ -112,7 +118,7 @@
     },  # Debug
     'Release': {
       'variables': {
-        'v8_enable_handle_zapping%': 0,
+        'v8_enable_handle_zapping%': 1,
       },
       'conditions': [
         ['v8_enable_handle_zapping==1', {

deps/v8/build/get_landmines.py (vendored)

@@ -21,6 +21,8 @@ def main():
   print 'Revert activation of MSVS 2013.'
   print 'Activating MSVS 2013 again.'
   print 'Clobber after ICU roll.'
+  print 'Moar clobbering...'
+  print 'Remove build/android.gypi'
   return 0

deps/v8/build/gyp_v8 (vendored, 11 changes)

@@ -130,7 +130,7 @@ if __name__ == '__main__':
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
-  gyp_generators = os.environ.get('GYP_GENERATORS')
+  gyp_generators = os.environ.get('GYP_GENERATORS', '')
   if platform.system() == 'Linux' and gyp_generators != 'ninja':
     # Work around for crbug.com/331475.
     for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')):
@@ -140,4 +140,13 @@ if __name__ == '__main__':
   # -Goutput_dir defines where the build output goes, relative to the
   # Makefile. Set it to . so that the build output doesn't end up in out/out.
   gyp_args.append('-Goutput_dir=.')

+  gyp_defines = os.environ.get('GYP_DEFINES', '')
+
+  # Automatically turn on crosscompile support for platforms that need it.
+  if all(('ninja' in gyp_generators,
+          'OS=android' in gyp_defines,
+          'GYP_CROSSCOMPILE' not in os.environ)):
+    os.environ['GYP_CROSSCOMPILE'] = '1'
+
   run_gyp(gyp_args)
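For reference, the effect of the new guard in build/gyp_v8: GYP_CROSSCOMPILE is forced on only when the ninja generator targets Android and the caller has not already set it. A standalone restatement of that check (a sketch, not the shipped script):

    # Standalone restatement of the gyp_v8 guard above; sketch only.
    import os

    def should_force_crosscompile(environ):
        """True when GYP_CROSSCOMPILE should be switched on: ninja generator,
        Android target, and not already set by the caller."""
        return all(('ninja' in environ.get('GYP_GENERATORS', ''),
                    'OS=android' in environ.get('GYP_DEFINES', ''),
                    'GYP_CROSSCOMPILE' not in environ))

    if should_force_crosscompile(os.environ):
        os.environ['GYP_CROSSCOMPILE'] = '1'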

deps/v8/build/standalone.gypi (vendored)

@@ -33,16 +33,17 @@
   'includes': ['toolchain.gypi'],
   'variables': {
     'component%': 'static_library',
-    'clang_dir%': 'third_party/llvm-build/Release+Asserts',
     'clang_xcode%': 0,
     # Track where uninitialized memory originates from. From fastest to
     # slowest: 0 - no tracking, 1 - track only the initial allocation site, 2
     # - track the chain of stores leading from allocation site to use site.
-    'msan_track_origins%': 1,
+    'msan_track_origins%': 2,
     'visibility%': 'hidden',
     'v8_enable_backtrace%': 0,
     'v8_enable_i18n_support%': 1,
     'v8_deprecation_warnings': 1,
+    # TODO(jochen): Turn this on.
+    'v8_imminent_deprecation_warnings%': 0,
     'msvs_multi_core_compile%': '1',
     'mac_deployment_target%': '10.5',
     'release_extra_cflags%': '',
@@ -66,7 +67,9 @@
       },
       'host_arch%': '<(host_arch)',
       'target_arch%': '<(host_arch)',
+      'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
     },
+    'base_dir%': '<(base_dir)',
     'host_arch%': '<(host_arch)',
     'target_arch%': '<(target_arch)',
     'v8_target_arch%': '<(target_arch)',
@@ -74,6 +77,16 @@
     'lsan%': 0,
     'msan%': 0,
     'tsan%': 0,
+    # Enable coverage gathering instrumentation in sanitizer tools. This flag
+    # also controls coverage granularity (1 for function-level, 2 for
+    # block-level, 3 for edge-level).
+    'sanitizer_coverage%': 0,
+    # Use libc++ (buildtools/third_party/libc++ and
+    # buildtools/third_party/libc++abi) instead of stdlibc++ as standard
+    # library. This is intended to be used for instrumented builds.
+    'use_custom_libcxx%': 0,
+    'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',

     # goma settings.
     # 1 to use goma.
@@ -87,9 +100,17 @@
       }, {
         'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
       }],
+      ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
+        'host_clang%': '1',
+      }, {
+        'host_clang%': '0',
+      }],
     ],
   },
+  'base_dir%': '<(base_dir)',
+  'clang_dir%': '<(clang_dir)',
   'host_arch%': '<(host_arch)',
+  'host_clang%': '<(host_clang)',
   'target_arch%': '<(target_arch)',
   'v8_target_arch%': '<(v8_target_arch)',
   'werror%': '-Werror',
@@ -99,6 +120,11 @@
   'lsan%': '<(lsan)',
   'msan%': '<(msan)',
   'tsan%': '<(tsan)',
+  'sanitizer_coverage%': '<(sanitizer_coverage)',
+  'use_custom_libcxx%': '<(use_custom_libcxx)',
+
+  # Add a simple extra solely for the purpose of the cctests
+  'v8_extra_library_files': ['../test/cctest/test-extra.js'],

   # .gyp files or targets should set v8_code to 1 if they build V8 specific
   # code, as opposed to external code. This variable is used to control such
@@ -160,20 +186,132 @@
       'v8_enable_gdbjit%': 0,
     }],
     ['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
-      (v8_target_arch!="x87")', {
+      (v8_target_arch!="x87" and v8_target_arch!="x32")', {
       'clang%': 1,
     }, {
       'clang%': 0,
     }],
+    ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
+      'host_clang%': '1',
+    }, {
+      'host_clang%': '0',
+    }],
     ['asan==1 or lsan==1 or msan==1 or tsan==1', {
       'clang%': 1,
       'use_allocator%': 'none',
     }],
+    ['asan==1 and OS=="linux"', {
+      'use_custom_libcxx%': 1,
+    }],
+    ['tsan==1', {
+      'use_custom_libcxx%': 1,
+    }],
+    ['msan==1', {
+      # Use a just-built, MSan-instrumented libc++ instead of the system-wide
+      # libstdc++. This is required to avoid false positive reports whenever
+      # the C++ standard library is used.
+      'use_custom_libcxx%': 1,
+    }],
+    ['OS=="linux"', {
+      # Gradually roll out v8_use_external_startup_data.
+      # Should eventually be default enabled on all platforms.
+      'v8_use_external_startup_data%': 1,
+    }],
+    ['OS=="android"', {
+      # Location of Android NDK.
+      'variables': {
+        'variables': {
+          # The Android toolchain needs to use the absolute path to the NDK
+          # because it is used at different levels in the GYP files.
+          'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
+          'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
+          'host_os%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')",
+        },
+        # Copy conditionally-set variables out one scope.
+        'android_ndk_root%': '<(android_ndk_root)',
+        'host_os%': '<(host_os)',
+        'conditions': [
+          ['target_arch == "ia32"', {
+            'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+            'android_target_arch%': 'x86',
+            'android_target_platform%': '16',
+          }],
+          ['target_arch == "x64"', {
+            'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+            'android_target_arch%': 'x86_64',
+            'android_target_platform%': '21',
+          }],
+          ['target_arch=="arm"', {
+            'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+            'android_target_arch%': 'arm',
+            'android_target_platform%': '16',
+            'arm_version%': 7,
+          }],
+          ['target_arch == "arm64"', {
+            'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+            'android_target_arch%': 'arm64',
+            'android_target_platform%': '21',
+            'arm_version%': 'default',
+          }],
+          ['target_arch == "mipsel"', {
+            'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+            'android_target_arch%': 'mips',
+            'android_target_platform%': '16',
+          }],
+          ['target_arch == "mips64el"', {
+            'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
+            'android_target_arch%': 'mips64',
+            'android_target_platform%': '21',
+          }],
+        ],
+      },
+      # Copy conditionally-set variables out one scope.
+      'android_target_arch%': '<(android_target_arch)',
+      'android_target_platform%': '<(android_target_platform)',
+      'android_toolchain%': '<(android_toolchain)',
+      'arm_version%': '<(arm_version)',
+      'host_os%': '<(host_os)',
+      'conditions': [
+        ['android_ndk_root==""', {
+          'variables': {
+            'android_sysroot': '<(android_toolchain)/sysroot/',
+            'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
+          },
+          'android_include': '<(android_sysroot)/usr/include',
+          'conditions': [
+            ['target_arch=="x64"', {
+              'android_lib': '<(android_sysroot)/usr/lib64',
+            }, {
+              'android_lib': '<(android_sysroot)/usr/lib',
+            }],
+          ],
+          'android_stlport_include': '<(android_stlport)/stlport',
+          'android_stlport_libs': '<(android_stlport)/libs',
+        }, {
+          'variables': {
+            'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
+            'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
+          },
+          'android_include': '<(android_sysroot)/usr/include',
+          'conditions': [
+            ['target_arch=="x64"', {
+              'android_lib': '<(android_sysroot)/usr/lib64',
+            }, {
+              'android_lib': '<(android_sysroot)/usr/lib',
+            }],
+          ],
+          'android_stlport_include': '<(android_stlport)/stlport',
+          'android_stlport_libs': '<(android_stlport)/libs',
+        }],
+      ],
+      'android_stlport_library': 'stlport_static',
+    }],  # OS=="android"
+    ['host_clang==1', {
+      'host_cc': '<(clang_dir)/bin/clang',
+      'host_cxx': '<(clang_dir)/bin/clang++',
+    }, {
+      'host_cc': '<!(which gcc)',
+      'host_cxx': '<!(which g++)',
+    }],
   ],
   # Default ARM variable settings.
   'arm_version%': 'default',
@@ -194,6 +332,11 @@
   'target_defaults': {
     'variables': {
       'v8_code%': '<(v8_code)',
+      'conditions':[
+        ['OS=="android"', {
+          'host_os%': '<(host_os)',
+        }],
+      ],
     },
     'default_configuration': 'Debug',
     'configurations': {
@ -283,96 +426,148 @@
], ],
}, },
'conditions': [ 'conditions': [
['asan==1 and OS!="mac"', { ['os_posix==1 and OS!="mac"', {
'target_defaults': { 'target_defaults': {
'cflags_cc+': [ 'conditions': [
'-fno-omit-frame-pointer', # Common options for AddressSanitizer, LeakSanitizer,
'-gline-tables-only', # ThreadSanitizer and MemorySanitizer.
'-fsanitize=address', ['asan==1 or lsan==1 or tsan==1 or msan==1', {
'-w', # http://crbug.com/162783 'target_conditions': [
], ['_toolset=="target"', {
'cflags!': [ 'cflags': [
'-fomit-frame-pointer', '-fno-omit-frame-pointer',
], '-gline-tables-only',
'ldflags': [ ],
'-fsanitize=address', 'cflags!': [
], '-fomit-frame-pointer',
}, ],
}], }],
['tsan==1 and OS!="mac"', { ],
'target_defaults': { }],
'cflags+': [ ['asan==1', {
'-fno-omit-frame-pointer', 'target_conditions': [
'-gline-tables-only', ['_toolset=="target"', {
'-fsanitize=thread', 'cflags': [
'-fPIC', '-fsanitize=address',
'-Wno-c++11-extensions', ],
], 'ldflags': [
'cflags!': [ '-fsanitize=address',
'-fomit-frame-pointer', ],
], 'defines': [
'ldflags': [ 'ADDRESS_SANITIZER',
'-fsanitize=thread', ],
'-pie', }],
], ],
'defines': [ }],
'THREAD_SANITIZER', ['lsan==1', {
], 'target_conditions': [
}, ['_toolset=="target"', {
}], 'cflags': [
['msan==1 and OS!="mac"', { '-fsanitize=leak',
'target_defaults': { ],
'cflags_cc+': [ 'ldflags': [
'-fno-omit-frame-pointer', '-fsanitize=leak',
'-gline-tables-only', ],
'-fsanitize=memory', 'defines': [
'-fsanitize-memory-track-origins=<(msan_track_origins)', 'LEAK_SANITIZER',
'-fPIC', ],
], }],
'cflags+': [ ],
'-fPIC', }],
], ['tsan==1', {
'cflags!': [ 'target_conditions': [
'-fno-exceptions', ['_toolset=="target"', {
'-fomit-frame-pointer', 'cflags': [
], '-fsanitize=thread',
'ldflags': [ ],
'-fsanitize=memory', 'ldflags': [
], '-fsanitize=thread',
'defines': [ ],
'MEMORY_SANITIZER', 'defines': [
], 'THREAD_SANITIZER',
'dependencies': [ ],
# Use libc++ (third_party/libc++ and third_party/libc++abi) instead of }],
# stdlibc++ as standard library. This is intended to use for instrumented ],
# builds. }],
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy', ['msan==1', {
], 'target_conditions': [
}, ['_toolset=="target"', {
}], 'cflags': [
['asan==1 and OS=="mac"', { '-fsanitize=memory',
'target_defaults': { '-fsanitize-memory-track-origins=<(msan_track_origins)',
'xcode_settings': { ],
'OTHER_CFLAGS+': [ 'ldflags': [
'-fno-omit-frame-pointer', '-fsanitize=memory',
'-gline-tables-only', ],
'-fsanitize=address', 'defines': [
'-w', # http://crbug.com/162783 'MEMORY_SANITIZER',
], ],
'OTHER_CFLAGS!': [ }],
'-fomit-frame-pointer', ],
], }],
}, ['use_custom_libcxx==1', {
'target_conditions': [ 'dependencies': [
['_type!="static_library"', { '<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
'xcode_settings': {'OTHER_LDFLAGS': ['-fsanitize=address']}, ],
}],
['sanitizer_coverage!=0', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize-coverage=<(sanitizer_coverage)',
],
'defines': [
'SANITIZER_COVERAGE',
],
}],
],
}], }],
], ],
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
],
}, },
}], }],
['OS=="mac"', {
'target_defaults': {
'conditions': [
['asan==1', {
'xcode_settings': {
# FIXME(machenbach): This is outdated compared to common.gypi.
'OTHER_CFLAGS+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
'OTHER_CFLAGS!': [
'-fomit-frame-pointer',
],
'defines': [
'ADDRESS_SANITIZER',
],
},
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-fsanitize=address']},
}],
],
}],
['sanitizer_coverage!=0', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize-coverage=<(sanitizer_coverage)',
],
'defines': [
'SANITIZER_COVERAGE',
],
}],
],
}],
],
}, # target_defaults
}], # OS=="mac"
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="aix"', { or OS=="netbsd" or OS=="aix"', {
'target_defaults': { 'target_defaults': {
@ -382,17 +577,20 @@
'-Wno-unused-parameter', '-Wno-unused-parameter',
'-Wno-long-long', '-Wno-long-long',
'-pthread', '-pthread',
'-fno-exceptions',
'-pedantic', '-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern. # Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers', '-Wno-missing-field-initializers',
], ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ], 'cflags_cc': [
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
],
'ldflags': [ '-pthread', ], 'ldflags': [ '-pthread', ],
'conditions': [ 'conditions': [
# TODO(arm64): It'd be nice to enable this for arm64 as well, [ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
# but the Assembler requires some serious fixing first. or v8_target_arch=="mips64el")', {
[ 'clang==1 and v8_target_arch=="x64"', {
'cflags': [ '-Wshorten-64-to-32' ], 'cflags': [ '-Wshorten-64-to-32' ],
}], }],
[ 'host_arch=="ppc64" and OS!="aix"', { [ 'host_arch=="ppc64" and OS!="aix"', {
@ -415,11 +613,15 @@
'-Wall', '-Wall',
'<(werror)', '<(werror)',
'-Wno-unused-parameter', '-Wno-unused-parameter',
'-fno-exceptions',
# Don't warn about the "struct foo f = {0};" initialization pattern. # Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers', '-Wno-missing-field-initializers',
], ],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ], 'cflags_cc': [
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
],
'conditions': [ 'conditions': [
[ 'visibility=="hidden"', { [ 'visibility=="hidden"', {
'cflags': [ '-fvisibility=hidden' ], 'cflags': [ '-fvisibility=hidden' ],
@@ -581,10 +783,214 @@
], # target_conditions
}, # target_defaults
}], # OS=="mac"
['OS=="android"', {
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
'cflags': [
'-fomit-frame-pointer',
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
'USE_STLPORT=1',
'_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
# Not supported by Android toolchain.
# Where do these come from? Can't find references in
# any Chromium gyp or gypi file. Maybe they come from
# gyp itself?
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
'-l<(android_stlport_library)',
# Manually link the libgcc.a that the cross compiler uses.
'<!(<(android_toolchain)/*-gcc -print-libgcc-file-name)',
'-lc',
'-ldl',
'-lstdc++',
'-lm',
],
'conditions': [
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64" or target_arch=="ia32") and component!="shared_library"', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
'-Wl,-O1',
'-Wl,--as-needed',
],
}],
], # target_conditions
}, # target_defaults
}], # OS=="android"
['OS=="android" and clang==0', {
# Hardcode the compiler names in the Makefile so that
# it won't depend on the environment at make time.
'make_global_settings': [
['CC', '<!(/bin/echo -n <(android_toolchain)/*-gcc)'],
['CXX', '<!(/bin/echo -n <(android_toolchain)/*-g++)'],
['CC.host', '<(host_cc)'],
['CXX.host', '<(host_cxx)'],
],
}],
['clang!=1 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
'make_global_settings': [
['CC.host', '../<(clang_dir)/bin/clang'],
['CXX.host', '../<(clang_dir)/bin/clang++'],
['CC.host', '<(clang_dir)/bin/clang'],
['CXX.host', '<(clang_dir)/bin/clang++'],
],
}],
['clang==0 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
@@ -609,8 +1015,8 @@
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
'and OS!="win" and "<(GENERATOR)"=="make"', {
'make_global_settings': [
['CC', '../<(clang_dir)/bin/clang'],
['CXX', '../<(clang_dir)/bin/clang++'],
['CC', '<(clang_dir)/bin/clang'],
['CXX', '<(clang_dir)/bin/clang++'],
['CC.host', '$(CC)'],
['CXX.host', '$(CXX)'],
],
@@ -627,7 +1033,7 @@
['clang==1 and OS=="win"', {
'make_global_settings': [
# On Windows, gyp's ninja generator only looks at CC.
['CC', '../<(clang_dir)/bin/clang-cl'],
['CC', '<(clang_dir)/bin/clang-cl'],
],
}],
# TODO(yyanagisawa): supports GENERATOR==make


@@ -338,6 +338,26 @@
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64el") and v8_target_arch==target_arch', {
'target_conditions': [
['_toolset=="target"', {
# Target built with a Mips CXX compiler.
'variables': {
'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
},
'conditions': [
['ldso_path!=""', {
'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
}],
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
],
}],
],
}],
['v8_target_arch=="mips"', { ['v8_target_arch=="mips"', {
'defines': [ 'defines': [
'V8_TARGET_ARCH_MIPS', 'V8_TARGET_ARCH_MIPS',
@ -384,11 +404,7 @@
], ],
'cflags!': ['-mfp32', '-mfpxx'], 'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'], 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [ 'ldflags': ['-mips32r6'],
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}], }],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'conditions': [ 'conditions': [
@ -571,11 +587,7 @@
], ],
'cflags!': ['-mfp32', '-mfpxx'], 'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'], 'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [ 'ldflags': ['-mips32r6'],
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}], }],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'conditions': [ 'conditions': [
@ -770,20 +782,12 @@
['mips_arch_variant=="r6"', { ['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',], 'defines': ['_MIPS_ARCH_MIPS64R6',],
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'], 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'ldflags': [ 'ldflags': ['-mips64r6', '-mabi=64'],
'-mips64r6', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}], }],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',], 'defines': ['_MIPS_ARCH_MIPS64R2',],
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'], 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [ 'ldflags': ['-mips64r2', '-mabi=64'],
'-mips64r2', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}], }],
], ],
}, { }, {


@@ -248,7 +248,8 @@ class V8_EXPORT Debug {
* Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
* debugger context. Note that the content of the debugger context is subject
* to change.
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext();
@@ -259,6 +260,14 @@
* unexpectedly used. LiveEdit is enabled by default.
*/
static void SetLiveEditEnabled(Isolate* isolate, bool enable);

/**
* Returns array of internal properties specific to the value type. Result has
* the following format: [<name>, <value>,...,<name>, <value>]. Result array
* will be allocated in the current context.
*/
static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
Local<Value> value);
};
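A minimal sketch (not part of this commit) of how an embedder might consume the new GetInternalProperties() entry point; the DumpInternalProperties helper and its printf output are illustrative assumptions, and a live isolate with an entered context is presumed:

#include <stdint.h>
#include <stdio.h>
#include "include/v8.h"
#include "include/v8-debug.h"

// Hypothetical helper: walks the [<name>, <value>, ...] pairs for |value|.
void DumpInternalProperties(v8::Isolate* isolate, v8::Local<v8::Value> value) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::Array> props;
  if (!v8::Debug::GetInternalProperties(isolate, value).ToLocal(&props)) {
    return;  // No internal properties for this value type.
  }
  for (uint32_t i = 0; i + 1 < props->Length(); i += 2) {
    v8::String::Utf8Value name(props->Get(i));
    v8::String::Utf8Value val(props->Get(i + 1));
    printf("%s = %s\n", *name, *val);
  }
}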


@@ -56,6 +56,17 @@ class Platform {
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;

/**
* Schedules a task to be invoked on a foreground thread wrt a specific
* |isolate| after the given number of seconds |delay_in_seconds|.
* Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) {
// TODO(ulan): Make this function abstract after V8 roll in Chromium.
}

/**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least
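For embedders, the new hook can be honored with a simple time-ordered queue. The class below is a hypothetical sketch, not part of the diff: the inline-run bodies of the other overrides and the PumpDelayedTasks() method are toy stand-ins for a real event loop.

#include <chrono>
#include <functional>
#include <queue>
#include <utility>
#include <vector>
#include "include/v8-platform.h"

// Hypothetical embedder platform; only the delayed queue is the point here.
class DelayingPlatform : public v8::Platform {
 public:
  void CallOnBackgroundThread(v8::Task* task, ExpectedRuntime) override {
    task->Run();  // Toy: run inline; a real platform would use a worker pool.
    delete task;
  }
  void CallOnForegroundThread(v8::Isolate*, v8::Task* task) override {
    task->Run();  // Toy: run inline; a real platform would enqueue.
    delete task;
  }
  void CallDelayedOnForegroundThread(v8::Isolate*, v8::Task* task,
                                     double delay_in_seconds) override {
    // Due time first, so the priority queue pops the earliest task.
    due_.push({MonotonicallyIncreasingTime() + delay_in_seconds, task});
  }
  double MonotonicallyIncreasingTime() override {
    return std::chrono::duration<double>(
               std::chrono::steady_clock::now().time_since_epoch()).count();
  }
  // The embedder's event loop calls this periodically on the foreground
  // thread to run tasks whose delay has elapsed.
  void PumpDelayedTasks() {
    while (!due_.empty() &&
           due_.top().first <= MonotonicallyIncreasingTime()) {
      v8::Task* task = due_.top().second;
      due_.pop();
      task->Run();
      delete task;
    }
  }

 private:
  typedef std::pair<double, v8::Task*> Entry;
  std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry> > due_;
};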


@@ -275,7 +275,8 @@ class V8_EXPORT HeapGraphNode {
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11, // Sliced string. A fragment of another string.
kSymbol = 12 // A Symbol (ES6).
kSymbol = 12, // A Symbol (ES6).
kSimdValue = 13 // A SIMD value stored in the heap (Proposed ES7).
};

/** Returns node type (see HeapGraphNode::Type). */


@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 4
#define V8_BUILD_NUMBER 63
#define V8_PATCH_LEVEL 12
#define V8_MINOR_VERSION 5
#define V8_BUILD_NUMBER 92
#define V8_PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

616
deps/v8/include/v8.h vendored

File diff suppressed because it is too large


@@ -5,6 +5,8 @@
#ifndef V8CONFIG_H_
#define V8CONFIG_H_

// clang-format off

// Platform headers for feature detection below.
#if defined(__ANDROID__)
# include <sys/cdefs.h>
@@ -183,6 +185,7 @@
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
@@ -199,8 +202,6 @@
#if defined(__GNUC__) // Clang in gcc mode.
# define V8_CC_GNU 1
#elif defined(_MSC_VER) // Clang in cl mode.
# define V8_CC_MSVC 1
#endif

// Clang defines __alignof__ as alias for __alignof
@@ -223,6 +224,7 @@
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
@@ -230,10 +232,15 @@
#elif defined(__GNUC__)
# define V8_CC_GNU 1
// Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL (defined(__INTEL_COMPILER))
# define V8_CC_MINGW32 (defined(__MINGW32__))
# define V8_CC_MINGW64 (defined(__MINGW64__))
# if defined(__INTEL_COMPILER) // Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL 1
# endif
# if defined(__MINGW32__)
# define V8_CC_MINGW32 1
# endif
# if defined(__MINGW64__)
# define V8_CC_MINGW64 1
# endif
# define V8_CC_MINGW (V8_CC_MINGW32 || V8_CC_MINGW64)
# define V8_HAS___ALIGNOF__ (V8_GNUC_PREREQ(4, 3, 0))
@@ -268,11 +275,10 @@
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# endif
#elif defined(_MSC_VER)
#endif

#if defined(_MSC_VER)
# define V8_CC_MSVC 1
# define V8_HAS___ALIGNOF 1
# define V8_HAS_DECLSPEC_ALIGN 1
@@ -313,22 +319,33 @@
#endif

// A macro to mark classes or functions as deprecated.
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
# define V8_DEPRECATED(message, declarator) \
#define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
# define V8_DEPRECATED(message, declarator) \
#define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
# define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#else
# define V8_DEPRECATED(message, declarator) declarator
#define V8_DEPRECATED(message, declarator) declarator
#endif

// a macro to make it easier to see what will be deprecated.
// A macro (V8_DEPRECATE_SOON) to make it easier to see what will be deprecated.
#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) && \
V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
#define V8_DEPRECATE_SOON(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
#define V8_DEPRECATE_SOON(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
#define V8_DEPRECATE_SOON(message, declarator) __declspec(deprecated) declarator
#else
#define V8_DEPRECATE_SOON(message, declarator) declarator
#endif

// A macro to provide the compiler with branch prediction information.
@@ -402,4 +419,6 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif

// clang-format on

#endif // V8CONFIG_H_
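Both macros wrap a declarator, so a hypothetical embedder-facing class (not from this header) would use them as below; V8_DEPRECATED warns once V8_DEPRECATION_WARNINGS is defined, while V8_DEPRECATE_SOON only warns once V8_IMMINENT_DEPRECATION_WARNINGS is defined as well:

#include "include/v8config.h"

// Illustrative usage only; the class and methods are assumptions.
class Thing {
 public:
  V8_DEPRECATED("Use NewWay() instead", void OldWay());
  V8_DEPRECATE_SOON("Will be removed after the next release",
                    void AlmostOldWay());
  void NewWay();
};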

3
deps/v8/infra/OWNERS vendored Normal file

@@ -0,0 +1,3 @@
machenbach@chromium.org
sergiyb@chromium.org
tandrii@chromium.org

1
deps/v8/infra/README.md vendored Normal file

@@ -0,0 +1 @@
This directory contains infra-specific files.

51
deps/v8/infra/config/cq.cfg vendored Normal file

@@ -0,0 +1,51 @@
# See http://luci-config.appspot.com/schemas/projects/refs:cq.cfg for the
# documentation of this file format.
version: 1
cq_name: "v8"
cq_status_url: "https://chromium-cq-status.appspot.com"
hide_ref_in_committed_msg: true
commit_burst_delay: 60
max_commit_burst: 1
target_ref: "refs/pending/heads/master"
rietveld {
url: "https://codereview.chromium.org"
}
verifiers {
reviewer_lgtm {
committer_list: "v8"
}
tree_status {
tree_status_url: "https://v8-status.appspot.com"
}
try_job {
buckets {
name: "tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
builders { name: "v8_linux64_rel" }
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg" }
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
builders { name: "v8_linux_rel" }
builders { name: "v8_mac_rel" }
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel" }
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
builders { name: "v8_win_rel" }
}
}
sign_cla {}
}


@@ -0,0 +1 @@
This directory contains v8 project-wide configurations for infra services.


@@ -0,0 +1,23 @@
# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
#
# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
# schema of this file and documentation.
#
# Please keep this list sorted by bucket name.
buckets {
name: "master.tryserver.v8"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "service-account-cq"
}
acls {
role: WRITER
group: "service-account-v8-master"
}
}

69
deps/v8/samples/hello-world.cc vendored Normal file

@@ -0,0 +1,69 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
using namespace v8;
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
int main(int argc, char* argv[]) {
// Initialize V8.
V8::InitializeICU();
Platform* platform = platform::CreateDefaultPlatform();
V8::InitializePlatform(platform);
V8::Initialize();
// Create a new Isolate and make it the current one.
ArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &allocator;
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope isolate_scope(isolate);
// Create a stack-allocated handle scope.
HandleScope handle_scope(isolate);
// Create a new context.
Local<Context> context = Context::New(isolate);
// Enter the context for compiling and running the hello world script.
Context::Scope context_scope(context);
// Create a string containing the JavaScript source code.
Local<String> source = String::NewFromUtf8(isolate, "'Hello' + ', World!'");
// Compile the source code.
Local<Script> script = Script::Compile(source);
// Run the script to get the result.
Local<Value> result = script->Run();
// Convert the result to a UTF8 string and print it.
String::Utf8Value utf8(result);
printf("%s\n", *utf8);
}
// Dispose the isolate and tear down V8.
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
delete platform;
return 0;
}


@@ -220,7 +220,7 @@ bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
// We're just about to compile the script; set up an error handler to
// catch any exceptions the script might throw.
TryCatch try_catch;
TryCatch try_catch(GetIsolate());

// Compile the script and check for errors.
Handle<Script> compiled_script = Script::Compile(script);
@@ -281,7 +281,7 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
Handle<Object> request_obj = WrapRequest(request);

// Set up an exception handler before calling the Process function
TryCatch try_catch;
TryCatch try_catch(GetIsolate());

// Invoke the process function, giving the global object as 'this'
// and one argument, the request.
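The migration pattern, shown as a standalone sketch rather than code from the samples: construct TryCatch with the isolate that will run the script. The CompileOrReport() helper is hypothetical and assumes the caller has already entered a context on the isolate:

#include <stdio.h>
#include "include/v8.h"

// Hypothetical helper: compile |source| and report any exception via the
// isolate-scoped TryCatch introduced by this change.
bool CompileOrReport(v8::Isolate* isolate, v8::Local<v8::String> source) {
  v8::HandleScope handle_scope(isolate);
  v8::TryCatch try_catch(isolate);  // was: v8::TryCatch try_catch;
  v8::Local<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty()) {
    v8::String::Utf8Value error(try_catch.Exception());
    fprintf(stderr, "Compile error: %s\n", *error);
    return false;
  }
  return true;
}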


@@ -61,6 +61,12 @@
'shell.cc',
],
},
{
'target_name': 'hello-world',
'sources': [
'hello-world.cc',
],
},
{
'target_name': 'process',
'sources': [


@@ -325,7 +325,7 @@ bool ExecuteString(v8::Isolate* isolate,
bool print_result,
bool report_exceptions) {
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch;
v8::TryCatch try_catch(isolate);
v8::ScriptOrigin origin(name);
v8::Handle<v8::Script> script = v8::Script::Compile(source, &origin);
if (script.IsEmpty()) {


@@ -32,6 +32,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
Handle<Object> set = v8::FromCData(isolate, setter);
@@ -126,31 +127,6 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
}

bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::Name> name, Handle<Object> value) {
Handle<Object> holder = Utils::OpenHandle(*info.Holder());
Handle<Object> receiver = Utils::OpenHandle(*info.This());
if (*holder == *receiver) return false;
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
// This behaves sloppy since we lost the actual strict-mode.
// TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
// properties.
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
if (iter.IsAtEnd()) return true;
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
}
if (!object->map()->is_extensible()) return true;
JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
value, NONE).Check();
}
return true;
}

//
// Accessors::ArgumentsIterator
//
@@ -174,8 +150,6 @@ void Accessors::ArgumentsIteratorSetter(
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);

if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;

LookupIterator it(object, Utils::OpenHandle(*name));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
@@ -199,21 +173,6 @@
//

// The helper function will 'flatten' Number objects.
Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
has_initial_map());
if (wrapper->map() == isolate->number_function()->initial_map()) {
return handle(wrapper->value(), isolate);
}
return value;
}

void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -226,44 +185,55 @@ void Accessors::ArrayLengthGetter(
}
// Tries to non-observably convert |value| to a valid array length.
// Returns false if it fails.
static bool FastAsArrayLength(Isolate* isolate, Handle<Object> value,
uint32_t* length) {
if (value->ToArrayLength(length)) return true;
// We don't support AsArrayLength, so use AsArrayIndex for now. This just
// misses out on kMaxUInt32.
if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
return false;
}
void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}

value = FlattenNumber(isolate, value);

Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
MaybeHandle<Object> maybe;
Handle<Object> uint32_v;
maybe = Execution::ToUint32(isolate, value);
if (!maybe.ToHandle(&uint32_v)) {
isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> number_v;
maybe = Execution::ToNumber(isolate, value);
if (!maybe.ToHandle(&number_v)) {
isolate->OptionalRescheduleException(false);
return;
}
if (uint32_v->Number() == number_v->Number()) {
maybe = JSArray::SetElementsLength(array_handle, uint32_v);
if (maybe.is_null()) isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> exception =
isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
isolate->ScheduleThrow(*exception);
}

void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);

Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);

uint32_t length = 0;
if (!FastAsArrayLength(isolate, length_obj, &length)) {
Handle<Object> uint32_v;
if (!Execution::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
isolate->OptionalRescheduleException(false);
return;
}

Handle<Object> number_v;
if (!Execution::ToNumber(isolate, length_obj).ToHandle(&number_v)) {
isolate->OptionalRescheduleException(false);
return;
}

if (uint32_v->Number() != number_v->Number()) {
Handle<Object> exception = isolate->factory()->NewRangeError(
MessageTemplate::kInvalidArrayLength);
return isolate->ScheduleThrow(*exception);
}

CHECK(uint32_v->ToArrayLength(&length));
}

if (JSArray::ObservableSetLength(array, length).is_null()) {
isolate->OptionalRescheduleException(false);
}
}
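The rewritten setter enforces the ECMAScript valid-array-length rule: a value is accepted only when ToUint32(v) equals ToNumber(v); otherwise a RangeError is scheduled. A self-contained sketch of just that numeric check follows; IsValidArrayLength is a hypothetical helper, not V8 code:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Mirrors the ToUint32/ToNumber comparison above: |v| is a valid array
// length only if the ToUint32 conversion round-trips exactly.
static bool IsValidArrayLength(double v, uint32_t* out) {
  double t = std::trunc(v);                // ToUint32 truncates toward zero...
  double m = std::fmod(t, 4294967296.0);   // ...then wraps modulo 2^32.
  if (m < 0) m += 4294967296.0;
  if (m != v) return false;                // ToUint32(v) != ToNumber(v).
  *out = static_cast<uint32_t>(m);
  return true;
}

int main() {
  uint32_t len;
  std::printf("%d\n", IsValidArrayLength(42.0, &len));  // 1: len == 42
  std::printf("%d\n", IsValidArrayLength(-1.0, &len));  // 0: RangeError case
  std::printf("%d\n", IsValidArrayLength(1.5, &len));   // 0: RangeError case
  return 0;
}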
@@ -706,8 +676,9 @@
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
bool is_embedder_debug_script =
Script::cast(JSValue::cast(object)->value())->is_embedder_debug_script();
bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
->origin_options()
.IsEmbedderDebugScript();
Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -970,9 +941,6 @@ void Accessors::FunctionPrototypeSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionPrototype(isolate, object, value).is_null()) {
@@ -1061,8 +1029,6 @@ void Accessors::FunctionLengthSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);

if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;

Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionLength(isolate, object, value).is_null()) {
@@ -1120,8 +1086,6 @@ void Accessors::FunctionNameSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);

if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;

Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionName(isolate, object, value).is_null()) {
@@ -1151,22 +1115,41 @@ static Handle<Object> ArgumentsForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();

SlotRefValueBuilder slot_refs(
frame, inlined_frame_index,
inlined_function->shared()->internal_formal_parameter_count());
int args_count = slot_refs.args_length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = slot_refs.GetNext(isolate, 0);
array->set(i, *value);
}
slot_refs.Finish(isolate);
arguments->set_elements(*array);

TranslatedState translated_values(frame);
translated_values.Prepare(false, frame->fp());

int argument_count = 0;
TranslatedFrame* translated_frame =
translated_values.GetArgumentsInfoFromJSFrameIndex(inlined_frame_index,
&argument_count);
TranslatedFrame::iterator iter = translated_frame->begin();

// Skip the function.
iter++;

// Skip the receiver.
iter++;
argument_count--;

Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, argument_count);
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
// If we materialize any object, we should deopt because we might alias
// an object that was eliminated by escape analysis.
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
array->set(i, *value);
iter++;
}
arguments->set_elements(*array);

if (should_deoptimize) {
translated_values.StoreMaterializedValuesAndDeopt();
}

// Return the freshly allocated arguments object.
return arguments;
}
@@ -1437,9 +1420,19 @@ static void ModuleGetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<String> name = v8::Utils::OpenHandle(*property);

Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
Object* value = context->get(slot);

if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
@@ -1459,9 +1452,18 @@ static void ModuleSetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
Object* old_value = context->get(slot);
Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
@@ -1493,4 +1495,5 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
}

} }  // namespace v8::internal
}  // namespace internal
}  // namespace v8


@@ -98,11 +98,6 @@ class Accessors : public AllStatic {
static Handle<ExecutableAccessorInfo> CloneAccessor(
Isolate* isolate,
Handle<ExecutableAccessorInfo> accessor);

private:
// Helper functions.
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
};

} }  // namespace v8::internal


@@ -76,4 +76,5 @@ bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
return false;
}

} }  // namespace v8::internal
}  // namespace internal
}  // namespace v8


@@ -337,4 +337,5 @@ void AllocationTracker::UnresolvedLocation::HandleWeakScript(
}

} }  // namespace v8::internal
}  // namespace internal
}  // namespace v8


@@ -108,4 +108,5 @@ void AlignedFree(void *ptr) {
#endif
}

} }  // namespace v8::internal
}  // namespace internal
}  // namespace v8


@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/messages.h"

namespace v8 {
namespace internal {
@@ -66,7 +67,7 @@ MaybeHandle<Object> DefineAccessorProperty(
MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key,
Handle<Name> name,
Handle<Object> prop_data,
Smi* unchecked_attributes) {
DCHECK((unchecked_attributes->value() &
@@ -77,35 +78,24 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
Instantiate(isolate, prop_data, key), Object);
Instantiate(isolate, prop_data, name), Object);

LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);

#ifdef DEBUG
bool duplicate;
if (key->IsName()) {
LookupIterator it(object, Handle<Name>::cast(key),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
duplicate = it.IsFound();
} else {
uint32_t index = 0;
key->ToArrayIndex(&index);
Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
if (!maybe.IsJust()) return MaybeHandle<Object>();
duplicate = maybe.FromJust();
}
if (duplicate) {
Handle<Object> args[1] = {key};
THROW_NEW_ERROR(isolate, NewTypeError("duplicate_template_property",
HandleVector(args, 1)),
Object);
}
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
if (it.IsFound()) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kDuplicateTemplateProperty, name),
Object);
}
#endif

RETURN_ON_EXCEPTION(
isolate, Runtime::DefineObjectProperty(object, key, value, attributes),
Object);
return object;
return Object::AddDataProperty(&it, value, attributes, STRICT,
Object::CERTAINLY_NOT_STORE_FROM_KEYED);
}

896
deps/v8/src/api.cc vendored

File diff suppressed because it is too large

15
deps/v8/src/api.h vendored

@@ -95,6 +95,7 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
return reinterpret_cast<T>(
reinterpret_cast<intptr_t>(
v8::internal::Foreign::cast(obj)->foreign_address()));
@@ -105,6 +106,7 @@
inline v8::internal::Handle<v8::internal::Object> FromCData(
v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == nullptr) return handle(v8::internal::Smi::FromInt(0), isolate);
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -146,6 +148,8 @@ class RegisteredExtension {
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
V(ArrayBuffer, JSArrayBuffer) \
V(ArrayBufferView, JSArrayBufferView) \
V(TypedArray, JSTypedArray) \
@@ -159,6 +163,7 @@
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
V(SharedArrayBuffer, JSArrayBuffer) \
V(Name, Name) \
V(String, String) \
V(Symbol, Symbol) \
@@ -202,6 +207,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<Map> ToLocal(
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
@@ -230,6 +239,9 @@
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);

static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);

static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
@@ -356,10 +368,13 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)

TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)


@@ -102,4 +102,5 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
}

} }  // namespace v8::internal
}  // namespace internal
}  // namespace v8


@@ -118,10 +118,11 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_ool_constant_pool ||
if (FLAG_enable_embedded_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
// We return the PC for ool constant pool since this function is used by the
// serializer and expects the address to reside within the code object.
// We return the PC for embedded constant pool since this function is used
// by the serializer and expects the address to reside within the code
// object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@@ -545,7 +546,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
set_target_address_at(constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
@@ -562,21 +563,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
(FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
(FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
}
}

Address Assembler::constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool) {
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
@@ -604,7 +605,7 @@ Address Assembler::constant_pool_entry_address(
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
}
return reinterpret_cast<Address>(constant_pool) + cp_offset;
return constant_pool + cp_offset;
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
@@ -613,8 +614,7 @@
}

Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
@@ -645,8 +645,7 @@ Address Assembler::target_address_at(
}

void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {


@@ -234,9 +234,9 @@ const int RelocInfo::kApplyMask = 0;
 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially coded.  Being
   // specially coded on ARM means that it is a movw/movt instruction, or is an
-  // out of line constant pool entry.  These only occur if
-  // FLAG_enable_ool_constant_pool is true.
-  return FLAG_enable_ool_constant_pool;
+  // embedded constant pool entry.  These only occur if
+  // FLAG_enable_embedded_constant_pool is true.
+  return FLAG_enable_embedded_constant_pool;
 }
@@ -449,11 +449,11 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
-      constant_pool_builder_(),
+      constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
       positions_recorder_(this) {
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-  num_pending_32_bit_reloc_info_ = 0;
-  num_pending_64_bit_reloc_info_ = 0;
+  num_pending_32_bit_constants_ = 0;
+  num_pending_64_bit_constants_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
@@ -471,23 +471,30 @@ Assembler::~Assembler() {
 void Assembler::GetCode(CodeDesc* desc) {
   reloc_info_writer.Finish();
-  if (!FLAG_enable_ool_constant_pool) {
-    // Emit constant pool if necessary.
+
+  // Emit constant pool if necessary.
+  int constant_pool_offset = 0;
+  if (FLAG_enable_embedded_constant_pool) {
+    constant_pool_offset = EmitEmbeddedConstantPool();
+  } else {
     CheckConstPool(true, false);
-    DCHECK(num_pending_32_bit_reloc_info_ == 0);
-    DCHECK(num_pending_64_bit_reloc_info_ == 0);
+    DCHECK(num_pending_32_bit_constants_ == 0);
+    DCHECK(num_pending_64_bit_constants_ == 0);
   }
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->constant_pool_size =
+      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
   desc->origin = this;
 }
 
 
 void Assembler::Align(int m) {
   DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
   while ((pc_offset() & (m - 1)) != 0) {
     nop();
   }
@@ -623,7 +630,7 @@ Register Assembler::GetRm(Instr instr) {
 Instr Assembler::GetConsantPoolLoadPattern() {
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     return kLdrPpImmedPattern;
   } else {
     return kLdrPCImmedPattern;
@@ -632,7 +639,7 @@ Instr Assembler::GetConsantPoolLoadPattern() {
 Instr Assembler::GetConsantPoolLoadMask() {
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     return kLdrPpImmedMask;
   } else {
     return kLdrPCImmedMask;
@@ -1044,8 +1051,8 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
 static bool use_mov_immediate_load(const Operand& x,
                                    const Assembler* assembler) {
-  if (FLAG_enable_ool_constant_pool && assembler != NULL &&
-      !assembler->is_ool_constant_pool_available()) {
+  if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
+      !assembler->is_constant_pool_available()) {
     return true;
   } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
              (assembler == NULL || !assembler->predictable_code_size())) {
@@ -1074,8 +1081,9 @@ int Operand::instructions_required(const Assembler* assembler,
   if (use_mov_immediate_load(*this, assembler)) {
     // A movw / movt or mov / orr immediate load.
     instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
-  } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
-    // An extended constant pool load.
+  } else if (assembler != NULL &&
+             assembler->ConstantPoolAccessIsInOverflow()) {
+    // An overflowed constant pool load.
     instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
   } else {
     // A small constant pool load.
@@ -1100,23 +1108,23 @@ int Operand::instructions_required(const Assembler* assembler,
 void Assembler::move_32_bit_immediate(Register rd,
                                       const Operand& x,
                                       Condition cond) {
-  RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
   uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
   if (x.must_output_reloc_info(this)) {
-    RecordRelocInfo(rinfo);
+    RecordRelocInfo(x.rmode_);
   }
 
   if (use_mov_immediate_load(x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
     if (CpuFeatures::IsSupported(ARMv7)) {
-      if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+      if (!FLAG_enable_embedded_constant_pool &&
+          x.must_output_reloc_info(this)) {
         // Make sure the movw/movt doesn't get separated.
         BlockConstPoolFor(2);
       }
       movw(target, imm32 & 0xffff, cond);
       movt(target, imm32 >> 16, cond);
     } else {
-      DCHECK(FLAG_enable_ool_constant_pool);
+      DCHECK(FLAG_enable_embedded_constant_pool);
       mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
       orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
       orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
@@ -1126,10 +1134,11 @@ void Assembler::move_32_bit_immediate(Register rd,
       mov(rd, target, LeaveCC, cond);
     }
   } else {
-    DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
-    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
-    if (section == ConstantPoolArray::EXTENDED_SECTION) {
-      DCHECK(FLAG_enable_ool_constant_pool);
+    DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
+    ConstantPoolEntry::Access access =
+        ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
+    if (access == ConstantPoolEntry::OVERFLOWED) {
+      DCHECK(FLAG_enable_embedded_constant_pool);
       Register target = rd.code() == pc.code() ? ip : rd;
       // Emit instructions to load constant pool offset.
       if (CpuFeatures::IsSupported(ARMv7)) {
@@ -1144,8 +1153,9 @@ void Assembler::move_32_bit_immediate(Register rd,
       // Load from constant pool at offset.
       ldr(rd, MemOperand(pp, target), cond);
     } else {
-      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
-      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+      DCHECK(access == ConstantPoolEntry::REGULAR);
+      ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
+          cond);
     }
   }
 }
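The pre-ARMv7 fallback above (mov plus three orr) works because each byte-sized chunk of the immediate is itself encodable as an ARM rotated 8-bit immediate. A standalone check of the split-and-reassemble arithmetic, with kImm8Mask assumed to be 0xff as in the assembler:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kImm8Mask = 0xff;  // assumed value of the assembler constant
      uint32_t imm32 = 0x12345678;
      uint32_t target = imm32 & kImm8Mask;   // mov  target, #byte0
      target |= imm32 & (kImm8Mask << 8);    // orr  target, target, #byte1
      target |= imm32 & (kImm8Mask << 16);   // orr  target, target, #byte2
      target |= imm32 & (kImm8Mask << 24);   // orr  target, target, #byte3
      assert(target == imm32);               // four chunks rebuild the value
      return 0;
    }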
@@ -2554,7 +2564,7 @@ void Assembler::vmov(const DwVfpRegister dst,
     int vd, d;
     dst.split_code(&vd, &d);
     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-  } else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
+  } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
     // TODO(jfb) Temporarily turned off until we have constant blinding or
     //           some equivalent mitigation: an attacker can otherwise control
     //           generated data which also happens to be executable, a Very Bad
@@ -2570,18 +2580,17 @@ void Assembler::vmov(const DwVfpRegister dst,
     //           The code could also randomize the order of values, though
     //           that's tricky because vldr has a limited reach. Furthermore
     //           it breaks load locality.
-    RelocInfo rinfo(pc_, imm);
-    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
-    if (section == ConstantPoolArray::EXTENDED_SECTION) {
-      DCHECK(FLAG_enable_ool_constant_pool);
+    ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
+    if (access == ConstantPoolEntry::OVERFLOWED) {
+      DCHECK(FLAG_enable_embedded_constant_pool);
       // Emit instructions to load constant pool offset.
       movw(ip, 0);
       movt(ip, 0);
       // Load from constant pool at offset.
       vldr(dst, MemOperand(pp, ip));
     } else {
-      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
-      vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+      DCHECK(access == ConstantPoolEntry::REGULAR);
+      vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
     }
   } else {
     // Synthesise the double from ARM immediates.
@@ -2596,7 +2605,8 @@ void Assembler::vmov(const DwVfpRegister dst,
   } else if (scratch.is(no_reg)) {
     mov(ip, Operand(lo));
     vmov(dst, VmovIndexLo, ip);
-    if ((lo & 0xffff) == (hi & 0xffff)) {
+    if (((lo & 0xffff) == (hi & 0xffff)) &&
+        CpuFeatures::IsSupported(ARMv7)) {
       movt(ip, hi >> 16);
     } else {
       mov(ip, Operand(hi));
@@ -3555,22 +3565,6 @@ void Assembler::GrowBuffer() {
   // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
   // to relocate any emitted relocation entries.
-
-  // Relocate pending relocation entries.
-  for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
-    RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
-    DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION);
-    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
-      rinfo.set_pc(rinfo.pc() + pc_delta);
-    }
-  }
-  for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
-    RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
-    DCHECK(rinfo.rmode() == RelocInfo::NONE64);
-    rinfo.set_pc(rinfo.pc() + pc_delta);
-  }
-
-  constant_pool_builder_.Relocate(pc_delta);
 }
@@ -3578,8 +3572,8 @@ void Assembler::db(uint8_t data) {
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  DCHECK(num_pending_32_bit_reloc_info_ == 0);
-  DCHECK(num_pending_64_bit_reloc_info_ == 0);
+  DCHECK(num_pending_32_bit_constants_ == 0);
+  DCHECK(num_pending_64_bit_constants_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -3590,14 +3584,26 @@ void Assembler::dd(uint32_t data) {
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  DCHECK(num_pending_32_bit_reloc_info_ == 0);
-  DCHECK(num_pending_64_bit_reloc_info_ == 0);
+  DCHECK(num_pending_32_bit_constants_ == 0);
+  DCHECK(num_pending_64_bit_constants_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
 }
 
 
+void Assembler::dq(uint64_t value) {
+  // No relocation info should be pending while using dq. dq is used
+  // to write pure data with no pointers and the constant pool should
+  // be emitted before using dd.
+  DCHECK(num_pending_32_bit_constants_ == 0);
+  DCHECK(num_pending_64_bit_constants_ == 0);
+  CheckBuffer();
+  *reinterpret_cast<uint64_t*>(pc_) = value;
+  pc_ += sizeof(uint64_t);
+}
+
+
 void Assembler::emit_code_stub_address(Code* stub) {
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) =
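The new dq() above emits one 64-bit datum where CheckConstPool previously emitted two 32-bit halves (low word first); on a little-endian target such as ARM the resulting byte sequences are identical. A standalone check of that equivalence (host-side illustration, assuming a little-endian host):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t value = 0x0123456789abcdefULL;
      uint8_t as_dq[8], as_two_dd[8];
      std::memcpy(as_dq, &value, 8);                   // dq(value)
      uint32_t lo = value & 0xFFFFFFFF;
      uint32_t hi = value >> 32;
      std::memcpy(as_two_dd, &lo, 4);                  // emit(lo)
      std::memcpy(as_two_dd + 4, &hi, 4);              // emit(hi)
      assert(std::memcmp(as_dq, as_two_dd, 8) == 0);   // same bytes
      return 0;
    }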
@@ -3607,64 +3613,73 @@ void Assembler::emit_code_stub_address(Code* stub) {
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  if (RelocInfo::IsNone(rmode) ||
+      // Don't record external references unless the heap will be serialized.
+      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
+       !emit_debug_code())) {
+    return;
+  }
+  DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+    data = RecordedAstId().ToInt();
+    ClearRecordedAstId();
+  }
   RelocInfo rinfo(pc_, rmode, data, NULL);
-  RecordRelocInfo(rinfo);
-}
-
-
-void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
-  if (!RelocInfo::IsNone(rinfo.rmode())) {
-    // Don't record external references unless the heap will be serialized.
-    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
-        !serializer_enabled() && !emit_debug_code()) {
-      return;
-    }
-    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
-    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(rinfo.pc(),
-                                       rinfo.rmode(),
-                                       RecordedAstId().ToInt(),
-                                       NULL);
-      ClearRecordedAstId();
-      reloc_info_writer.Write(&reloc_info_with_ast_id);
-    } else {
-      reloc_info_writer.Write(&rinfo);
-    }
-  }
+  reloc_info_writer.Write(&rinfo);
 }
 
 
-ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
-    const RelocInfo& rinfo) {
-  if (FLAG_enable_ool_constant_pool) {
-    return constant_pool_builder_.AddEntry(this, rinfo);
+ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
+                                                          RelocInfo::Mode rmode,
+                                                          intptr_t value) {
+  DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
+         rmode != RelocInfo::STATEMENT_POSITION &&
+         rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
+  bool sharing_ok = RelocInfo::IsNone(rmode) ||
+                    !(serializer_enabled() || rmode < RelocInfo::CELL);
+  if (FLAG_enable_embedded_constant_pool) {
+    return constant_pool_builder_.AddEntry(position, value, sharing_ok);
   } else {
-    if (rinfo.rmode() == RelocInfo::NONE64) {
-      DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
-      if (num_pending_64_bit_reloc_info_ == 0) {
-        first_const_pool_64_use_ = pc_offset();
-      }
-      pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
-    } else {
-      DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
-      if (num_pending_32_bit_reloc_info_ == 0) {
-        first_const_pool_32_use_ = pc_offset();
-      }
-      pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+    DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
+    if (num_pending_32_bit_constants_ == 0) {
+      first_const_pool_32_use_ = position;
     }
+    ConstantPoolEntry entry(position, value, sharing_ok);
+    pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
+
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
-    return ConstantPoolArray::SMALL_SECTION;
+    return ConstantPoolEntry::REGULAR;
+  }
+}
+
+
+ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
+                                                          double value) {
+  if (FLAG_enable_embedded_constant_pool) {
+    return constant_pool_builder_.AddEntry(position, value);
+  } else {
+    DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
+    if (num_pending_64_bit_constants_ == 0) {
+      first_const_pool_64_use_ = position;
+    }
+    ConstantPoolEntry entry(position, value);
+    pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
+
+    // Make sure the constant pool is not emitted in place of the next
+    // instruction for which we just recorded relocation info.
+    BlockConstPoolFor(1);
+    return ConstantPoolEntry::REGULAR;
   }
 }
 
 
 void Assembler::BlockConstPoolFor(int instructions) {
-  if (FLAG_enable_ool_constant_pool) {
-    // Should be a no-op if using an out-of-line constant pool.
-    DCHECK(num_pending_32_bit_reloc_info_ == 0);
-    DCHECK(num_pending_64_bit_reloc_info_ == 0);
+  if (FLAG_enable_embedded_constant_pool) {
+    // Should be a no-op if using an embedded constant pool.
+    DCHECK(num_pending_32_bit_constants_ == 0);
+    DCHECK(num_pending_64_bit_constants_ == 0);
     return;
   }
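The sharing_ok flag computed in ConstantPoolAddEntry above is the old dedup guard from CheckConstPool ("!serializer_enabled() && rmode >= RelocInfo::CELL") folded through De Morgan's law, with the reloc-free case made explicit. A standalone check of that equivalence, using stand-in enum values rather than V8's actual RelocInfo modes:

    #include <cassert>

    enum Mode { NONE = 0, CODE_TARGET = 1, CELL = 2, NONE64 = 3 };  // stand-ins

    bool SharingOkNew(bool serializer_enabled, Mode rmode) {
      return rmode == NONE || !(serializer_enabled || rmode < CELL);
    }

    bool SharingOkOld(bool serializer_enabled, Mode rmode) {
      return rmode == NONE || (!serializer_enabled && rmode >= CELL);
    }

    int main() {
      for (int s = 0; s <= 1; ++s)
        for (int m = NONE; m <= NONE64; ++m)
          assert(SharingOkNew(s != 0, Mode(m)) == SharingOkOld(s != 0, Mode(m)));
      return 0;
    }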
@@ -3673,10 +3688,11 @@ void Assembler::BlockConstPoolFor(int instructions) {
     // Max pool start (if we need a jump and an alignment).
 #ifdef DEBUG
     int start = pc_limit + kInstrSize + 2 * kPointerSize;
-    DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
-           (start - first_const_pool_32_use_ +
-            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
-    DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
+    DCHECK((num_pending_32_bit_constants_ == 0) ||
+           (start - first_const_pool_32_use_ +
+            num_pending_64_bit_constants_ * kDoubleSize <
+            kMaxDistToIntPool));
+    DCHECK((num_pending_64_bit_constants_ == 0) ||
            (start - first_const_pool_64_use_ < kMaxDistToFPPool));
 #endif
     no_const_pool_before_ = pc_limit;
@@ -3689,10 +3705,10 @@ void Assembler::BlockConstPoolFor(int instructions) {
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  if (FLAG_enable_ool_constant_pool) {
-    // Should be a no-op if using an out-of-line constant pool.
-    DCHECK(num_pending_32_bit_reloc_info_ == 0);
-    DCHECK(num_pending_64_bit_reloc_info_ == 0);
+  if (FLAG_enable_embedded_constant_pool) {
+    // Should be a no-op if using an embedded constant pool.
+    DCHECK(num_pending_32_bit_constants_ == 0);
+    DCHECK(num_pending_64_bit_constants_ == 0);
     return;
   }
@@ -3706,8 +3722,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   }
 
   // There is nothing to do if there are no pending constant pool entries.
-  if ((num_pending_32_bit_reloc_info_ == 0) &&
-      (num_pending_64_bit_reloc_info_ == 0)) {
+  if ((num_pending_32_bit_constants_ == 0) &&
+      (num_pending_64_bit_constants_ == 0)) {
     // Calculate the offset of the next check.
     next_buffer_check_ = pc_offset() + kCheckPoolInterval;
     return;
@@ -3718,15 +3734,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   //   the gap to the relocation information).
   int jump_instr = require_jump ? kInstrSize : 0;
   int size_up_to_marker = jump_instr + kInstrSize;
-  int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
-  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
+  int size_after_marker = num_pending_32_bit_constants_ * kPointerSize;
+  bool has_fp_values = (num_pending_64_bit_constants_ > 0);
   bool require_64_bit_align = false;
   if (has_fp_values) {
     require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
     if (require_64_bit_align) {
       size_after_marker += kInstrSize;
     }
-    size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
+    size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
   }
 
   int size = size_up_to_marker + size_after_marker;
@@ -3743,9 +3759,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
   bool need_emit = false;
   if (has_fp_values) {
-    int dist64 = pc_offset() +
-                 size -
-                 num_pending_32_bit_reloc_info_ * kPointerSize -
+    int dist64 = pc_offset() + size -
+                 num_pending_32_bit_constants_ * kPointerSize -
                  first_const_pool_64_use_;
     if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
         (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
@@ -3787,60 +3802,52 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
     // Emit 64-bit constant pool entries first: their range is smaller than
     // 32-bit entries.
-    for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
+    for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+      ConstantPoolEntry& entry = pending_64_bit_constants_[i];
 
       DCHECK(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.
 
-      Instr instr = instr_at(rinfo.pc());
+      Instr instr = instr_at(entry.position());
       // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
       DCHECK((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));
 
-      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+      int delta = pc_offset() - entry.position() - kPcLoadDelta;
       DCHECK(is_uint10(delta));
 
       bool found = false;
-      uint64_t value = rinfo.raw_data64();
+      uint64_t value = entry.value64();
       for (int j = 0; j < i; j++) {
-        RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
-        if (value == rinfo2.raw_data64()) {
+        ConstantPoolEntry& entry2 = pending_64_bit_constants_[j];
+        if (value == entry2.value64()) {
           found = true;
-          DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
-          Instr instr2 = instr_at(rinfo2.pc());
+          Instr instr2 = instr_at(entry2.position());
           DCHECK(IsVldrDPcImmediateOffset(instr2));
           delta = GetVldrDRegisterImmediateOffset(instr2);
-          delta += rinfo2.pc() - rinfo.pc();
+          delta += entry2.position() - entry.position();
           break;
         }
       }
 
-      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
+      instr_at_put(entry.position(),
+                   SetVldrDRegisterImmediateOffset(instr, delta));
 
       if (!found) {
-        uint64_t uint_data = rinfo.raw_data64();
-        emit(uint_data & 0xFFFFFFFF);
-        emit(uint_data >> 32);
+        dq(entry.value64());
       }
     }
 
     // Emit 32-bit constant pool entries.
-    for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
-      DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
-             rinfo.rmode() != RelocInfo::POSITION &&
-             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
-             rinfo.rmode() != RelocInfo::CONST_POOL &&
-             rinfo.rmode() != RelocInfo::NONE64);
-
-      Instr instr = instr_at(rinfo.pc());
+    for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+      ConstantPoolEntry& entry = pending_32_bit_constants_[i];
+      Instr instr = instr_at(entry.position());
 
       // 64-bit loads shouldn't get here.
       DCHECK(!IsVldrDPcImmediateOffset(instr));
 
       if (IsLdrPcImmediateOffset(instr) &&
           GetLdrRegisterImmediateOffset(instr) == 0) {
-        int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+        int delta = pc_offset() - entry.position() - kPcLoadDelta;
         DCHECK(is_uint12(delta));
         // 0 is the smallest delta:
         //   ldr rd, [pc, #0]
@@ -3848,16 +3855,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
         //   data
 
         bool found = false;
-        if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
+        if (entry.sharing_ok()) {
           for (int j = 0; j < i; j++) {
-            RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
-            if ((rinfo2.data() == rinfo.data()) &&
-                (rinfo2.rmode() == rinfo.rmode())) {
-              Instr instr2 = instr_at(rinfo2.pc());
+            ConstantPoolEntry& entry2 = pending_32_bit_constants_[j];
+            if (entry2.value() == entry.value()) {
+              Instr instr2 = instr_at(entry2.position());
               if (IsLdrPcImmediateOffset(instr2)) {
                 delta = GetLdrRegisterImmediateOffset(instr2);
-                delta += rinfo2.pc() - rinfo.pc();
+                delta += entry2.position() - entry.position();
                 found = true;
                 break;
               }
@@ -3865,18 +3871,19 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
           }
         }
 
-        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+        instr_at_put(entry.position(),
+                     SetLdrRegisterImmediateOffset(instr, delta));
 
         if (!found) {
-          emit(rinfo.data());
+          emit(entry.value());
         }
       } else {
        DCHECK(IsMovW(instr));
      }
    }
 
-    num_pending_32_bit_reloc_info_ = 0;
-    num_pending_64_bit_reloc_info_ = 0;
+    num_pending_32_bit_constants_ = 0;
+    num_pending_64_bit_constants_ = 0;
     first_const_pool_32_use_ = -1;
     first_const_pool_64_use_ = -1;
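The delta arithmetic used when back-patching each 'ldr rd, [pc, #0]' above accounts for the ARM convention that pc reads eight bytes ahead of the executing instruction (kPcLoadDelta). A worked standalone example with hypothetical pc offsets:

    #include <cassert>

    int main() {
      const int kPcLoadDelta = 8;   // pc as seen by ldr = instruction pc + 8
      int load_position = 100;      // pc offset of the 'ldr rd, [pc, #0]'
      int slot_position = 140;      // pc offset where the constant is emitted
      int delta = slot_position - load_position - kPcLoadDelta;  // 32
      // After patching, 'ldr rd, [pc, #32]' reads exactly the emitted slot:
      assert(load_position + kPcLoadDelta + delta == slot_position);
      return 0;
    }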
@@ -3893,229 +3900,61 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
 }
 
 
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
-  if (!FLAG_enable_ool_constant_pool) {
-    return isolate->factory()->empty_constant_pool_array();
-  }
-  return constant_pool_builder_.New(isolate);
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
-  constant_pool_builder_.Populate(this, constant_pool);
-}
-
-
-ConstantPoolBuilder::ConstantPoolBuilder()
-    : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
-
-
-bool ConstantPoolBuilder::IsEmpty() {
-  return entries_.size() == 0;
-}
-
-
-ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
-    RelocInfo::Mode rmode) {
-  if (rmode == RelocInfo::NONE64) {
-    return ConstantPoolArray::INT64;
-  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
-    return ConstantPoolArray::INT32;
-  } else if (RelocInfo::IsCodeTarget(rmode)) {
-    return ConstantPoolArray::CODE_PTR;
-  } else {
-    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
-    return ConstantPoolArray::HEAP_PTR;
-  }
-}
-
-
-ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
-    Assembler* assm, const RelocInfo& rinfo) {
-  RelocInfo::Mode rmode = rinfo.rmode();
-  DCHECK(rmode != RelocInfo::COMMENT &&
-         rmode != RelocInfo::POSITION &&
-         rmode != RelocInfo::STATEMENT_POSITION &&
-         rmode != RelocInfo::CONST_POOL);
-
-  // Try to merge entries which won't be patched.
-  int merged_index = -1;
-  ConstantPoolArray::LayoutSection entry_section = current_section_;
-  if (RelocInfo::IsNone(rmode) ||
-      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
-    size_t i;
-    std::vector<ConstantPoolEntry>::const_iterator it;
-    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
-      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
-        // Merge with found entry.
-        merged_index = i;
-        entry_section = entries_[i].section_;
-        break;
-      }
-    }
-  }
-  DCHECK(entry_section <= current_section_);
-  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
-
-  if (merged_index == -1) {
-    // Not merged, so update the appropriate count.
-    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
-  }
-
-  // Check if we still have room for another entry in the small section
-  // given Arm's ldr and vldr immediate offset range.
-  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
-      !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
-        is_uint10(ConstantPoolArray::MaxInt64Offset(
-            small_entries()->count_of(ConstantPoolArray::INT64))))) {
-    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
-  }
-
-  return entry_section;
-}
-
-
-void ConstantPoolBuilder::Relocate(int pc_delta) {
-  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
-       entry != entries_.end(); entry++) {
-    DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
-    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
-  }
-}
-
-
-Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
-  if (IsEmpty()) {
-    return isolate->factory()->empty_constant_pool_array();
-  } else if (extended_entries()->is_empty()) {
-    return isolate->factory()->NewConstantPoolArray(*small_entries());
-  } else {
-    DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
-    return isolate->factory()->NewExtendedConstantPoolArray(
-        *small_entries(), *extended_entries());
-  }
-}
-
-
-void ConstantPoolBuilder::Populate(Assembler* assm,
-                                   ConstantPoolArray* constant_pool) {
-  DCHECK_EQ(extended_entries()->is_empty(),
-            !constant_pool->is_extended_layout());
-  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
-      constant_pool, ConstantPoolArray::SMALL_SECTION)));
-  if (constant_pool->is_extended_layout()) {
-    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
-        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
-  }
-
-  // Set up initial offsets.
-  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
-             [ConstantPoolArray::NUMBER_OF_TYPES];
-  for (int section = 0; section <= constant_pool->final_section(); section++) {
-    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
-                            ? small_entries()->total_count()
-                            : 0;
-    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
-      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
-      if (number_of_entries_[section].count_of(type) != 0) {
-        offsets[section][type] = constant_pool->OffsetOfElementAt(
-            number_of_entries_[section].base_of(type) + section_start);
-      }
-    }
-  }
-
-  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
-       entry != entries_.end(); entry++) {
-    RelocInfo rinfo = entry->rinfo_;
-    RelocInfo::Mode rmode = entry->rinfo_.rmode();
-    ConstantPoolArray::Type type = GetConstantPoolType(rmode);
-
-    // Update constant pool if necessary and get the entry's offset.
-    int offset;
-    if (entry->merged_index_ == -1) {
-      offset = offsets[entry->section_][type];
-      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
-      if (type == ConstantPoolArray::INT64) {
-        constant_pool->set_at_offset(offset, rinfo.data64());
-      } else if (type == ConstantPoolArray::INT32) {
-        constant_pool->set_at_offset(offset,
-                                     static_cast<int32_t>(rinfo.data()));
-      } else if (type == ConstantPoolArray::CODE_PTR) {
-        constant_pool->set_at_offset(offset,
-                                     reinterpret_cast<Address>(rinfo.data()));
-      } else {
-        DCHECK(type == ConstantPoolArray::HEAP_PTR);
-        constant_pool->set_at_offset(offset,
-                                     reinterpret_cast<Object*>(rinfo.data()));
-      }
-      offset -= kHeapObjectTag;
-      entry->merged_index_ = offset;  // Stash offset for merged entries.
-    } else {
-      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
-      offset = entries_[entry->merged_index_].merged_index_;
-    }
-
-    // Patch vldr/ldr instruction with correct offset.
-    Instr instr = assm->instr_at(rinfo.pc());
-    if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
-      if (CpuFeatures::IsSupported(ARMv7)) {
-        // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
-        Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
-        DCHECK((Assembler::IsMovW(instr) &&
-                Instruction::ImmedMovwMovtValue(instr) == 0));
-        DCHECK((Assembler::IsMovT(next_instr) &&
-                Instruction::ImmedMovwMovtValue(next_instr) == 0));
-        assm->instr_at_put(
-            rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
-        assm->instr_at_put(
-            rinfo.pc() + Assembler::kInstrSize,
-            Assembler::PatchMovwImmediate(next_instr, offset >> 16));
-      } else {
-        // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
-        Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
-        Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
-        Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
-        DCHECK((Assembler::IsMovImmed(instr) &&
-                Instruction::Immed8Value(instr) == 0));
-        DCHECK((Assembler::IsOrrImmed(instr_2) &&
-                Instruction::Immed8Value(instr_2) == 0) &&
-               Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
-        DCHECK((Assembler::IsOrrImmed(instr_3) &&
-                Instruction::Immed8Value(instr_3) == 0) &&
-               Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
-        DCHECK((Assembler::IsOrrImmed(instr_4) &&
-                Instruction::Immed8Value(instr_4) == 0) &&
-               Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
-        assm->instr_at_put(
-            rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
-        assm->instr_at_put(
-            rinfo.pc() + Assembler::kInstrSize,
-            Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
-        assm->instr_at_put(
-            rinfo.pc() + 2 * Assembler::kInstrSize,
-            Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
-        assm->instr_at_put(
-            rinfo.pc() + 3 * Assembler::kInstrSize,
-            Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
-      }
-    } else if (type == ConstantPoolArray::INT64) {
-      // Instruction to patch must be 'vldr rd, [pp, #0]'.
-      DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
-              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
-      DCHECK(is_uint10(offset));
-      assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
-                                         instr, offset));
-    } else {
-      // Instruction to patch must be 'ldr rd, [pp, #0]'.
-      DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
-              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
-      DCHECK(is_uint12(offset));
-      assm->instr_at_put(
-          rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
-    }
-  }
-}
+void Assembler::PatchConstantPoolAccessInstruction(
+    int pc_offset, int offset, ConstantPoolEntry::Access access,
+    ConstantPoolEntry::Type type) {
+  DCHECK(FLAG_enable_embedded_constant_pool);
+  Address pc = buffer_ + pc_offset;
+
+  // Patch vldr/ldr instruction with correct offset.
+  Instr instr = instr_at(pc);
+  if (access == ConstantPoolEntry::OVERFLOWED) {
+    if (CpuFeatures::IsSupported(ARMv7)) {
+      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
+      Instr next_instr = instr_at(pc + kInstrSize);
+      DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
+      DCHECK((IsMovT(next_instr) &&
+              Instruction::ImmedMovwMovtValue(next_instr) == 0));
+      instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
+      instr_at_put(pc + kInstrSize,
+                   PatchMovwImmediate(next_instr, offset >> 16));
+    } else {
+      // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
+      Instr instr_2 = instr_at(pc + kInstrSize);
+      Instr instr_3 = instr_at(pc + 2 * kInstrSize);
+      Instr instr_4 = instr_at(pc + 3 * kInstrSize);
+      DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
+      DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
+             GetRn(instr_2).is(GetRd(instr_2)));
+      DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
+             GetRn(instr_3).is(GetRd(instr_3)));
+      DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
+             GetRn(instr_4).is(GetRd(instr_4)));
+      instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
+      instr_at_put(pc + kInstrSize,
+                   PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
+      instr_at_put(pc + 2 * kInstrSize,
+                   PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
+      instr_at_put(pc + 3 * kInstrSize,
+                   PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
+    }
+  } else if (type == ConstantPoolEntry::DOUBLE) {
+    // Instruction to patch must be 'vldr rd, [pp, #0]'.
+    DCHECK((IsVldrDPpImmediateOffset(instr) &&
+            GetVldrDRegisterImmediateOffset(instr) == 0));
+    DCHECK(is_uint10(offset));
+    instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
+  } else {
+    // Instruction to patch must be 'ldr rd, [pp, #0]'.
+    DCHECK((IsLdrPpImmediateOffset(instr) &&
+            GetLdrRegisterImmediateOffset(instr) == 0));
+    DCHECK(is_uint12(offset));
+    instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
+  }
+}
 
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_ARM
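REGULAR versus OVERFLOWED access above maps directly to instruction reach: an ldr byte offset must fit in 12 bits (4KB, hence kMaxDistToIntPool) and a vldr byte offset in 10 bits (1KB, hence kMaxDistToFPPool), matching the is_uint12/is_uint10 checks in the patch paths. A standalone restatement of those bounds (helper names are local to this sketch):

    #include <cassert>

    bool fits_uint12(int x) { return x >= 0 && x < (1 << 12); }
    bool fits_uint10(int x) { return x >= 0 && x < (1 << 10); }

    int main() {
      const int KB = 1024;
      assert(fits_uint12(4 * KB - 1) && !fits_uint12(4 * KB));  // ldr reach: 4KB
      assert(fits_uint10(1 * KB - 1) && !fits_uint10(1 * KB));  // vldr reach: 1KB
      return 0;
    }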
View File
@@ -94,7 +94,7 @@ const int kRegister_pc_Code = 15;
 struct Register {
   static const int kNumRegisters = 16;
   static const int kMaxNumAllocatableRegisters =
-      FLAG_enable_ool_constant_pool ? 8 : 9;
+      FLAG_enable_embedded_constant_pool ? 8 : 9;
   static const int kSizeInBytes = 4;
 
   inline static int NumAllocatableRegisters();
@@ -122,7 +122,7 @@ struct Register {
       "r7",
       "r8",
     };
-    if (FLAG_enable_ool_constant_pool && (index >= 7)) {
+    if (FLAG_enable_embedded_constant_pool && (index >= 7)) {
       return names[index + 1];
     }
     return names[index];
@@ -164,7 +164,7 @@ const Register r5 = { kRegister_r5_Code };
 const Register r6 = { kRegister_r6_Code };
 // Used as context register.
 const Register r7 = {kRegister_r7_Code};
-// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
+// Used as constant pool pointer register if FLAG_enable_embedded_constant_pool.
 const Register r8 = { kRegister_r8_Code };
 // Used as lithium codegen scratch register.
 const Register r9 = { kRegister_r9_Code };
@@ -651,52 +651,6 @@ class NeonListOperand BASE_EMBEDDED {
 };
 
-// Class used to build a constant pool.
-class ConstantPoolBuilder BASE_EMBEDDED {
- public:
-  ConstantPoolBuilder();
-  ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
-                                            const RelocInfo& rinfo);
-  void Relocate(int pc_delta);
-  bool IsEmpty();
-  Handle<ConstantPoolArray> New(Isolate* isolate);
-  void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
-
-  inline ConstantPoolArray::LayoutSection current_section() const {
-    return current_section_;
-  }
-
-  inline ConstantPoolArray::NumberOfEntries* number_of_entries(
-      ConstantPoolArray::LayoutSection section) {
-    return &number_of_entries_[section];
-  }
-
-  inline ConstantPoolArray::NumberOfEntries* small_entries() {
-    return number_of_entries(ConstantPoolArray::SMALL_SECTION);
-  }
-
-  inline ConstantPoolArray::NumberOfEntries* extended_entries() {
-    return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
-  }
-
- private:
-  struct ConstantPoolEntry {
-    ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
-                      int merged_index)
-        : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
-
-    RelocInfo rinfo_;
-    ConstantPoolArray::LayoutSection section_;
-    int merged_index_;
-  };
-
-  ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
-
-  std::vector<ConstantPoolEntry> entries_;
-  ConstantPoolArray::LayoutSection current_section_;
-  ConstantPoolArray::NumberOfEntries number_of_entries_[2];
-};
-
-
 struct VmovIndex {
   unsigned char index;
 };
@@ -754,19 +708,16 @@ class Assembler : public AssemblerBase {
   // Return the address in the constant pool of the code target address used by
   // the branch/call instruction at pc, or the object in a mov.
-  INLINE(static Address constant_pool_entry_address(
-      Address pc, ConstantPoolArray* constant_pool));
+  INLINE(static Address constant_pool_entry_address(Address pc,
+                                                    Address constant_pool));
 
   // Read/Modify the code target address in the branch/call instruction at pc.
-  INLINE(static Address target_address_at(Address pc,
-                                          ConstantPoolArray* constant_pool));
-  INLINE(static void set_target_address_at(Address pc,
-                                           ConstantPoolArray* constant_pool,
-                                           Address target,
-                                           ICacheFlushMode icache_flush_mode =
-                                               FLUSH_ICACHE_IF_NEEDED));
+  INLINE(static Address target_address_at(Address pc, Address constant_pool));
+  INLINE(static void set_target_address_at(
+      Address pc, Address constant_pool, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
   INLINE(static Address target_address_at(Address pc, Code* code)) {
-    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    Address constant_pool = code ? code->constant_pool() : NULL;
     return target_address_at(pc, constant_pool);
   }
   INLINE(static void set_target_address_at(Address pc,
@@ -774,7 +725,7 @@ class Assembler : public AssemblerBase {
                                            Address target,
                                            ICacheFlushMode icache_flush_mode =
                                                FLUSH_ICACHE_IF_NEEDED)) {
-    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    Address constant_pool = code ? code->constant_pool() : NULL;
     set_target_address_at(pc, constant_pool, target, icache_flush_mode);
   }
@@ -841,6 +792,9 @@ class Assembler : public AssemblerBase {
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2 (>= 4).
   void Align(int m);
+  // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a mulitple of m. m must be a power of 2 (>= 2).
+  void DataAlign(int m);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
@@ -1450,11 +1404,13 @@ class Assembler : public AssemblerBase {
   void RecordConstPool(int size);
 
   // Writes a single byte or word of data in the code stream. Used
-  // for inline tables, e.g., jump-tables. The constant pool should be
-  // emitted before any use of db and dd to ensure that constant pools
+  // for inline tables, e.g., jump-tables. CheckConstantPool() should be
+  // called before any use of db/dd/dq/dp to ensure that constant pools
   // are not emitted as part of the tables generated.
   void db(uint8_t data);
   void dd(uint32_t data);
+  void dq(uint64_t data);
+  void dp(uintptr_t data) { dd(data); }
 
   // Emits the address of the code stub's first instruction.
   void emit_code_stub_address(Code* stub);
@@ -1526,8 +1482,8 @@ class Assembler : public AssemblerBase {
   static const int kMaxDistToIntPool = 4*KB;
   static const int kMaxDistToFPPool = 1*KB;
   // All relocations could be integer, it therefore acts as the limit.
-  static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
-  static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
+  static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
+  static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
 
   // Postpone the generation of the constant pool for the specified number of
   // instructions.
@@ -1536,17 +1492,19 @@ class Assembler : public AssemblerBase {
   // Check if is time to emit a constant pool.
   void CheckConstPool(bool force_emit, bool require_jump);
 
-  // Allocate a constant pool of the correct size for the generated code.
-  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
-
-  // Generate the constant pool for the generated code.
-  void PopulateConstantPool(ConstantPoolArray* constant_pool);
-
-  bool use_extended_constant_pool() const {
-    return constant_pool_builder_.current_section() ==
-           ConstantPoolArray::EXTENDED_SECTION;
+  int EmitEmbeddedConstantPool() {
+    DCHECK(FLAG_enable_embedded_constant_pool);
+    return constant_pool_builder_.Emit(this);
   }
 
+  bool ConstantPoolAccessIsInOverflow() const {
+    return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
+           ConstantPoolEntry::OVERFLOWED;
+  }
+
+  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
+                                          ConstantPoolEntry::Access access,
+                                          ConstantPoolEntry::Type type);
+
  protected:
   // Relocation for a type-recording IC has the AST id added to it.  This
@@ -1581,10 +1539,10 @@ class Assembler : public AssemblerBase {
     // Max pool start (if we need a jump and an alignment).
     int start = pc_offset() + kInstrSize + 2 * kPointerSize;
     // Check the constant pool hasn't been blocked for too long.
-    DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
-           (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
+    DCHECK((num_pending_32_bit_constants_ == 0) ||
+           (start + num_pending_64_bit_constants_ * kDoubleSize <
             (first_const_pool_32_use_ + kMaxDistToIntPool)));
-    DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
+    DCHECK((num_pending_64_bit_constants_ == 0) ||
            (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
 #endif
     // Two cases:
@@ -1643,20 +1601,20 @@ class Assembler : public AssemblerBase {
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
   RelocInfoWriter reloc_info_writer;
 
-  // Relocation info records are also used during code generation as temporary
+  // ConstantPoolEntry records are used during code generation as temporary
   // containers for constants and code target addresses until they are emitted
-  // to the constant pool. These pending relocation info records are temporarily
-  // stored in a separate buffer until a constant pool is emitted.
+  // to the constant pool. These records are temporarily stored in a separate
+  // buffer until a constant pool is emitted.
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
 
-  // The buffers of pending relocation info.
-  RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
-  RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
-  // Number of pending reloc info entries in the 32 bits buffer.
-  int num_pending_32_bit_reloc_info_;
-  // Number of pending reloc info entries in the 64 bits buffer.
-  int num_pending_64_bit_reloc_info_;
+  // The buffers of pending constant pool entries.
+  ConstantPoolEntry pending_32_bit_constants_[kMaxNumPending32Constants];
+  ConstantPoolEntry pending_64_bit_constants_[kMaxNumPending64Constants];
+  // Number of pending constant pool entries in the 32 bits buffer.
+  int num_pending_32_bit_constants_;
+  // Number of pending constant pool entries in the 64 bits buffer.
+  int num_pending_64_bit_constants_;
 
   ConstantPoolBuilder constant_pool_builder_;
@@ -1685,15 +1643,12 @@ class Assembler : public AssemblerBase {
   void bind_to(Label* L, int pos);
   void next(Label* L);
 
-  enum UseConstantPoolMode {
-    USE_CONSTANT_POOL,
-    DONT_USE_CONSTANT_POOL
-  };
-
   // Record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-  void RecordRelocInfo(const RelocInfo& rinfo);
-  ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
+  ConstantPoolEntry::Access ConstantPoolAddEntry(int position,
+                                                 RelocInfo::Mode rmode,
+                                                 intptr_t value);
+  ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value);
 
   friend class RelocInfo;
   friend class CodePatcher;
View File
@@ -343,6 +343,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
+                                           bool use_new_target,
                                            bool create_memento) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
@@ -367,10 +368,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ push(r2);
     }
 
-    // Preserve the two incoming parameters on the stack.
+    // Preserve the incoming parameters on the stack.
     __ SmiTag(r0);
-    __ push(r0);  // Smi-tagged arguments count.
-    __ push(r1);  // Constructor function.
+    __ push(r0);
+    __ push(r1);
+    if (use_new_target) {
+      __ push(r3);
+    }
 
     Label rt_call, allocated, normal_new, count_incremented;
     __ cmp(r1, r3);
@@ -446,7 +450,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // initial map and properties and elements are set to empty fixed array.
       // r1: constructor function
       // r2: initial map
-      // r3: object size (not including memento if create_memento)
+      // r3: object size (including memento if create_memento)
       // r4: JSObject (not tagged)
       __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
       __ mov(r5, r4);
@@ -520,7 +524,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ add(r4, r4, Operand(kHeapObjectTag));
 
       // Check if a non-empty properties array is needed. Continue with
-      // allocated object if not fall through to runtime call if it is.
+      // allocated object if not; allocate and initialize a FixedArray if yes.
       // r1: constructor function
       // r4: JSObject
       // r5: start of next object (not tagged)
@@ -575,15 +579,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // r5: FixedArray (not tagged)
       __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
       DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-      { Label loop, entry;
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-        __ b(&entry);
-        __ bind(&loop);
-        __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-        __ bind(&entry);
-        __ cmp(r2, r6);
-        __ b(lt, &loop);
-      }
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+      __ InitializeFieldsWithFiller(r2, r6, r0);
 
       // Store the initialized FixedArray into the properties field of
       // the JSObject
@@ -617,7 +614,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ bind(&allocated);
 
     if (create_memento) {
-      __ ldr(r2, MemOperand(sp, kPointerSize * 2));
+      int offset = (use_new_target ? 3 : 2) * kPointerSize;
+      __ ldr(r2, MemOperand(sp, offset));
       __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
       __ cmp(r2, r5);
       __ b(eq, &count_incremented);
@@ -631,23 +629,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ bind(&count_incremented);
     }
 
-    __ push(r4);
-    __ push(r4);
+    // Restore the parameters.
+    if (use_new_target) {
+      __ pop(r3);
+    }
+    __ pop(r1);
 
-    // Reload the number of arguments and the constructor from the stack.
-    // sp[0]: receiver
-    // sp[1]: receiver
-    // sp[2]: constructor function
-    // sp[3]: number of arguments (smi-tagged)
-    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-    __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+    // Retrieve smi-tagged arguments count from the stack.
+    __ ldr(r0, MemOperand(sp));
+    __ SmiUntag(r0);
+
+    // Push new.target onto the construct frame. This is stored just below the
+    // receiver on the stack.
+    if (use_new_target) {
+      __ push(r3);
+    }
+    __ push(r4);
+    __ push(r4);
 
     // Set up pointer to last argument.
     __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
-    // Set up number of arguments for function call below
-    __ SmiUntag(r0, r3);
-
     // Copy arguments and receiver to the expression stack.
     // r0: number of arguments
     // r1: constructor function
@@ -655,9 +657,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // r3: number of arguments (smi-tagged)
     // sp[0]: receiver
     // sp[1]: receiver
-    // sp[2]: constructor function
-    // sp[3]: number of arguments (smi-tagged)
+    // sp[2]: new.target (if used)
+    // sp[2/3]: number of arguments (smi-tagged)
     Label loop, entry;
+    __ SmiTag(r3, r0);
     __ b(&entry);
     __ bind(&loop);
     __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
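The copy loop above indexes the caller's arguments with the smi-tagged count ('LSL, kPointerSizeLog2 - 1'): on 32-bit V8 a smi is the integer shifted left by one, so shifting the tagged count left by one more scales it by the 4-byte pointer size. A standalone check of that arithmetic:

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;       // 32-bit V8: smi = value << 1
      const int kPointerSizeLog2 = 2;  // 4-byte pointers
      for (int argc = 0; argc < 1000; ++argc) {
        int smi = argc << kSmiTagSize;                       // SmiTag
        assert((smi << (kPointerSizeLog2 - kSmiTagSize)) ==
               argc << kPointerSizeLog2);                    // byte offset
        assert((smi >> kSmiTagSize) == argc);                // SmiUntag
      }
      return 0;
    }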
@ -680,15 +683,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
} }
// Store offset of return address for deoptimizer. // Store offset of return address for deoptimizer.
if (!is_api_function) { // TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
} }
// Restore context from the frame. // Restore context from the frame.
// r0: result // r0: result
// sp[0]: receiver // sp[0]: receiver
// sp[1]: constructor function // sp[1]: new.target (if used)
// sp[2]: number of arguments (smi-tagged) // sp[1/2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid // If the result is an object (in the ECMA sense), we should get rid
@ -699,8 +704,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense. // If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result // r0: result
// sp[0]: receiver (newly allocated object) // sp[0]: receiver (newly allocated object)
// sp[1]: constructor function // sp[1]: new.target (if used)
// sp[2]: number of arguments (smi-tagged) // sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver); __ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than // If the type of the result (stored in its map) is less than
@ -718,9 +723,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit); __ bind(&exit);
// r0: result // r0: result
// sp[0]: receiver (newly allocated object) // sp[0]: receiver (newly allocated object)
// sp[1]: constructor function // sp[1]: new.target (if used)
// sp[2]: number of arguments (smi-tagged) // sp[1/2]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize)); int offset = (use_new_target ? 2 : 1) * kPointerSize;
__ ldr(r1, MemOperand(sp, offset));
// Leave construct frame. // Leave construct frame.
} }
@ -733,12 +739,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
} }
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false); Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
} }
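The helper's stack offsets become conditional once an optional new.target slot sits below the receiver. A minimal standalone sketch (not V8 code; the ARM32 word size is the only assumption) of that slot arithmetic:

// Sketch: how one optional slot shifts every fixed sp-relative offset.
#include <cstdio>

int main() {
  const int kPointerSize = 4;  // ARM32 word size
  for (int use_new_target = 0; use_new_target <= 1; ++use_new_target) {
    // sp[0]: receiver; sp[1]: new.target (if used); argc sits above those.
    int argc_offset = (use_new_target ? 2 : 1) * kPointerSize;
    std::printf("use_new_target=%d -> argc at sp+%d\n", use_new_target,
                argc_offset);
  }
  return 0;
}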
@@ -789,8 +800,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
     __ sub(r4, r4, Operand(2), SetCC);
     __ b(ge, &loop);

-    __ add(r0, r0, Operand(1));
-
     // Handle step in.
     Label skip_step_in;
     ExternalReference debug_step_in_fp =
@@ -819,7 +828,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
     // r0: result
     // sp[0]: number of arguments (smi-tagged)
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    __ ldr(r1, MemOperand(sp, 0));
+    // Get arguments count, skipping over new.target.
+    __ ldr(r1, MemOperand(sp, kPointerSize));

     // Leave construct frame.
   }
@@ -874,7 +884,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // r2: receiver
   // r3: argc
   // r4: argv
-  // r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
+  // r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
   ProfileEntryHookStub::MaybeCallEntryHook(masm);

   // Clear the context before we push it when entering the internal frame.
@@ -922,7 +932,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
     __ mov(r5, Operand(r4));
     __ mov(r6, Operand(r4));
-    if (!FLAG_enable_ool_constant_pool) {
+    if (!FLAG_enable_embedded_constant_pool) {
       __ mov(r8, Operand(r4));
     }
     if (kR9Available == 1) {
@@ -1166,8 +1176,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));

   { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
-    if (FLAG_enable_ool_constant_pool) {
-      __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+    __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+
+    if (FLAG_enable_embedded_constant_pool) {
+      __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
     }

     // Load the OSR entrypoint offset from the deoptimization data.
@@ -1175,10 +1187,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
     __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
         DeoptimizationInputData::kOsrPcOffsetIndex)));

-    // Compute the target address = code_obj + header_size + osr_offset
-    // <entry_addr> = <code_obj> + #header_size + <osr_offset>
-    __ add(r0, r0, Operand::SmiUntag(r1));
-    __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Compute the target address = code start + osr_offset
+    __ add(lr, r0, Operand::SmiUntag(r1));

     // And "return" to the OSR entry point of the function.
     __ Ret();
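The rewrite above folds the Code header adjustment into a single "code start" value before adding the OSR offset. A standalone sketch (not V8 code; the header size and addresses are invented) of the same address arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kHeapObjectTag = 1;    // heap pointers carry a tag bit
  const uintptr_t kCodeHeaderSize = 32;  // invented header size
  uintptr_t code_obj = 0x1000 | kHeapObjectTag;  // tagged Code pointer
  uintptr_t osr_offset = 0x40;                   // already smi-untagged

  uintptr_t code_start = code_obj + kCodeHeaderSize - kHeapObjectTag;
  uintptr_t entry = code_start + osr_offset;  // lr in the stub above
  std::printf("code start %#lx, OSR entry %#lx\n",
              static_cast<unsigned long>(code_start),
              static_cast<unsigned long>(entry));
  return 0;
}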
@@ -1392,6 +1402,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
   Label entry, loop;
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register key = LoadDescriptor::NameRegister();
+  Register slot = LoadDescriptor::SlotRegister();
+  Register vector = LoadWithVectorDescriptor::VectorRegister();

   __ ldr(key, MemOperand(fp, indexOffset));
   __ b(&entry);
@@ -1401,7 +1413,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
   __ ldr(receiver, MemOperand(fp, argumentsOffset));

   // Use inline caching to speed up access to arguments.
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
+  FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
+  Handle<TypeFeedbackVector> feedback_vector =
+      masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
+  int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
+  __ mov(slot, Operand(Smi::FromInt(index)));
+  __ Move(vector, feedback_vector);
+  Handle<Code> ic =
+      KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
   __ Call(ic, RelocInfo::CODE_TARGET);

   // Push the nth argument.
@@ -1649,8 +1668,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r0);
   __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
-                   (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
+                   (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                    fp.bit() | lr.bit());
   __ add(fp, sp,
          Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
 }
@@ -1722,6 +1741,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   {  // Too few parameters: Actual < expected
     __ bind(&too_few);
+
+    // If the function is strong we need to throw an error.
+    Label no_strong_error;
+    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+    __ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
+                             kSmiTagSize)));
+    __ b(eq, &no_strong_error);
+
+    // What we really care about is the required number of arguments.
+    __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
+    __ cmp(r0, Operand::SmiUntag(r4));
+    __ b(ge, &no_strong_error);
+
+    {
+      FrameScope frame(masm, StackFrame::MANUAL);
+      EnterArgumentsAdaptorFrame(masm);
+      __ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
+    }
+
+    __ bind(&no_strong_error);
     EnterArgumentsAdaptorFrame(masm);

     // Calculate copy start address into r0 and copy end address is fp.
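The added block tests one bit in the shared-function-info hints word and compares the actual argument count against the declared parameter count. A standalone sketch (not V8 code; the bit position and counts are invented) of that control flow:

#include <cstdio>

int main() {
  const int kStrongModeFunctionBit = 3;  // invented bit position
  int compiler_hints = 1 << kStrongModeFunctionBit;
  int declared_params = 2;
  int actual_args = 1;

  bool is_strong = (compiler_hints & (1 << kStrongModeFunctionBit)) != 0;
  if (is_strong && actual_args < declared_params) {
    std::printf("throw: strong mode function called with too few arguments\n");
  } else {
    std::printf("fall through to the arguments adaptor\n");
  }
  return 0;
}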
@@ -1792,6 +1832,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {

 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -93,9 +93,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
 #define __ ACCESS_MASM(masm)

-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
-                                          Label* slow,
-                                          Condition cond);
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+                                          Condition cond, Strength strength);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Register lhs,
                                     Register rhs,
@@ -113,15 +112,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
   isolate()->counters()->code_stubs()->Increment();

   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
-  int param_count = descriptor.GetEnvironmentParameterCount();
+  int param_count = descriptor.GetRegisterParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     DCHECK(param_count == 0 ||
-           r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+           r0.is(descriptor.GetRegisterParameter(param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
-      __ push(descriptor.GetEnvironmentParameterRegister(i));
+      __ push(descriptor.GetRegisterParameter(i));
     }
     __ CallExternalReference(miss, param_count);
   }
@@ -238,9 +237,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 // Handle the case where the lhs and rhs are the same object.
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
-static void EmitIdenticalObjectComparison(MacroAssembler* masm,
-                                          Label* slow,
-                                          Condition cond) {
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+                                          Condition cond, Strength strength) {
   Label not_identical;
   Label heap_number, return_equal;
   __ cmp(r0, r1);
@@ -251,10 +249,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
   // They are both equal and they are not both Smis so both of them are not
   // Smis.  If it's not a heap number, then return equal.
   if (cond == lt || cond == gt) {
+    // Call runtime on identical JSObjects.
     __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
     __ b(ge, slow);
+    // Call runtime on identical symbols since we need to throw a TypeError.
     __ cmp(r4, Operand(SYMBOL_TYPE));
     __ b(eq, slow);
+    if (is_strong(strength)) {
+      // Call the runtime on anything that is converted in the semantics, since
+      // we need to throw a TypeError. Smis have already been ruled out.
+      __ cmp(r4, Operand(HEAP_NUMBER_TYPE));
+      __ b(eq, &return_equal);
+      __ tst(r4, Operand(kIsNotStringMask));
+      __ b(ne, slow);
+    }
   } else {
     __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
     __ b(eq, &heap_number);
@@ -262,8 +270,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
     if (cond != eq) {
       __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
       __ b(ge, slow);
+      // Call runtime on identical symbols since we need to throw a TypeError.
       __ cmp(r4, Operand(SYMBOL_TYPE));
       __ b(eq, slow);
+      if (is_strong(strength)) {
+        // Call the runtime on anything that is converted in the semantics,
+        // since we need to throw a TypeError. Smis and heap numbers have
+        // already been ruled out.
+        __ tst(r4, Operand(kIsNotStringMask));
+        __ b(ne, slow);
+      }
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
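Both strong-mode branches reduce to a single mask test on the instance type. A standalone sketch (not V8 code; the mask and type values are invented) of how one AND distinguishes strings from everything else:

#include <cstdio>

int main() {
  const int kIsNotStringMask = 0x80;  // invented: string types stay below it
  const int types[] = {0x05 /* a string type */, 0xC5 /* a non-string type */};
  for (int type : types) {
    bool is_string = (type & kIsNotStringMask) == 0;
    std::printf("type %#x -> %s\n", type,
                is_string ? "string: keep comparing" : "non-string: go slow");
  }
  return 0;
}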
@@ -561,7 +577,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {

   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
-  EmitIdenticalObjectComparison(masm, &slow, cc);
+  EmitIdenticalObjectComparison(masm, &slow, cc, strength());

   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
@@ -663,7 +679,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   if (cc == eq) {
     native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
-    native = Builtins::COMPARE;
+    native =
+        is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
     int ncr;  // NaN compare result
     if (cc == lt || cc == le) {
       ncr = GREATER;
@@ -1084,10 +1101,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   __ ldr(r1, MemOperand(r1));
   __ mov(r2, Operand(pending_handler_offset_address));
   __ ldr(r2, MemOperand(r2));
-  if (FLAG_enable_ool_constant_pool) {
-    __ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));
+  __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
+  if (FLAG_enable_embedded_constant_pool) {
+    __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
   }
-  __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ add(pc, r1, r2);
 }
@@ -1132,8 +1149,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   // r3: argc
   // r4: argv
   int marker = type();
-  if (FLAG_enable_ool_constant_pool) {
-    __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
+  if (FLAG_enable_embedded_constant_pool) {
+    __ mov(r8, Operand::Zero());
   }
   __ mov(r7, Operand(Smi::FromInt(marker)));
   __ mov(r6, Operand(Smi::FromInt(marker)));
@@ -1142,8 +1159,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   __ ldr(r5, MemOperand(r5));
   __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
-                   (FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
+                   (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
                    ip.bit());

   // Set up frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@@ -1331,11 +1348,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ ldr(map_load_offset, MemOperand(map_load_offset));
     __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));

-    __ mov(r8, map);
+    __ mov(scratch, map);
     // |map_load_offset| points at the beginning of the cell. Calculate the
     // field containing the map.
     __ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
-    __ RecordWriteField(map_load_offset, Cell::kValueOffset, r8, function,
+    __ RecordWriteField(map_load_offset, Cell::kValueOffset, scratch, function,
                         kLRHasNotBeenSaved, kDontSaveFPRegs,
                         OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   }
@@ -1473,9 +1490,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   // Ensure that the vector and slot registers won't be clobbered before
   // calling the miss handler.
-  DCHECK(!FLAG_vector_ics ||
-         !AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
-                     VectorLoadICDescriptor::SlotRegister()));
+  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
+                     LoadWithVectorDescriptor::SlotRegister()));

   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
                                                           r5, &miss);
@@ -1494,9 +1510,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   Register scratch = r5;
   Register result = r0;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!FLAG_vector_ics ||
-         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
-          result.is(VectorLoadICDescriptor::SlotRegister())));
+  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
+         result.is(LoadWithVectorDescriptor::SlotRegister()));

   // StringCharAtGenerator doesn't use the result register until it's passed
   // the different miss possibilities. If it did, we would have a conflict
@@ -1520,7 +1535,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {

 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  CHECK(!has_new_target());
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
   const int kDisplacement =
@@ -1578,8 +1592,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
   //  sp[4] : receiver displacement
   //  sp[8] : function

-  CHECK(!has_new_target());
-
   // Check if the calling frame is an arguments adaptor frame.
   Label runtime;
   __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1608,8 +1620,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
   //  r6 : allocated object (tagged)
   //  r9 : mapped parameter count (tagged)

-  CHECK(!has_new_target());
-
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   // r1 = parameter count (tagged)
@@ -1850,14 +1860,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
   __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  if (has_new_target()) {
-    __ cmp(r1, Operand(Smi::FromInt(0)));
-    Label skip_decrement;
-    __ b(eq, &skip_decrement);
-    // Subtract 1 from smi-tagged arguments count.
-    __ sub(r1, r1, Operand(2));
-    __ bind(&skip_decrement);
-  }
   __ str(r1, MemOperand(sp, 0));
   __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -1939,9 +1941,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {

 void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
   // Stack layout on entry.
-  //  sp[0] : index of rest parameter
-  //  sp[4] : number of parameters
-  //  sp[8] : receiver displacement
+  //  sp[0] : language mode
+  //  sp[4] : index of rest parameter
+  //  sp[8] : number of parameters
+  //  sp[12] : receiver displacement

   Label runtime;
   __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1951,13 +1954,13 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
   // Patch the arguments.length and the parameters pointer.
   __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ str(r1, MemOperand(sp, 1 * kPointerSize));
+  __ str(r1, MemOperand(sp, 2 * kPointerSize));
   __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
   __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
-  __ str(r3, MemOperand(sp, 2 * kPointerSize));
+  __ str(r3, MemOperand(sp, 3 * kPointerSize));

   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+  __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
 }
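The patched offsets above all manipulate smi-tagged counts: a small integer n is stored as n << 1, so "subtract 1" from a tagged count is an untagged subtraction of 2. A standalone sketch (not V8 code) of that encoding:

#include <cstdio>

int main() {
  const int kSmiTagSize = 1;
  int count = 5;
  int smi_count = count << kSmiTagSize;  // SmiTag
  smi_count -= 2;                        // subtract 1 from the tagged count
  std::printf("untagged count after decrement: %d\n",
              smi_count >> kSmiTagSize);  // SmiUntag -> 4
  return 0;
}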
@@ -2418,7 +2421,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   // this position in a symbol (see static asserts in type-feedback-vector.h).
   Label check_allocation_site;
   Register feedback_map = r5;
-  Register weak_value = r8;
+  Register weak_value = r6;
   __ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
   __ cmp(r1, weak_value);
   __ b(eq, &done);
@@ -2703,6 +2706,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
   __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
   __ b(ne, &miss);

+  // Increment the call count for monomorphic function calls.
+  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r3, FieldMemOperand(r2, 0));
+  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ str(r3, FieldMemOperand(r2, 0));
+
   __ mov(r2, r4);
   __ mov(r3, r1);
   ArrayConstructorStub stub(masm->isolate(), arg_count());
@@ -2762,6 +2772,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(r1, &extra_checks_or_miss);

+  // Increment the call count for monomorphic function calls.
+  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r3, FieldMemOperand(r2, 0));
+  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ str(r3, FieldMemOperand(r2, 0));
+
   __ bind(&have_js_function);
   if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
@@ -2837,6 +2854,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ add(r4, r4, Operand(Smi::FromInt(1)));
   __ str(r4, FieldMemOperand(r2, with_types_offset));

+  // Initialize the call counter.
+  __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
+
   // Store the function. Use a stub since we need a frame for allocation.
   // r2 - vector
   // r3 - slot
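All three added blocks address the same slot: the call counter lives one pointer past the feedback slot, and the slot index arrives smi-tagged. A standalone sketch (not V8 code; the header size is invented) of that offset computation:

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kFixedArrayHeaderSize = 8;  // invented header size
  int smi_slot = 3 << 1;                // slot index 3, smi-tagged

  // Operand::PointerOffsetFromSmiKey: untag, then scale by the word size.
  int slot_offset = (smi_slot >> 1) * kPointerSize;
  int counter_offset = slot_offset + kFixedArrayHeaderSize + kPointerSize;
  std::printf("call counter at byte offset %d within the vector\n",
              counter_offset);
  return 0;
}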
@@ -2937,9 +2959,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
-  if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
-    __ Push(VectorLoadICDescriptor::VectorRegister(),
-            VectorLoadICDescriptor::SlotRegister(), object_, index_);
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ Push(LoadWithVectorDescriptor::VectorRegister(),
+            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   } else {
     // index_ is consumed by runtime conversion function.
     __ Push(object_, index_);
@@ -2954,9 +2976,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Move(index_, r0);
-  if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
-    __ Pop(VectorLoadICDescriptor::VectorRegister(),
-           VectorLoadICDescriptor::SlotRegister(), object_);
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
+           LoadWithVectorDescriptor::SlotRegister(), object_);
   } else {
     __ pop(object_);
   }
@@ -3567,7 +3589,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   __ bind(&unordered);
   __ bind(&generic_stub);
-  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -4348,15 +4370,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {

 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
-  VectorRawLoadStub stub(isolate(), state());
+  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+  LoadICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }


 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
-  VectorRawKeyedLoadStub stub(isolate());
+  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+  KeyedLoadICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }
@@ -4375,12 +4397,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }


-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }


-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   GenerateImpl(masm, true);
 }
@@ -4474,14 +4494,14 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
 }


-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = VectorLoadICDescriptor::ReceiverRegister();  // r1
-  Register name = VectorLoadICDescriptor::NameRegister();          // r2
-  Register vector = VectorLoadICDescriptor::VectorRegister();      // r3
-  Register slot = VectorLoadICDescriptor::SlotRegister();          // r0
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
+  Register name = LoadWithVectorDescriptor::NameRegister();          // r2
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
   Register feedback = r4;
   Register receiver_map = r5;
-  Register scratch1 = r8;
+  Register scratch1 = r6;

   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@@ -4521,24 +4541,24 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
 }


-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }


-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   GenerateImpl(masm, true);
 }


-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = VectorLoadICDescriptor::ReceiverRegister();  // r1
-  Register key = VectorLoadICDescriptor::NameRegister();           // r2
-  Register vector = VectorLoadICDescriptor::VectorRegister();      // r3
-  Register slot = VectorLoadICDescriptor::SlotRegister();          // r0
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
+  Register key = LoadWithVectorDescriptor::NameRegister();           // r2
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
   Register feedback = r4;
   Register receiver_map = r5;
-  Register scratch1 = r8;
+  Register scratch1 = r6;

   __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
   __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@@ -4568,7 +4588,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
   __ b(ne, &try_poly_name);
   Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

   __ bind(&try_poly_name);
@@ -4592,6 +4612,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
 }
+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+  VectorStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+  VectorKeyedStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Label miss;
+
+  // TODO(mvstanton): Implement.
+  __ bind(&miss);
+  StoreIC::GenerateMiss(masm);
+}
+
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Label miss;
+
+  // TODO(mvstanton): Implement.
+  __ bind(&miss);
+  KeyedStoreIC::GenerateMiss(masm);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -5297,6 +5369,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {

 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -946,6 +946,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -126,6 +126,7 @@ int Registers::Number(const char* name) {
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -42,6 +42,11 @@ const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
 const int kPCRegister = 15;
 const int kNoRegister = -1;

+// Used in embedded constant pool builder - max reach in bits for
+// various load instructions (unsigned)
+const int kLdrMaxReachBits = 12;
+const int kVldrMaxReachBits = 10;
+
 // -----------------------------------------------------------------------------
 // Conditions.
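A quick sanity check (not V8 code) of what those reach bits mean: an unsigned N-bit byte offset can address up to 2^N - 1 bytes past the base register, so ldr reaches 4095 bytes and vldr's 10 effective bits (an 8-bit immediate scaled by 4) reach 1023 bytes:

#include <cstdio>

int main() {
  const int kLdrMaxReachBits = 12;
  const int kVldrMaxReachBits = 10;
  std::printf("ldr reach:  %d bytes\n", (1 << kLdrMaxReachBits) - 1);
  std::printf("vldr reach: %d bytes\n", (1 << kVldrMaxReachBits) - 1);
  return 0;
}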
@@ -77,6 +77,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
 #endif
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -144,10 +144,8 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  RegList regs = receiver.bit() | name.bit();
-  if (FLAG_vector_ics) {
-    regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
-  }
+  Register slot = LoadDescriptor::SlotRegister();
+  RegList regs = receiver.bit() | name.bit() | slot.bit();
   Generate_DebugBreakCallHelper(masm, regs, 0);
 }
@@ -157,8 +155,11 @@ void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
   Register value = StoreDescriptor::ValueRegister();
-  Generate_DebugBreakCallHelper(
-      masm, receiver.bit() | name.bit() | value.bit(), 0);
+  RegList regs = receiver.bit() | name.bit() | value.bit();
+  if (FLAG_vector_stores) {
+    regs |= VectorStoreICDescriptor::SlotRegister().bit();
+  }
+  Generate_DebugBreakCallHelper(masm, regs, 0);
 }
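A RegList is a plain bitmask with one bit per register, so "also save the slot register" is a single OR. A standalone sketch (not V8 code; register codes are invented) of the bookkeeping above:

#include <cstdint>
#include <cstdio>

int main() {
  using RegList = uint32_t;
  const int receiver = 1, name = 2, value = 0, slot = 4;  // invented codes
  bool vector_stores = true;  // stand-in for FLAG_vector_stores

  RegList regs = (1u << receiver) | (1u << name) | (1u << value);
  if (vector_stores) regs |= 1u << slot;
  std::printf("saved register mask: %#x\n", static_cast<unsigned>(regs));
  return 0;
}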
@@ -170,11 +171,7 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {

 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC keyed store call (from ic-arm.cc).
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Generate_DebugBreakCallHelper(
-      masm, receiver.bit() | name.bit() | value.bit(), 0);
+  GenerateStoreICDebugBreak(masm);
 }
@@ -267,7 +264,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
                      StandardFrameConstants::kConstantPoolOffset - kPointerSize));

   // Pop return address, frame and constant pool pointer (if
-  // FLAG_enable_ool_constant_pool).
+  // FLAG_enable_embedded_constant_pool).
   __ LeaveFrame(StackFrame::INTERNAL);

   { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -289,6 +286,7 @@ const bool LiveEdit::kFrameDropperSupported = true;

 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -353,11 +353,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {

 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-  DCHECK(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_embedded_constant_pool);
   SetFrameSlot(offset, value);
 }

 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
@@ -1904,8 +1904,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 //------------------------------------------------------------------------------
@@ -21,7 +21,7 @@ namespace internal {
 Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
 Register JavaScriptFrame::context_register() { return cp; }
 Register JavaScriptFrame::constant_pool_pointer_register() {
-  DCHECK(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_embedded_constant_pool);
   return pp;
 }
@@ -29,18 +29,12 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
 Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
 Register StubFailureTrampolineFrame::context_register() { return cp; }
 Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
-  DCHECK(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_embedded_constant_pool);
   return pp;
 }

-Object*& ExitFrame::constant_pool_slot() const {
-  DCHECK(FLAG_enable_ool_constant_pool);
-  const int offset = ExitFrameConstants::kConstantPoolOffset;
-  return Memory::Object_at(fp() + offset);
-}
-
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM
@@ -66,11 +66,23 @@ const int kNumDoubleCalleeSaved = 8;
 // TODO(regis): Only 8 registers may actually be sufficient. Revisit.
 const int kNumSafepointRegisters = 16;

+// The embedded constant pool pointer (r8/pp) is not included in the safepoint
+// since it is not tagged. This register is preserved in the stack frame where
+// its value will be updated if GC code movement occurs. Including it in the
+// safepoint (where it will not be relocated) would cause a stale value to be
+// restored.
+const RegList kConstantPointerRegMask =
+    FLAG_enable_embedded_constant_pool ? (1 << 8) : 0;
+const int kNumConstantPoolPointerReg =
+    FLAG_enable_embedded_constant_pool ? 1 : 0;
+
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+const RegList kSafepointSavedRegisters =
+    kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);
+const int kNumSafepointSavedRegisters =
+    kNumJSCallerSaved + kNumCalleeSaved - kNumConstantPoolPointerReg;

 // ----------------------------------------------------
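A standalone sketch (not V8 code; the register masks are invented) of the arithmetic above: clearing the pp bit from the callee-saved set removes exactly one register, and the saved-register count drops by one to match:

#include <cstdint>
#include <cstdio>

int main() {
  using RegList = uint32_t;
  const RegList kJSCallerSaved = 0x000F;  // invented: r0-r3
  const RegList kCalleeSaved = 0x0FF0;    // invented: r4-r11, includes r8
  bool embedded_pool = true;  // stand-in for FLAG_enable_embedded_constant_pool

  RegList pp_mask = embedded_pool ? (1u << 8) : 0;
  RegList safepoint_saved = kJSCallerSaved | (kCalleeSaved & ~pp_mask);
  std::printf("safepoint mask %#x, pp excluded: %d\n",
              static_cast<unsigned>(safepoint_saved),
              (safepoint_saved & (1u << 8)) == 0);
  return 0;
}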
@@ -84,11 +96,11 @@ class EntryFrameConstants : public AllStatic {
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kFrameSize = FLAG_enable_ool_constant_pool ?
-      3 * kPointerSize : 2 * kPointerSize;
+  static const int kFrameSize =
+      FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;

-  static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
-      -3 * kPointerSize : 0;
+  static const int kConstantPoolOffset =
+      FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
   static const int kCodeOffset = -2 * kPointerSize;
   static const int kSPOffset = -1 * kPointerSize;
@@ -129,13 +141,12 @@ class ArgumentsAdaptorFrameConstants : public AllStatic {
 class ConstructFrameConstants : public AllStatic {
  public:
   // FP-relative.
-  static const int kImplicitReceiverOffset = -6 * kPointerSize;
-  static const int kConstructorOffset = -5 * kPointerSize;
+  static const int kImplicitReceiverOffset = -5 * kPointerSize;
   static const int kLengthOffset = -4 * kPointerSize;
   static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
   static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+      StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
 };
File diff suppressed because it is too large
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }

 const Register LoadDescriptor::ReceiverRegister() { return r1; }
 const Register LoadDescriptor::NameRegister() { return r2; }
+const Register LoadDescriptor::SlotRegister() { return r0; }

-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
-
-const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }

 const Register StoreDescriptor::ReceiverRegister() { return r1; }
@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return r2; }
 const Register StoreDescriptor::ValueRegister() { return r0; }

+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
+
+const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
+
 const Register StoreTransitionDescriptor::MapRegister() { return r3; }
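The rest of this file repeats one pattern: each descriptor now hands the platform-independent layer a bare register array, with the context register and the Representation table gone. A standalone sketch (not V8 code) of that shape:

#include <cstdio>

struct Register { int code; };

// Stand-in for CallInterfaceDescriptorData::InitializePlatformSpecific.
void InitializePlatformSpecific(int count, const Register* regs) {
  for (int i = 0; i < count; ++i) {
    std::printf("param %d -> r%d\n", i, regs[i].code);
  }
}

int main() {
  Register registers[] = {{2}, {3}};  // e.g. r2, r3
  InitializePlatformSpecific(
      static_cast<int>(sizeof(registers) / sizeof(registers[0])), registers);
  return 0;
}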
@@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return r2; }

-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r2};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r3};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void FastCloneShallowArrayDescriptor::Initialize(
+void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r3, r2, r1};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void FastCloneShallowObjectDescriptor::Initialize(
+void FastNewContextDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r3, r2, r1, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void CreateAllocationSiteDescriptor::Initialize(
+void ToNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r2, r3};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Smi()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r2, r3, r1};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
-}
-
-
-void StoreArrayLiteralElementDescriptor::Initialize(
+void NumberToStringDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r3, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void CallFunctionWithFeedbackDescriptor::Initialize(
+void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1, r3};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Smi()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1, r3, r2};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {r3, r2, r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r2, r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r2, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r2, r3, r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r3, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CallFunctionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1, r3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1, r3, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void CallConstructDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // r0 : number of arguments
   // r1 : the function to call
   // r2 : feedback vector
@@ -166,234 +162,206 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
   //     vector (Smi)
   // TODO(turbofan): So far we don't gather type feedback and hence skip the
   // slot parameter, but ArrayConstructStub needs the vector to be undefined.
-  Register registers[] = {cp, r0, r1, r2};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r0, r1, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r2, r1, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r2, r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r0, r1};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r0, r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  data->InitializePlatformSpecific(0, nullptr, nullptr);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
-  // cp -- context
-  Register registers[] = {cp};
-  data->Initialize(arraysize(registers), registers, nullptr);
-}
-
-
-void ArrayConstructorConstantArgCountDescriptor::Initialize(
-    CallInterfaceDescriptorData* data) {
-  // register state
-  // cp -- context
   // r0 -- number of arguments
   // r1 -- function
   // r2 -- allocation site with elements kind
-  Register registers[] = {cp, r1, r2};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r1, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {cp, r1, r2, r0};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(),
-      Representation::Tagged(), Representation::Integer32()};
-  data->Initialize(arraysize(registers), registers, representations);
-}
-
-
-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
+  // stack param count needs (constructor pointer, and single argument)
+  Register registers[] = {r1, r2, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::
+    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
   // register state
-  // cp -- context
   // r0 -- number of arguments
   // r1 -- constructor function
-  Register registers[] = {cp, r1};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {cp, r1, r0};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Integer32()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
-}
-
-
-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void CompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r2, r1, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, r1, r0};
-  data->Initialize(arraysize(registers), registers, NULL);
+void CompareNilDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }


-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ToBooleanDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void BinaryOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r2, r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void StringAddDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {r1, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void KeyedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor noInlineDescriptor =
       PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
   Register registers[] = {
-      cp,  // context
       r2,  // key
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // key
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &noInlineDescriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &noInlineDescriptor);
 }


-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor noInlineDescriptor =
       PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
   Register registers[] = {
-      cp,  // context
       r2,  // name
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // name
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &noInlineDescriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &noInlineDescriptor);
 }


-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
   Register registers[] = {
-      cp,  // context
       r0,  // receiver
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // receiver
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &default_descriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &default_descriptor);
 }


-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor = static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = { Register registers[] = {
cp, // context
r1, // JSFunction r1, // JSFunction
r0, // actual number of arguments r0, // actual number of arguments
r2, // expected number of arguments r2, // expected number of arguments
}; };
Representation representations[] = { data->InitializePlatformSpecific(arraysize(registers), registers,
Representation::Tagged(), // context &default_descriptor);
Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
} }
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) { void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor = static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = { Register registers[] = {
cp, // context
r0, // callee r0, // callee
r4, // call_data r4, // call_data
r2, // holder r2, // holder
r1, // api_function_address r1, // api_function_address
r3, // actual number of arguments r3, // actual number of arguments
}; };
Representation representations[] = { data->InitializePlatformSpecific(arraysize(registers), registers,
Representation::Tagged(), // context &default_descriptor);
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
Representation::Integer32(), // actual number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
} }
void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) { void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor = static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = { Register registers[] = {
cp, // context
r0, // callee r0, // callee
r4, // call_data r4, // call_data
r2, // holder r2, // holder
r1, // api_function_address r1, // api_function_address
}; };
Representation representations[] = { data->InitializePlatformSpecific(arraysize(registers), registers,
Representation::Tagged(), // context &default_descriptor);
Representation::Tagged(), // callee }
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
}; };
data->Initialize(arraysize(registers), registers, representations, data->InitializePlatformSpecific(arraysize(registers), registers);
&default_descriptor);
} }
} } // namespace internal
} // namespace v8::internal } // namespace v8
#endif // V8_TARGET_ARCH_ARM #endif // V8_TARGET_ARCH_ARM
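Note on the pattern running through the descriptor changes above: the old `Initialize` entry points listed `cp` explicitly and carried a parallel `Representation` array, while the new `InitializePlatformSpecific` entry points record only the platform's register assignment; representations and the context register now live in the platform-independent layer. A minimal sketch of the receiving side, with simplified stand-in types rather than V8's real declarations:

#include <cstddef>

struct Register { int code; };
class PlatformInterfaceDescriptor;  // opaque in this sketch

// Simplified stand-in for V8's CallInterfaceDescriptorData.
class CallInterfaceDescriptorData {
 public:
  // New-style entry point: only the register assignment is platform-specific.
  void InitializePlatformSpecific(
      int register_parameter_count, const Register* registers,
      PlatformInterfaceDescriptor* platform_descriptor = nullptr) {
    register_parameter_count_ = register_parameter_count;
    registers_ = registers;
    platform_descriptor_ = platform_descriptor;
  }

 private:
  int register_parameter_count_ = 0;
  const Register* registers_ = nullptr;
  PlatformInterfaceDescriptor* platform_descriptor_ = nullptr;
};

Dropping `cp` from every register list is what turns the context into an implicit parameter, which the Lithium changes below then have to account for.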
@@ -1092,10 +1092,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());

+  // Target
   ops.Add(target, zone());
-  for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op =
-        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
     ops.Add(op, zone());
   }
@@ -1105,20 +1113,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
 }


-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
-    HTailCallThroughMegamorphicCache* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* receiver_register =
-      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
-  LOperand* name_register =
-      UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
-  // Not marked as call. It can't deoptimize, and it never returns.
-  return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
-}
-
-
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), r1);
@@ -1869,7 +1863,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
   LOperand* object = UseFixed(instr->value(), r0);
   LDateField* result =
       new(zone()) LDateField(object, FixedTemp(r1), instr->index());
-  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+  return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
@@ -2148,7 +2142,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
   if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   }
   LLoadGlobalGeneric* result =
       new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@@ -2197,7 +2191,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
   if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   }

   LInstruction* result =
@@ -2271,7 +2265,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
   if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   }

   LInstruction* result =
@@ -2336,8 +2330,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());

-  return MarkAsCall(
-      new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result =
+      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+  return MarkAsCall(result, instr);
 }
@@ -2369,6 +2371,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
 }


+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, r0);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool is_in_object = instr->access().IsInobject();
   bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2407,8 +2424,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* obj =
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }

-  LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
   return MarkAsCall(result, instr);
 }
@@ -2485,7 +2509,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
     CallInterfaceDescriptor descriptor =
         info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor.GetEnvironmentParameterRegister(index);
+    Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
   }
 }
@@ -2602,7 +2626,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedClosure(instr->closure());
+  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }
@@ -2671,4 +2695,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
   return MarkAsCall(DefineFixed(result, cp), instr);
 }

-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
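The `DoCallWithDescriptor` hunk above pins down the new operand layout: slot 0 holds the code target, slot 1 the context (fixed to `cp`), and the descriptor's register parameters start at slot 2. A small illustrative sketch of that index arithmetic, assuming the same layout (not V8 API):

// Operand layout for a descriptor-based call, as wired up above:
//   ops[0]     -> code target
//   ops[1]     -> context (always cp on ARM)
//   ops[2 + k] -> k-th register parameter named by the descriptor
constexpr int kImplicitRegisterParameterCount = 2;  // target + context

inline int OperandIndexForDescriptorParameter(int k) {
  return kImplicitRegisterParameterCount + k;
}

inline int DescriptorParameterForOperandIndex(int i) {
  // Inverse mapping, as used by the loop in DoCallWithDescriptor.
  return i - kImplicitRegisterParameterCount;
}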
@@ -117,6 +117,7 @@ class LCodeGen;
   V(MathPowHalf)                             \
   V(MathRound)                               \
   V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
   V(ModByConstI)                             \
   V(ModByPowerOf2I)                          \
   V(ModI)                                    \
@@ -153,7 +154,6 @@ class LCodeGen;
   V(SubI)                                    \
   V(RSubI)                                   \
   V(TaggedToI)                               \
-  V(TailCallThroughMegamorphicCache)         \
   V(ThisFunction)                            \
   V(ToFastProperties)                        \
   V(TransitionElementsKind)                  \
@@ -474,26 +474,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
 };


-class LTailCallThroughMegamorphicCache final
-    : public LTemplateInstruction<0, 3, 0> {
- public:
-  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
-                                   LOperand* name) {
-    inputs_[0] = context;
-    inputs_[1] = receiver;
-    inputs_[2] = name;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* receiver() { return inputs_[1]; }
-  LOperand* name() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
-                               "tail-call-through-megamorphic-cache")
-  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -1196,6 +1176,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)

+  Strength strength() { return hydrogen()->strength(); }
+
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1567,7 +1549,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)

-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+  Strength strength() { return hydrogen()->strength(); }

  private:
   Token::Value op_;
@@ -1865,8 +1847,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
                       const ZoneList<LOperand*>& operands, Zone* zone)
       : descriptor_(descriptor),
-        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
-    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
     inputs_.AddAll(operands, zone);
   }
@@ -1876,6 +1862,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)

+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
+
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2188,17 +2178,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
 };


-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
  public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
     inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
   }

   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }

   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2247,22 +2242,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
 };


-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
  public:
-  LStoreKeyedGeneric(LOperand* context,
-                     LOperand* obj,
-                     LOperand* key,
-                     LOperand* value) {
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = obj;
+    inputs_[1] = object;
     inputs_[2] = key;
     inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
   }

   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }

   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2318,6 +2315,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
 };


+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
 class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
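A reading aid for the template arguments changing in this header: `LTemplateInstruction<R, I, T>` encodes result, input and temp operand counts, so `LStoreNamedGeneric` going from `<0, 3, 0>` to `<0, 3, 2>` means the same three inputs plus two new temps (the feedback-vector slot and the vector itself). A hedged sketch of the underlying idea with simplified stand-ins, not V8's actual class:

// Simplified model: operand storage is statically sized by the template
// parameters (arrays are padded to size 1 when a count is zero, since
// zero-length arrays are not standard C++).
template <int R, int I, int T>
class LTemplateInstructionSketch {
 protected:
  void* results_[R == 0 ? 1 : R];
  void* inputs_[I == 0 ? 1 : I];
  void* temps_[T == 0 ? 1 : T];
};

// LStoreKeyedGeneric after this patch: no results, four inputs
// (context, object, key, value) and two temps (slot, vector).
class StoreKeyedGenericSketch : public LTemplateInstructionSketch<0, 4, 2> {};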
@@ -113,7 +113,7 @@ bool LCodeGen::GeneratePrologue() {
   // r1: Callee's JS function.
   // cp: Callee's context.
-  // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
+  // pp: Callee's constant pool pointer (if enabled)
   // fp: Caller's frame pointer.
   // lr: Caller's pc.
@@ -121,7 +121,7 @@ bool LCodeGen::GeneratePrologue() {
     // global proxy when called as functions (without an explicit receiver
     // object).
     if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
-        !info_->is_native()) {
+        !info_->is_native() && info_->scope()->has_this_declaration()) {
       Label ok;
       int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
       __ ldr(r2, MemOperand(sp, receiver_offset));
@@ -197,8 +197,9 @@ bool LCodeGen::GeneratePrologue() {
     __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
-    for (int i = 0; i < num_parameters; i++) {
-      Variable* var = scope()->parameter(i);
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
       if (var->IsContextSlot()) {
         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
             (num_parameters - 1 - i) * kPointerSize;
@@ -595,52 +596,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
   // The translation includes one command per value in the environment.
   int translation_size = environment->translation_size();
-  // The output frame height does not include the parameters.
-  int height = translation_size - environment->parameter_count();

   WriteTranslation(environment->outer(), translation);
-  bool has_closure_id = !info()->closure().is_null() &&
-      !info()->closure().is_identical_to(environment->closure());
-  int closure_id = has_closure_id
-      ? DefineDeoptimizationLiteral(environment->closure())
-      : Translation::kSelfLiteralId;
-
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case JS_GETTER:
-      DCHECK(translation_size == 1);
-      DCHECK(height == 0);
-      translation->BeginGetterStubFrame(closure_id);
-      break;
-    case JS_SETTER:
-      DCHECK(translation_size == 2);
-      DCHECK(height == 0);
-      translation->BeginSetterStubFrame(closure_id);
-      break;
-    case STUB:
-      translation->BeginCompiledStubFrame();
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-  }
+  WriteTranslationFrame(environment, translation);

   int object_index = 0;
   int dematerialized_index = 0;
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
-    AddToTranslation(environment,
-                     translation,
-                     value,
-                     environment->HasTaggedValueAt(i),
-                     environment->HasUint32ValueAt(i),
-                     &object_index,
-                     &dematerialized_index);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
   }
 }
@@ -960,28 +926,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
 }


-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
-  int result = deoptimization_literals_.length();
-  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
-    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
-  }
-  deoptimization_literals_.Add(literal, zone());
-  return result;
-}
-
-
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  DCHECK(deoptimization_literals_.length() == 0);
-
-  const ZoneList<Handle<JSFunction> >* inlined_closures =
-      chunk()->inlined_closures();
-
-  for (int i = 0, length = inlined_closures->length();
-       i < length;
-       i++) {
-    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  DCHECK_EQ(0, deoptimization_literals_.length());
+  for (auto function : chunk()->inlined_functions()) {
+    DefineDeoptimizationLiteral(function);
   }
   inlined_function_count_ = deoptimization_literals_.length();
 }
@@ -1016,10 +965,6 @@ void LCodeGen::RecordSafepoint(
       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
-  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
-    // Register pp always contains a pointer to the constant pool.
-    safepoint.DefinePointerRegister(pp, zone());
-  }
 }
@@ -1936,20 +1881,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
   Register result = ToRegister(instr->result());
   Register scratch = ToRegister(instr->temp());
   Smi* index = instr->index();
-  Label runtime, done;
   DCHECK(object.is(result));
   DCHECK(object.is(r0));
   DCHECK(!scratch.is(scratch0()));
   DCHECK(!scratch.is(object));

-  __ SmiTst(object);
-  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
-  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
-  DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
-
   if (index->value() == 0) {
     __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
   } else {
+    Label runtime, done;
     if (index->value() < JSDate::kFirstUncachedField) {
       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
       __ mov(scratch, Operand(stamp));
@@ -2174,8 +2114,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   DCHECK(ToRegister(instr->right()).is(r0));
   DCHECK(ToRegister(instr->result()).is(r0));

-  Handle<Code> code = CodeFactory::BinaryOpIC(
-      isolate(), instr->op(), instr->language_mode()).code();
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
   // Block literal pool emission to ensure nop indicating no inlined smi code
   // is in the correct position.
   Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2611,7 +2551,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();

-  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // This instruction also signals no smi code inlined.
   __ cmp(r0, Operand::Zero());
@@ -2885,37 +2826,41 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
   int additional_delta = (call_size / Assembler::kInstrSize) + 4;
-  // Make sure that code size is predicable, since we use specific constants
-  // offsets in the code to find embedded values..
-  PredictableCodeSizeScope predictable(
-      masm_, (additional_delta + 1) * Assembler::kInstrSize);
-  // Make sure we don't emit any additional entries in the constant pool before
-  // the call to ensure that the CallCodeSize() calculated the correct number of
-  // instructions for the constant pool load.
   {
-    ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-    int map_check_delta =
-        masm_->InstructionsGeneratedSince(map_check) + additional_delta;
-    int bool_load_delta =
-        masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
-    Label before_push_delta;
-    __ bind(&before_push_delta);
-    __ BlockConstPoolFor(additional_delta);
-    // r5 is used to communicate the offset to the location of the map check.
-    __ mov(r5, Operand(map_check_delta * kPointerSize));
-    // r6 is used to communicate the offset to the location of the bool load.
-    __ mov(r6, Operand(bool_load_delta * kPointerSize));
-    // The mov above can generate one or two instructions. The delta was
-    // computed for two instructions, so we need to pad here in case of one
-    // instruction.
-    while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
-      __ nop();
+    // Make sure that code size is predictable, since we use specific constant
+    // offsets in the code to find embedded values.
+    PredictableCodeSizeScope predictable(
+        masm_, additional_delta * Assembler::kInstrSize);
+    // The labels must be already bound since the code has predictable size up
+    // to the call instruction.
+    DCHECK(map_check->is_bound());
+    DCHECK(bool_load->is_bound());
+    // Make sure we don't emit any additional entries in the constant pool
+    // before the call to ensure that the CallCodeSize() calculated the
+    // correct number of instructions for the constant pool load.
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+      int map_check_delta =
+          masm_->InstructionsGeneratedSince(map_check) + additional_delta;
+      int bool_load_delta =
+          masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
+      Label before_push_delta;
+      __ bind(&before_push_delta);
+      __ BlockConstPoolFor(additional_delta);
+      // r5 is used to communicate the offset to the location of the map check.
+      __ mov(r5, Operand(map_check_delta * kPointerSize));
+      // r6 is used to communicate the offset to the location of the bool load.
+      __ mov(r6, Operand(bool_load_delta * kPointerSize));
+      // The mov above can generate one or two instructions. The delta was
+      // computed for two instructions, so we need to pad here in case of one
+      // instruction.
+      while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
+        __ nop();
+      }
     }
+    CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
+                    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   }
-  CallCodeGeneric(stub.GetCode(),
-                  RelocInfo::CODE_TARGET,
-                  instr,
-                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value (r0) into the result register slot and
@@ -2928,7 +2873,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();

-  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // This instruction also signals no smi code inlined.
   __ cmp(r0, Operand::Zero());
@@ -2986,10 +2932,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
 template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = VectorLoadICDescriptor::SlotRegister();
-  DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  Register slot_register = LoadDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
   DCHECK(slot_register.is(r0));

   AllowDeferredHandleDereference vector_structure_check;
@@ -3002,6 +2947,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
 }


+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->global_object())
@@ -3009,11 +2968,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->result()).is(r0));

   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
-                                                       PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
+                                                       PREMONOMORPHIC).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -3108,12 +3065,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   // Name is always in r2.
   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  }
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-      isolate(), NOT_CONTEXTUAL,
-      instr->hydrogen()->initialization_state()).code();
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
@@ -3420,9 +3376,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
   }

-  Handle<Code> ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(
-          isolate(), instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+      isolate(), instr->hydrogen()->language_mode(),
+      instr->hydrogen()->initialization_state()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
@@ -3961,29 +3917,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
 }


-void LCodeGen::DoTailCallThroughMegamorphicCache(
-    LTailCallThroughMegamorphicCache* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register name = ToRegister(instr->name());
-  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(name.is(LoadDescriptor::NameRegister()));
-  DCHECK(receiver.is(r1));
-  DCHECK(name.is(r2));
-  Register scratch = r4;
-  Register extra = r5;
-  Register extra2 = r6;
-  Register extra3 = r9;
-
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(
-      masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
-      scratch, extra, extra2, extra3);
-
-  // Tail call to miss if we ended up here.
-  LoadIC::GenerateMiss(masm());
-}
-
-
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(r0));
@@ -4274,10 +4207,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
   __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      StoreIC::initialize_stub(isolate(), instr->language_mode(),
-                               instr->hydrogen()->initialization_state());
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+      isolate(), instr->language_mode(),
+      instr->hydrogen()->initialization_state()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
@@ -4498,6 +4435,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
       isolate(), instr->language_mode(),
       instr->hydrogen()->initialization_state()).code();
@@ -4505,6 +4446,100 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
 }


+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = r0;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ cmp(ToRegister(current_capacity), Operand(constant_key));
+    __ b(le, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ cmp(ToRegister(key), Operand(constant_capacity));
+    __ b(ge, deferred->entry());
+  } else {
+    __ cmp(ToRegister(key), ToRegister(current_capacity));
+    __ b(ge, deferred->entry());
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ Move(result, ToRegister(instr->elements()));
+  } else {
+    __ ldr(result, ToMemOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = r0;
+  __ mov(result, Operand::Zero());
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ ldr(result, ToMemOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
+    } else {
+      __ Move(r3, ToRegister(key));
+      __ SmiTag(r3);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ SmiTst(result);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+}
+
+
 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   Register object_reg = ToRegister(instr->object());
   Register scratch = scratch0();
@@ -5957,4 +5992,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {

 #undef __

-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
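The three compare forms in `DoMaybeGrowElements` above all encode the same predicate, "grow when key >= capacity"; the branch condition flips to `le` only because the constant-key case puts the capacity register on the left of the compare. A worked restatement (plain C++, no V8 types):

#include <cstdint>

// Take the deferred (grow) path when the store index is outside the
// current backing-store capacity.
bool TakeDeferredPath(int32_t key, int32_t capacity) {
  return key >= capacity;
}

// With a constant key the code emits cmp(capacity, #key) and branches on
// 'le': capacity <= key is the same predicate with the operands swapped.
bool TakeDeferredPathSwapped(int32_t key, int32_t capacity) {
  return capacity <= key;  // identical truth table to TakeDeferredPath
}

The deferred path then calls `GrowArrayElementsStub` and deoptimizes if the stub returns a Smi, the signal that the elements backing store fell back to dictionary mode.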
@@ -27,7 +27,6 @@ class LCodeGen: public LCodeGenBase {
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
         jump_table_(4, info->zone()),
-        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         translations_(info->zone()),
@@ -112,6 +111,7 @@ class LCodeGen: public LCodeGenBase {
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocate(LAllocate* instr);
@@ -241,7 +241,6 @@ class LCodeGen: public LCodeGenBase {
                         int* object_index_pointer,
                         int* dematerialized_index_pointer);
   void PopulateDeoptimizationData(Handle<Code> code);
-  int DefineDeoptimizationLiteral(Handle<Object> literal);

   void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -324,10 +323,11 @@ class LCodeGen: public LCodeGenBase {
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);

   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
-  ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
   TranslationBuffer translations_;
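To connect the two halves of the vector-store plumbing declared here: the chunk-builder changes earlier reserve the IC's fixed registers with `FixedTemp`, and `EmitVectorStoreICRegisters` later materializes the feedback vector and the Smi-tagged slot index into exactly those registers before the IC call. The slot index is Smi-tagged because the IC consumes it as a JavaScript value; on a 32-bit target that tagging is just a left shift. A sketch under that assumption (not V8's actual Smi class):

#include <cstdint>

// On 32-bit platforms a Smi is the integer shifted left by one bit,
// leaving tag bit 0 clear (this is what Smi::FromInt boils down to there).
constexpr int kSmiTagSize = 1;

int32_t SmiFromInt(int32_t value) {
  return value << kSmiTagSize;
}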
@@ -299,4 +299,5 @@ void LGapResolver::EmitMove(int index) {

 #undef __

-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
@@ -691,28 +691,28 @@ void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.

 void MacroAssembler::PushFixedFrame(Register marker_reg) {
   DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
-  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
-                cp.bit() |
-                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
-                fp.bit() |
-                lr.bit());
+  stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+                    (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+                    fp.bit() | lr.bit());
 }


 void MacroAssembler::PopFixedFrame(Register marker_reg) {
   DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
-  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
-                cp.bit() |
-                (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
-                fp.bit() |
-                lr.bit());
+  ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
+                    (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
+                    fp.bit() | lr.bit());
 }


 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
-  // Safepoints expect a block of contiguous register values starting with r0:
-  DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+  // Safepoints expect a block of contiguous register values starting with r0.
+  // except when FLAG_enable_embedded_constant_pool, which omits pp.
+  DCHECK(kSafepointSavedRegisters ==
+         (FLAG_enable_embedded_constant_pool
+              ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
+              : (1 << kNumSafepointSavedRegisters) - 1));
   // Safepoints expect a block of kNumSafepointRegisters values on the
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
@@ -742,6 +742,10 @@ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   // The registers are pushed starting with the highest encoding,
   // which means that lowest encodings are closest to the stack pointer.
+  if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
+    // RegList omits pp.
+    reg_code -= 1;
+  }
   DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
   return reg_code;
 }
@@ -985,13 +989,20 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
 }


+void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+    Register code_target_address) {
+  DCHECK(FLAG_enable_embedded_constant_pool);
+  ldr(pp, MemOperand(code_target_address,
+                     Code::kConstantPoolOffset - Code::kHeaderSize));
+  add(pp, pp, code_target_address);
+}
+
+
 void MacroAssembler::LoadConstantPoolPointerRegister() {
-  if (FLAG_enable_ool_constant_pool) {
-    int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
-        pc_offset() - Instruction::kPCReadOffset;
-    DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
-    ldr(pp, MemOperand(pc, constant_pool_offset));
-  }
+  DCHECK(FLAG_enable_embedded_constant_pool);
+  int entry_offset = pc_offset() + Instruction::kPCReadOffset;
+  sub(ip, pc, Operand(entry_offset));
+  LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
 }
@@ -1000,9 +1011,9 @@ void MacroAssembler::StubPrologue() {
   Push(Smi::FromInt(StackFrame::STUB));
   // Adjust FP to point to saved FP.
   add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     LoadConstantPoolPointerRegister();
-    set_ool_constant_pool_available(true);
+    set_constant_pool_available(true);
   }
 }
@@ -1025,9 +1036,9 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
       add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
     }
   }
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     LoadConstantPoolPointerRegister();
-    set_ool_constant_pool_available(true);
+    set_constant_pool_available(true);
   }
 }
@@ -1036,7 +1047,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
                                 bool load_constant_pool_pointer_reg) {
   // r0-r3: preserved
   PushFixedFrame();
-  if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
+  if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
     LoadConstantPoolPointerRegister();
   }
   mov(ip, Operand(Smi::FromInt(type)));
@@ -1056,9 +1067,9 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
   // Drop the execution stack down to the frame pointer and restore
   // the caller frame pointer, return address and constant pool pointer
-  // (if FLAG_enable_ool_constant_pool).
+  // (if FLAG_enable_embedded_constant_pool).
   int frame_ends;
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
     frame_ends = pc_offset();
     ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
@@ -1084,7 +1095,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
     mov(ip, Operand::Zero());
     str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
   }
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
   }
   mov(ip, Operand(CodeObject()));
@@ -1103,7 +1114,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // fp - ExitFrameConstants::kFrameSize -
   //   DwVfpRegister::kMaxNumRegisters * kDoubleSize,
   // since the sp slot, code slot and constant pool slot (if
-  // FLAG_enable_ool_constant_pool) were pushed after the fp.
+  // FLAG_enable_embedded_constant_pool) were pushed after the fp.
 }

 // Reserve place for the return address and stack space and align the frame
@@ -1183,7 +1194,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
 #endif

   // Tear down the exit frame, pop the arguments, and return.
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
   }
   mov(sp, Operand(fp));
@@ -1559,6 +1570,7 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
   add(t0, t0, scratch);
   // hash = hash ^ (hash >> 16);
   eor(t0, t0, Operand(t0, LSR, 16));
+  bic(t0, t0, Operand(0xc0000000u));
 }
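The single added instruction above, `bic t0, t0, #0xc0000000`, clears the top two bits of the finished hash so the result always fits in a non-negative Smi. Inlining it into the integer hash this function emits (the standard finalizer sequence visible in the surrounding context) gives, as a sketch:

#include <cstdint>

uint32_t ComputeIntegerHash(uint32_t hash) {
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  // New with this patch: mask off the two most significant bits (the bic
  // above) so the value is a valid non-negative Smi on 32-bit targets.
  return hash & 0x3fffffffu;
}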
@@ -3162,7 +3174,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
   str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
   bind(&entry);
   cmp(start_offset, end_offset);
-  b(lt, &loop);
+  b(lo, &loop);
 }
@@ -3390,7 +3402,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
   if (ActivationFrameAlignment() > kPointerSize) {
     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
-    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+    add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
   }
 }
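The second hunk above is a genuine bug fix hiding in a cleanup: `sizeof(kPointerSize)` is the size of the constant's type (an `int`), not the pointer size the code meant, and the two agree only on 32-bit targets. A worked illustration (sketch, not V8 code):

#include <cstdio>

int main() {
  const int kPointerSize = sizeof(void*);  // 4 on 32-bit ARM, 8 on 64-bit
  int stack_passed_arguments = 3;

  // Buggy form: sizeof(kPointerSize) == sizeof(int) == 4, always.
  int buggy = stack_passed_arguments * static_cast<int>(sizeof(kPointerSize));
  // Fixed form: scales by the actual pointer size.
  int fixed = stack_passed_arguments * kPointerSize;

  std::printf("%d vs %d\n", buggy, fixed);  // equal on 32-bit, not on 64-bit
  return 0;
}

On 32-bit ARM the old expression happened to compute the right value, which is presumably why it survived; the rewrite keeps other ports from inheriting the trap.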
@@ -3401,7 +3413,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
   Label small_constant_pool_load, load_result;
   ldr(result, MemOperand(ldr_location));

-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     // Check if this is an extended constant pool load.
     and_(scratch, result, Operand(GetConsantPoolLoadMask()));
     teq(scratch, Operand(GetConsantPoolLoadPattern()));
@@ -3455,7 +3467,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
   bind(&load_result);
   // Get the address of the constant.
-  if (FLAG_enable_ool_constant_pool) {
+  if (FLAG_enable_embedded_constant_pool) {
     add(result, pp, Operand(result));
   } else {
     add(result, ldr_location, Operand(result));
@ -437,7 +437,7 @@ class MacroAssembler: public Assembler {
} }
// Push a fixed frame, consisting of lr, fp, constant pool (if // Push a fixed frame, consisting of lr, fp, constant pool (if
// FLAG_enable_ool_constant_pool), context and JS function / marker id if // FLAG_enable_embedded_constant_pool), context and JS function / marker id if
// marker_reg is a valid register. // marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg); void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg); void PopFixedFrame(Register marker_reg = no_reg);
@ -1441,6 +1441,11 @@ class MacroAssembler: public Assembler {
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found); Register scratch1, Label* found);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
void LoadConstantPoolPointerRegister();
private: private:
void CallCFunctionHelper(Register function, void CallCFunctionHelper(Register function,
int num_reg_arguments, int num_reg_arguments,
@ -1482,9 +1487,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg); MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg); MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegister();
bool generating_stub_; bool generating_stub_;
bool has_frame_; bool has_frame_;
// This handle will be patched with the code object on installation. // This handle will be patched with the code object on installation.
@ -1193,6 +1193,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP #endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal } // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM #endif // V8_TARGET_ARCH_ARM
@ -774,8 +774,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
} }
Simulator::~Simulator() { Simulator::~Simulator() { free(stack_); }
}
// When the generated code calls an external reference we need to catch that in // When the generated code calls an external reference we need to catch that in
@ -824,7 +823,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) { static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction); char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection = char* addr_of_redirection =
addr_of_swi - OFFSET_OF(Redirection, swi_instruction_); addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection); return reinterpret_cast<Redirection*>(addr_of_redirection);
} }
@ -834,6 +833,14 @@ class Redirection {
return redirection->external_function(); return redirection->external_function();
} }
static void DeleteChain(Redirection* redirection) {
while (redirection != nullptr) {
Redirection* next = redirection->next_;
delete redirection;
redirection = next;
}
}
private: private:
void* external_function_; void* external_function_;
uint32_t swi_instruction_; uint32_t swi_instruction_;
@ -842,6 +849,19 @@ class Redirection {
}; };
// static
void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
delete i_cache;
}
}
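The new DeleteChain is the standard delete-while-walking idiom for a singly linked list. In isolation it looks like the generic sketch below; this is not V8 code, and "next" stands in for whatever successor field the node type carries:

// The successor must be captured before the delete: afterwards the node's
// storage, including its next pointer, is gone.
template <typename Node>
void DeleteChainSketch(Node* head) {
  while (head != nullptr) {
    Node* next = head->next;
    delete head;
    head = next;
  }
}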
void* Simulator::RedirectExternalReference(void* external_function, void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) { ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type); Redirection* redirection = Redirection::Get(external_function, type);
@ -4131,7 +4151,8 @@ uintptr_t Simulator::PopAddress() {
return address; return address;
} }
} } // namespace v8::internal } // namespace internal
} // namespace v8
#endif // USE_SIMULATOR #endif // USE_SIMULATOR
@ -194,6 +194,8 @@ class Simulator {
// Call on program start. // Call on program start.
static void Initialize(Isolate* isolate); static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into // V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function, // generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return. // which sets up the simulator state and grabs the result on return.
@ -586,14 +586,13 @@ Address Assembler::target_pointer_address_at(Address pc) {
// Read/Modify the code target address in the branch/call instruction at pc. // Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc, Address Assembler::target_address_at(Address pc, Address constant_pool) {
ConstantPoolArray* constant_pool) {
return Memory::Address_at(target_pointer_address_at(pc)); return Memory::Address_at(target_pointer_address_at(pc));
} }
Address Assembler::target_address_at(Address pc, Code* code) { Address Assembler::target_address_at(Address pc, Code* code) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool); return target_address_at(pc, constant_pool);
} }
@ -665,8 +664,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
} }
void Assembler::set_target_address_at(Address pc, void Assembler::set_target_address_at(Address pc, Address constant_pool,
ConstantPoolArray* constant_pool,
Address target, Address target,
ICacheFlushMode icache_flush_mode) { ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target; Memory::Address_at(target_pointer_address_at(pc)) = target;
@ -685,7 +683,7 @@ void Assembler::set_target_address_at(Address pc,
Code* code, Code* code,
Address target, Address target,
ICacheFlushMode icache_flush_mode) { ICacheFlushMode icache_flush_mode) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL; Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode); set_target_address_at(pc, constant_pool, target, icache_flush_mode);
} }
@ -867,8 +865,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn(). // See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_); Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following(); Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) && return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code()); i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code);
} }
@ -1084,13 +1082,14 @@ Instr Assembler::SF(Register rd) {
} }
Instr Assembler::ImmAddSub(int64_t imm) { Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm)); DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required. if (is_uint12(imm)) { // No shift required.
return imm << ImmAddSub_offset; imm <<= ImmAddSub_offset;
} else { } else {
return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset); imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
} }
return imm;
} }
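The parameter narrowing from int64_t to int is sound because an encodable add/sub immediate needs at most 24 bits: a 12-bit value, optionally shifted left by 12. A sketch of the predicate behind the DCHECK, assuming it matches the assembler's IsImmAddSub; IsUint12 is a stand-in for the usual bit-width helper:

#include <cstdint>

static bool IsUint12(int64_t x) { return x >= 0 && x < (int64_t{1} << 12); }

bool IsImmAddSubSketch(int64_t imm) {
  // Either a plain 12-bit immediate, or a 12-bit immediate shifted by 12
  // (the low 12 bits are zero and the next 12 bits hold the value).
  return IsUint12(imm) || (IsUint12(imm >> 12) && (imm & 0xfff) == 0);
}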
@ -1239,13 +1238,13 @@ LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
} }
Instr Assembler::ImmMoveWide(uint64_t imm) { Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm)); DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset; return imm << ImmMoveWide_offset;
} }
Instr Assembler::ShiftMoveWide(int64_t shift) { Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift)); DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset; return shift << ShiftMoveWide_offset;
} }
@ -580,8 +580,9 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = reinterpret_cast<byte*>(buffer_); desc->buffer = reinterpret_cast<byte*>(buffer_);
desc->buffer_size = buffer_size_; desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset(); desc->instr_size = pc_offset();
desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) - desc->reloc_size =
reloc_info_writer.pos(); static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this; desc->origin = this;
} }
} }
@ -600,13 +601,13 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
if (label->is_linked()) { if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n2) behaviour. static const int kMaxLinksToCheck = 64; // Avoid O(n2) behaviour.
int links_checked = 0; int links_checked = 0;
int linkoffset = label->pos(); int64_t linkoffset = label->pos();
bool end_of_chain = false; bool end_of_chain = false;
while (!end_of_chain) { while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break; if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset); Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset(); int64_t linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset; int64_t prevlinkoffset = linkoffset + linkpcoffset;
end_of_chain = (linkoffset == prevlinkoffset); end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset; linkoffset = linkoffset + linkpcoffset;
@ -645,7 +646,8 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// currently referring to this label. // currently referring to this label.
label->Unuse(); label->Unuse();
} else { } else {
label->link_to(reinterpret_cast<byte*>(next_link) - buffer_); label->link_to(
static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
} }
} else if (branch == next_link) { } else if (branch == next_link) {
@ -721,7 +723,7 @@ void Assembler::bind(Label* label) {
while (label->is_linked()) { while (label->is_linked()) {
int linkoffset = label->pos(); int linkoffset = label->pos();
Instruction* link = InstructionAt(linkoffset); Instruction* link = InstructionAt(linkoffset);
int prevlinkoffset = linkoffset + link->ImmPCOffset(); int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());
CheckLabelLinkChain(label); CheckLabelLinkChain(label);
@ -811,12 +813,13 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
while (!end_of_chain) { while (!end_of_chain) {
Instruction * link = InstructionAt(link_offset); Instruction * link = InstructionAt(link_offset);
link_pcoffset = link->ImmPCOffset(); link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers. // ADR instructions are not handled by veneers.
if (link->IsImmBranch()) { if (link->IsImmBranch()) {
int max_reachable_pc = InstructionOffset(link) + int max_reachable_pc =
Instruction::ImmBranchRange(link->BranchType()); static_cast<int>(InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType()));
typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it; typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
std::pair<unresolved_info_it, unresolved_info_it> range; std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc); range = unresolved_branches_.equal_range(max_reachable_pc);
@ -888,12 +891,12 @@ bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions // The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough: // will never be emitted by the JIT, so checking for the first one is enough:
// 0: ldr xzr, #<size of pool> // 0: ldr xzr, #<size of pool>
bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code()); bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);
// It is still worth asserting the marker is complete. // It is still worth asserting the marker is complete.
// 4: blr xzr // 4: blr xzr
DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() && DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
instr->following()->Rn() == xzr.code())); instr->following()->Rn() == kZeroRegCode));
return result; return result;
} }
@ -909,7 +912,7 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
const char* message = const char* message =
reinterpret_cast<const char*>( reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset)); instr->InstructionAtOffset(kDebugMessageOffset));
int size = kDebugMessageOffset + strlen(message) + 1; int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize; return RoundUp(size, kInstructionSize) / kInstructionSize;
} }
// Same for printf support, see MacroAssembler::CallPrintf(). // Same for printf support, see MacroAssembler::CallPrintf().
@ -1599,9 +1602,11 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// 'rt' and 'rt2' can only be aliased for stores. // 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2)); DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
int offset = static_cast<int>(addr.offset());
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), CalcLSPairDataSize(op)); ImmLSPair(offset, CalcLSPairDataSize(op));
Instr addrmodeop; Instr addrmodeop;
if (addr.IsImmediateOffset()) { if (addr.IsImmediateOffset()) {
@ -1645,11 +1650,11 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
DCHECK(!rt.Is(rt2)); DCHECK(!rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2)); DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(addr.IsImmediateOffset()); DCHECK(addr.IsImmediateOffset());
LSDataSize size = CalcLSPairDataSize( LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask)); static_cast<LoadStorePairOp>(op & LoadStorePairMask));
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | DCHECK(IsImmLSPair(addr.offset(), size));
ImmLSPair(addr.offset(), size)); int offset = static_cast<int>(addr.offset());
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
} }
@ -2137,13 +2142,13 @@ Instr Assembler::ImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000 // 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm); uint64_t bits = double_to_rawbits(imm);
// bit7: a000.0000 // bit7: a000.0000
uint32_t bit7 = ((bits >> 63) & 0x1) << 7; uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000 // bit6: 0b00.0000
uint32_t bit6 = ((bits >> 61) & 0x1) << 6; uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh // bit5_to_0: 00cd.efgh
uint32_t bit5_to_0 = (bits >> 48) & 0x3f; uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
} }
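The switch to uint64_t locals keeps the extraction in 64-bit arithmetic until the final cast. As a worked example of the packing (a sketch, following the aBbb.cdef.gh field layout named in the surrounding comments):

#include <cstdint>

uint64_t PackFP64ImmediateExample() {
  uint64_t bits = 0x3FF0000000000000ULL;      // raw bits of the double 1.0
  uint64_t bit7 = ((bits >> 63) & 0x1) << 7;  // sign bit 'a'      -> 0x00
  uint64_t bit6 = ((bits >> 61) & 0x1) << 6;  // exponent bit 'B'  -> 0x40
  uint64_t bit5_to_0 = (bits >> 48) & 0x3f;   // '00cd.efgh' bits  -> 0x30
  return bit7 | bit6 | bit5_to_0;             // 0x70: the imm8 for #1.0
}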
@ -2188,8 +2193,8 @@ void Assembler::MoveWide(const Register& rd,
DCHECK(is_uint16(imm)); DCHECK(is_uint16(imm));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift)); ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
} }
@ -2205,7 +2210,7 @@ void Assembler::AddSub(const Register& rd,
DCHECK(IsImmAddSub(immediate)); DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ImmAddSub(immediate) | dest_reg | RnSP(rn)); ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) { } else if (operand.IsShiftedRegister()) {
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits()); DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR); DCHECK(operand.shift() != ROR);
@ -2259,7 +2264,7 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) { void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1; size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap)); DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len); EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned. // Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'}; const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize); STATIC_ASSERT(sizeof(pad) == kInstructionSize);
@ -2362,7 +2367,8 @@ void Assembler::ConditionalCompare(const Register& rn,
if (operand.IsImmediate()) { if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue(); int64_t immediate = operand.ImmediateValue();
DCHECK(IsImmConditionalCompare(immediate)); DCHECK(IsImmConditionalCompare(immediate));
ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate); ccmpop = ConditionalCompareImmediateFixed | op |
ImmCondCmp(static_cast<unsigned>(immediate));
} else { } else {
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
@ -2502,15 +2508,16 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr, const MemOperand& addr,
LoadStoreOp op) { LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base()); Instr memop = op | Rt(rt) | RnSP(addr.base());
int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) { if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op); LSDataSize size = CalcLSDataSize(op);
if (IsImmLSScaled(offset, size)) { if (IsImmLSScaled(addr.offset(), size)) {
int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode. // Use the scaled addressing mode.
Emit(LoadStoreUnsignedOffsetFixed | memop | Emit(LoadStoreUnsignedOffsetFixed | memop |
ImmLSUnsigned(offset >> size)); ImmLSUnsigned(offset >> size));
} else if (IsImmLSUnscaled(offset)) { } else if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
// Use the unscaled addressing mode. // Use the unscaled addressing mode.
Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset)); Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
} else { } else {
@ -2536,7 +2543,8 @@ void Assembler::LoadStore(const CPURegister& rt,
} else { } else {
// Pre-index and post-index modes. // Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base())); DCHECK(!rt.Is(addr.base()));
if (IsImmLSUnscaled(offset)) { if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) { if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else { } else {
@ -2568,6 +2576,14 @@ bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
} }
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstructionSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
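The new IsImmLLiteral boils down to two conditions on a pc-relative literal offset: it must be a whole number of instructions, and it must fit the signed ImmLLiteral field. Written directly (a sketch; assumes kInstructionSizeLog2 == 2, with "width" standing in for ImmLLiteral_width):

#include <cstdint>

bool IsImmLLiteralSketch(int64_t offset, int width) {
  bool aligned = (offset & 3) == 0;  // multiple of the 4-byte instruction size
  int64_t limit = int64_t{1} << (width - 1);
  bool fits = (offset >= -limit) && (offset < limit);  // is_intn(offset, width)
  return aligned && fits;
}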
// Test if a given value can be encoded in the immediate field of a logical // Test if a given value can be encoded in the immediate field of a logical
// instruction. // instruction.
// If it can be encoded, the function returns true, and values pointed to by n, // If it can be encoded, the function returns true, and values pointed to by n,
@ -2849,7 +2865,8 @@ void Assembler::GrowBuffer() {
desc.buffer = NewArray<byte>(desc.buffer_size); desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset(); desc.instr_size = pc_offset();
desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos(); desc.reloc_size =
static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
// Copy the data. // Copy the data.
intptr_t pc_delta = desc.buffer - buffer; intptr_t pc_delta = desc.buffer - buffer;
@ -3065,7 +3082,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
} }
// Record the veneer pool size. // Record the veneer pool size.
int pool_size = SizeOfCodeGeneratedSince(&size_check); int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size); RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
if (unresolved_branches_.empty()) { if (unresolved_branches_.empty()) {
@ -3113,7 +3130,8 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int Assembler::buffer_space() const { int Assembler::buffer_space() const {
return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_); return static_cast<int>(reloc_info_writer.pos() -
reinterpret_cast<byte*>(pc_));
} }
@ -3124,20 +3142,6 @@ void Assembler::RecordConstPool(int size) {
} }
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return isolate->factory()->empty_constant_pool_array();
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return;
}
void PatchingAssembler::PatchAdrFar(int64_t target_offset) { void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be: // The code at the current instruction should be:
// adr rd, 0 // adr rd, 0
@ -3171,6 +3175,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
} }
} } // namespace v8::internal } // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64 #endif // V8_TARGET_ARCH_ARM64
@ -764,7 +764,7 @@ class ConstPool {
shared_entries_count(0) {} shared_entries_count(0) {}
void RecordEntry(intptr_t data, RelocInfo::Mode mode); void RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const { int EntryCount() const {
return shared_entries_count + unique_entries_.size(); return shared_entries_count + static_cast<int>(unique_entries_.size());
} }
bool IsEmpty() const { bool IsEmpty() const {
return shared_entries_.empty() && unique_entries_.empty(); return shared_entries_.empty() && unique_entries_.empty();
@ -851,6 +851,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple // possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4). // of m. m must be a power of 2 (>= 4).
void Align(int m); void Align(int m);
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
inline void Unreachable(); inline void Unreachable();
@ -871,13 +874,10 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc); inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc. // Read/Modify the code target address in the branch/call instruction at pc.
inline static Address target_address_at(Address pc, inline static Address target_address_at(Address pc, Address constant_pool);
ConstantPoolArray* constant_pool); inline static void set_target_address_at(
inline static void set_target_address_at(Address pc, Address pc, Address constant_pool, Address target,
ConstantPoolArray* constant_pool, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code); static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc, static inline void set_target_address_at(Address pc,
Code* code, Code* code,
@ -951,7 +951,7 @@ class Assembler : public AssemblerBase {
// Return the number of instructions generated from label to the // Return the number of instructions generated from label to the
// current position. // current position.
int InstructionsGeneratedSince(const Label* label) { uint64_t InstructionsGeneratedSince(const Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstructionSize; return SizeOfCodeGeneratedSince(label) / kInstructionSize;
} }
@ -1767,6 +1767,8 @@ class Assembler : public AssemblerBase {
// Required by V8. // Required by V8.
void dd(uint32_t data) { dc32(data); } void dd(uint32_t data) { dc32(data); }
void db(uint8_t data) { dc8(data); } void db(uint8_t data) { dc8(data); }
void dq(uint64_t data) { dc64(data); }
void dp(uintptr_t data) { dc64(data); }
// Code generation helpers -------------------------------------------------- // Code generation helpers --------------------------------------------------
@ -1774,7 +1776,7 @@ class Assembler : public AssemblerBase {
Instruction* pc() const { return Instruction::Cast(pc_); } Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const { Instruction* InstructionAt(ptrdiff_t offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset); return reinterpret_cast<Instruction*>(buffer_ + offset);
} }
@ -1841,7 +1843,7 @@ class Assembler : public AssemblerBase {
// Data Processing encoding. // Data Processing encoding.
inline static Instr SF(Register rd); inline static Instr SF(Register rd);
inline static Instr ImmAddSub(int64_t imm); inline static Instr ImmAddSub(int imm);
inline static Instr ImmS(unsigned imms, unsigned reg_size); inline static Instr ImmS(unsigned imms, unsigned reg_size);
inline static Instr ImmR(unsigned immr, unsigned reg_size); inline static Instr ImmR(unsigned immr, unsigned reg_size);
inline static Instr ImmSetBits(unsigned imms, unsigned reg_size); inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
@ -1876,10 +1878,11 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(int64_t offset); static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size); static bool IsImmLSScaled(int64_t offset, LSDataSize size);
static bool IsImmLLiteral(int64_t offset);
// Move immediates encoding. // Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm); inline static Instr ImmMoveWide(int imm);
inline static Instr ShiftMoveWide(int64_t shift); inline static Instr ShiftMoveWide(int shift);
// FP Immediates. // FP Immediates.
static Instr ImmFP32(float imm); static Instr ImmFP32(float imm);
@ -1908,11 +1911,12 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool. // Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump); void CheckConstPool(bool force_emit, bool require_jump);
// Allocate a constant pool of the correct size for the generated code. void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate); ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
// Generate the constant pool for the generated code. // No embedded constant pool support.
void PopulateConstantPool(ConstantPoolArray* constant_pool); UNREACHABLE();
}
// Returns true if we should emit a veneer as soon as possible for a branch // Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc. // which can at most reach to specified pc.
@ -331,6 +331,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm, static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function, bool is_api_function,
bool use_new_target,
bool create_memento) { bool create_memento) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- x0 : number of arguments // -- x0 : number of arguments
@ -360,11 +361,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0; Register argc = x0;
Register constructor = x1; Register constructor = x1;
Register original_constructor = x3; Register original_constructor = x3;
// x1: constructor function
// Preserve the incoming parameters on the stack.
__ SmiTag(argc); __ SmiTag(argc);
__ Push(argc, constructor); if (use_new_target) {
// sp[0] : Constructor function. __ Push(argc, constructor, original_constructor);
// sp[1]: number of arguments (smi-tagged) } else {
__ Push(argc, constructor);
}
// sp[0]: new.target (if used)
// sp[0/1]: Constructor function.
// sp[1/2]: number of arguments (smi-tagged)
Label rt_call, count_incremented, allocated, normal_new; Label rt_call, count_incremented, allocated, normal_new;
__ Cmp(constructor, original_constructor); __ Cmp(constructor, original_constructor);
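Putting the two push sequences side by side; the frame layout below is a sketch reconstructed from the sp[...] comments in this hunk:

//                 use_new_target == false     use_new_target == true
//   jssp[0]:      constructor function        new.target
//   jssp[1]:      argc (smi-tagged)           constructor function
//   jssp[2]:      ...                         argc (smi-tagged)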
@ -522,7 +529,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Add(new_obj, new_obj, kHeapObjectTag); __ Add(new_obj, new_obj, kHeapObjectTag);
// Check if a non-empty properties array is needed. Continue with // Check if a non-empty properties array is needed. Continue with
// allocated object if not, or fall through to runtime call if it is. // allocated object if not; allocate and initialize a FixedArray if yes.
Register element_count = x3; Register element_count = x3;
__ Ldrb(element_count, __ Ldrb(element_count,
FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset)); FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
@ -580,7 +587,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&allocated); __ Bind(&allocated);
if (create_memento) { if (create_memento) {
__ Peek(x10, 2 * kXRegSize); int offset = (use_new_target ? 3 : 2) * kXRegSize;
__ Peek(x10, offset);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented); __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we // r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count. // need to increment the memento create count.
@ -592,18 +600,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented); __ bind(&count_incremented);
} }
__ Push(x4, x4); // Restore the parameters.
if (use_new_target) {
__ Pop(original_constructor);
}
__ Pop(constructor);
// Reload the number of arguments from the stack. // Reload the number of arguments from the stack.
// Set it up in x0 for the function call below. // Set it up in x0 for the function call below.
// jssp[0]: receiver // jssp[0]: number of arguments (smi-tagged)
// jssp[1]: receiver __ Peek(argc, 0); // Load number of arguments.
// jssp[2]: constructor function
// jssp[3]: number of arguments (smi-tagged)
__ Peek(constructor, 2 * kXRegSize); // Load constructor.
__ Peek(argc, 3 * kXRegSize); // Load number of arguments.
__ SmiUntag(argc); __ SmiUntag(argc);
if (use_new_target) {
__ Push(original_constructor, x4, x4);
} else {
__ Push(x4, x4);
}
// Set up pointer to last argument. // Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset); __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -614,8 +628,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp) // x2: address of last argument (caller sp)
// jssp[0]: receiver // jssp[0]: receiver
// jssp[1]: receiver // jssp[1]: receiver
// jssp[2]: constructor function // jssp[2]: new.target (if used)
// jssp[3]: number of arguments (smi-tagged) // jssp[2/3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3. // Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2)); __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments; Label loop, entry, done_copying_arguments;
@ -646,15 +660,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
} }
// Store offset of return address for deoptimizer. // Store offset of return address for deoptimizer.
if (!is_api_function) { // TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
} }
// Restore the context from the frame. // Restore the context from the frame.
// x0: result // x0: result
// jssp[0]: receiver // jssp[0]: receiver
// jssp[1]: constructor function // jssp[1]: new.target (if used)
// jssp[2]: number of arguments (smi-tagged) // jssp[1/2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid // If the result is an object (in the ECMA sense), we should get rid
@ -665,8 +681,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense. // If the result is a smi, it is *not* an object in the ECMA sense.
// x0: result // x0: result
// jssp[0]: receiver (newly allocated object) // jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function // jssp[1]: number of arguments (smi-tagged)
// jssp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(x0, &use_receiver); __ JumpIfSmi(x0, &use_receiver);
// If the type of the result (stored in its map) is less than // If the type of the result (stored in its map) is less than
@ -683,9 +698,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit); __ Bind(&exit);
// x0: result // x0: result
// jssp[0]: receiver (newly allocated object) // jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function // jssp[1]: new.target (if used)
// jssp[2]: number of arguments (smi-tagged) // jssp[1/2]: number of arguments (smi-tagged)
__ Peek(x1, 2 * kXRegSize); int offset = (use_new_target ? 2 : 1) * kXRegSize;
__ Peek(x1, offset);
// Leave construct frame. // Leave construct frame.
} }
@ -698,12 +714,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
} }
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false); Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
} }
@ -731,7 +752,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// sp[1]: new.target // sp[1]: new.target
// sp[2]: receiver (the hole) // sp[2]: receiver (the hole)
// Set up pointer to last argument. // Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset); __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -759,8 +779,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ Drop(1); __ Drop(1);
__ Bind(&done_copying_arguments); __ Bind(&done_copying_arguments);
__ Add(x0, x0, Operand(1)); // new.target
// Handle step in. // Handle step in.
Label skip_step_in; Label skip_step_in;
ExternalReference debug_step_in_fp = ExternalReference debug_step_in_fp =
@ -787,8 +805,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// jssp[0]: number of arguments (smi-tagged) // jssp[0]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Load number of arguments (smi). // Load number of arguments (smi), skipping over new.target.
__ Peek(x1, 0); __ Peek(x1, kPointerSize);
// Leave construct frame // Leave construct frame
} }
@ -1388,6 +1406,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop; Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister(); Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister(); Register key = LoadDescriptor::NameRegister();
Register slot = LoadDescriptor::SlotRegister();
Register vector = LoadWithVectorDescriptor::VectorRegister();
__ Ldr(key, MemOperand(fp, indexOffset)); __ Ldr(key, MemOperand(fp, indexOffset));
__ B(&entry); __ B(&entry);
@ -1397,7 +1417,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset)); __ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments. // Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic(); FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ Mov(slot, Smi::FromInt(index));
__ Mov(vector, feedback_vector);
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET); __ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument. // Push the nth argument.
@ -1733,13 +1760,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected { // Too few parameters: Actual < expected
__ Bind(&too_few); __ Bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
Register copy_from = x10; Register copy_from = x10;
Register copy_end = x11; Register copy_end = x11;
Register copy_to = x12; Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14; Register scratch1 = x13, scratch2 = x14;
// If the function is strong we need to throw an error.
Label no_strong_error;
__ Ldr(scratch1,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAllClear(scratch2.W(),
(1 << SharedFunctionInfo::kStrongModeFunction),
&no_strong_error);
// What we really care about is the required number of arguments.
DCHECK_EQ(kPointerSize, kInt64Size);
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
__ Cmp(argc_actual, Operand(scratch2, LSR, 1));
__ B(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
}
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
__ Lsl(argc_expected, argc_expected, kPointerSizeLog2); __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2); __ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
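In plain terms, the block added above throws before adapting if a strong-mode function receives fewer arguments than it declares. A standalone sketch of the predicate, using a hypothetical struct and names; the ">> 1" mirrors the LSR #1, which untags the 32-bit smi holding the declared length (the storage assumption that DCHECK_EQ(kPointerSize, kInt64Size) pins down):

#include <cstdint>

struct SharedInfoSketch {   // hypothetical stand-in, not V8's actual layout
  uint32_t compiler_hints;
  int32_t length_smi;       // declared arity, stored as value << 1
};

bool ThrowsStrongModeTooFewArgs(const SharedInfoSketch& shared,
                                int argc_actual, int strong_mode_bit) {
  bool is_strong = (shared.compiler_hints >> strong_mode_bit) & 1;
  int required = shared.length_smi >> 1;  // LSR #1: untag the smi
  return is_strong && argc_actual < required;
}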
@ -1810,6 +1862,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __ #undef __
} } // namespace v8::internal } // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM #endif // V8_TARGET_ARCH_ARM
@ -102,17 +102,17 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment(); isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor(); CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetEnvironmentParameterCount(); int param_count = descriptor.GetRegisterParameterCount();
{ {
// Call the runtime system in a fresh internal frame. // Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL); FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) || DCHECK((param_count == 0) ||
x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1))); x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments // Push arguments
MacroAssembler::PushPopQueue queue(masm); MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) { for (int i = 0; i < param_count; ++i) {
queue.Queue(descriptor.GetEnvironmentParameterRegister(i)); queue.Queue(descriptor.GetRegisterParameter(i));
} }
queue.PushQueued(); queue.PushQueued();
@ -203,13 +203,11 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description. // See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register left, Register right, Register scratch,
Register right,
Register scratch,
FPRegister double_scratch, FPRegister double_scratch,
Label* slow, Label* slow, Condition cond,
Condition cond) { Strength strength) {
DCHECK(!AreAliased(left, right, scratch)); DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number; Label not_identical, return_equal, heap_number;
Register result = x0; Register result = x0;
@ -223,10 +221,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Smis. If it's not a heap number, then return equal. // Smis. If it's not a heap number, then return equal.
Register right_type = scratch; Register right_type = scratch;
if ((cond == lt) || (cond == gt)) { if ((cond == lt) || (cond == gt)) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE, __ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
slow, ge); slow, ge);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE); __ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow); __ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
__ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
__ B(eq, &return_equal);
__ Tst(right_type, Operand(kIsNotStringMask));
__ B(ne, slow);
}
} else if (cond == eq) { } else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number); __ JumpIfHeapNumber(right, &heap_number);
} else { } else {
@ -235,8 +243,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Comparing JS objects with <=, >= is complicated. // Comparing JS objects with <=, >= is complicated.
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, slow); __ B(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE); __ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow); __ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
// already been ruled out.
__ Tst(right_type, Operand(kIsNotStringMask));
__ B(ne, slow);
}
// Normally here we fall through to return_equal, but undefined is // Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but // special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5. // (undefined <= undefined) == false! See ECMAScript 11.8.5.
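The strong-mode branches above amount to a small decision table for identical operands; the summary below is a sketch reconstructed from the comparisons in this hunk:

//   operand type                   outcome under is_strong(strength)
//   ----------------------------   ---------------------------------
//   smi                            already handled before this point
//   heap number                    return_equal
//   string                         falls through to return_equal
//   anything else (converted by    slow path: the runtime throws a
//   the comparison semantics)      TypeError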
@ -513,7 +529,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer // Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical. // or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond); EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
strength());
// If either is a smi (we know that at least one is not a smi), then they can // If either is a smi (we know that at least one is not a smi), then they can
// only be strictly equal if the other is a HeapNumber. // only be strictly equal if the other is a HeapNumber.
@ -632,7 +649,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) { if (cond == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else { } else {
native = Builtins::COMPARE; native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) { if ((cond == lt) || (cond == le)) {
ncr = GREATER; ncr = GREATER;
@ -1433,9 +1451,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister(); Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before // Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler. // calling the miss handler.
DCHECK(!FLAG_vector_ics || DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
!AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(), LoadWithVectorDescriptor::SlotRegister()));
VectorLoadICDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss); x11, &miss);
@ -1455,9 +1472,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register result = x0; Register result = x0;
Register scratch = x10; Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index)); DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics || DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) && result.is(LoadWithVectorDescriptor::SlotRegister()));
result.is(VectorLoadICDescriptor::SlotRegister())));
// StringCharAtGenerator doesn't use the result register until it's passed // StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict // the different miss possibilities. If it did, we would have a conflict
@ -1669,7 +1685,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
CHECK(!has_new_target());
Register arg_count = ArgumentsAccessReadDescriptor::parameter_count(); Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
Register key = ArgumentsAccessReadDescriptor::index(); Register key = ArgumentsAccessReadDescriptor::index();
DCHECK(arg_count.is(x0)); DCHECK(arg_count.is(x0));
@ -1726,8 +1741,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// jssp[8]: address of receiver argument // jssp[8]: address of receiver argument
// jssp[16]: function // jssp[16]: function
CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame. // Check if the calling frame is an arguments adaptor frame.
Label runtime; Label runtime;
Register caller_fp = x10; Register caller_fp = x10;
@ -1759,8 +1772,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// //
// Returns pointer to result object in x0. // Returns pointer to result object in x0.
CHECK(!has_new_target());
// Note: arg_count_smi is an alias of param_count_smi. // Note: arg_count_smi is an alias of param_count_smi.
Register arg_count_smi = x3; Register arg_count_smi = x3;
Register param_count_smi = x3; Register param_count_smi = x3;
@ -2087,15 +2098,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(caller_fp, MemOperand(caller_fp,
ArgumentsAdaptorFrameConstants::kLengthOffset)); ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi); __ SmiUntag(param_count, param_count_smi);
if (has_new_target()) {
__ Cmp(param_count, Operand(0));
Label skip_decrement;
__ B(eq, &skip_decrement);
// Skip new.target: it is not a part of arguments.
__ Sub(param_count, param_count, Operand(1));
__ SmiTag(param_count_smi, param_count);
__ Bind(&skip_decrement);
}
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset); __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
@ -2192,19 +2194,21 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) { void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry. // Stack layout on entry.
// jssp[0]: index of rest parameter (tagged) // jssp[0]: language mode (tagged)
// jssp[8]: number of parameters (tagged) // jssp[8]: index of rest parameter (tagged)
// jssp[16]: address of receiver argument // jssp[16]: number of parameters (tagged)
// jssp[24]: address of receiver argument
// //
// Returns pointer to result object in x0. // Returns pointer to result object in x0.
// Get the stub arguments from the frame, and make an untagged copy of the // Get the stub arguments from the frame, and make an untagged copy of the
// parameter count. // parameter count.
Register rest_index_smi = x1; Register language_mode_smi = x1;
Register param_count_smi = x2; Register rest_index_smi = x2;
Register params = x3; Register param_count_smi = x3;
Register params = x4;
Register param_count = x13; Register param_count = x13;
__ Pop(rest_index_smi, param_count_smi, params); __ Pop(language_mode_smi, rest_index_smi, param_count_smi, params);
__ SmiUntag(param_count, param_count_smi); __ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed. // Test if arguments adaptor needed.
@ -2217,11 +2221,12 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &runtime); __ B(ne, &runtime);
// x1 rest_index_smi index of rest parameter // x1 language_mode_smi language mode
// x2 param_count_smi number of parameters passed to function (smi) // x2 rest_index_smi index of rest parameter
// x3 params pointer to parameters // x3 param_count_smi number of parameters passed to function (smi)
// x11 caller_fp caller's frame pointer // x4 params pointer to parameters
// x13 param_count number of parameters passed to function // x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
// Patch the argument length and parameters pointer. // Patch the argument length and parameters pointer.
__ Ldr(param_count_smi, __ Ldr(param_count_smi,
@ -2232,8 +2237,8 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset); __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime); __ Bind(&runtime);
__ Push(params, param_count_smi, rest_index_smi); __ Push(params, param_count_smi, rest_index_smi, language_mode_smi);
__ TailCallRuntime(Runtime::kNewRestParam, 3, 1); __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
} }
@ -2286,27 +2291,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Register last_match_info_elements = x21; Register last_match_info_elements = x21;
Register code_object = x22; Register code_object = x22;
// TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
CPURegList used_callee_saved_registers(subject,
regexp_data,
last_match_info_elements,
code_object);
__ PushCPURegList(used_callee_saved_registers);
// Stack frame. // Stack frame.
// jssp[0] : x19 // jssp[00]: last_match_info (JSArray)
// jssp[8] : x20 // jssp[08]: previous index
// jssp[16]: x21 // jssp[16]: subject string
// jssp[24]: x22 // jssp[24]: JSRegExp object
// jssp[32]: last_match_info (JSArray)
// jssp[40]: previous index
// jssp[48]: subject string
// jssp[56]: JSRegExp object
const int kLastMatchInfoOffset = 4 * kPointerSize; const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 5 * kPointerSize; const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 6 * kPointerSize; const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 7 * kPointerSize; const int kJSRegExpOffset = 3 * kPointerSize;
// Ensure that a RegExp stack is allocated. // Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address = ExternalReference address_of_regexp_stack_memory_address =
@ -2673,7 +2667,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Return last match info. // Return last match info.
__ Peek(x0, kLastMatchInfoOffset); __ Peek(x0, kLastMatchInfoOffset);
__ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack. // Drop the 4 arguments of the stub from the stack.
__ Drop(4); __ Drop(4);
__ Ret(); __ Ret();
@ -2696,13 +2689,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Bind(&failure); __ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value())); __ Mov(x0, Operand(isolate()->factory()->null_value()));
__ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack. // Drop the 4 arguments of the stub from the stack.
__ Drop(4); __ Drop(4);
__ Ret(); __ Ret();
__ Bind(&runtime); __ Bind(&runtime);
__ PopCPURegList(used_callee_saved_registers);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1); __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling. // Deferred code for string handling.
@@ -3100,10 +3091,18 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
   __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
   __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);

-  Register allocation_site = feedback_vector;
-  __ Mov(allocation_site, scratch);
+  // Increment the call count for monomorphic function calls.
+  __ Add(feedback_vector, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Add(feedback_vector, feedback_vector,
+         Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ Ldr(index, FieldMemOperand(feedback_vector, 0));
+  __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Str(index, FieldMemOperand(feedback_vector, 0));

-  Register original_constructor = x3;
+  Register allocation_site = feedback_vector;
+  Register original_constructor = index;
+  __ Mov(allocation_site, scratch);
   __ Mov(original_constructor, function);
   ArrayConstructorStub stub(masm->isolate(), arg_count());
   __ TailCallStub(&stub);

@@ -3169,6 +3168,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // convincing us that we have a monomorphic JSFunction.
   __ JumpIfSmi(function, &extra_checks_or_miss);

+  // Increment the call count for monomorphic function calls.
+  __ Add(feedback_vector, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Add(feedback_vector, feedback_vector,
+         Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ Ldr(index, FieldMemOperand(feedback_vector, 0));
+  __ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Str(index, FieldMemOperand(feedback_vector, 0));
+
   __ bind(&have_js_function);
   if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);

@@ -3244,6 +3252,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
   __ Adds(x4, x4, Operand(Smi::FromInt(1)));
   __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));

+  // Initialize the call counter.
+  __ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
+  __ Adds(x4, feedback_vector,
+          Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
+
   // Store the function. Use a stub since we need a frame for allocation.
   // x2 - vector
   // x3 - slot
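The counter blocks above share one addressing scheme: the feedback slot lives at FixedArray::kHeaderSize + slot * kPointerSize, and the call count sits one pointer-sized slot after it, stored as a Smi so it can be bumped without untagging. A self-contained sketch of that layout (plain C++, not V8 code; the header size and Smi encoding are assumptions for a 64-bit build):

    #include <cstdint>
    #include <cstdio>

    constexpr int kPointerSize = 8;
    constexpr int kHeaderSize = 2 * kPointerSize;  // assumed map + length header
    constexpr int kCallCountIncrement = 1;

    // Assumed 64-bit Smi encoding: payload in the upper 32 bits, tag bit clear,
    // so two Smis can be added as plain machine words.
    constexpr int64_t SmiFromInt(int32_t v) {
      return static_cast<int64_t>(v) << 32;
    }
    constexpr int32_t SmiToInt(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }

    int main() {
      int64_t vector[16] = {0};  // stand-in for the feedback vector's payload
      const int slot = 3;        // untagged feedback slot index
      // Same arithmetic as the stub: header + slot, then one slot further.
      const int count_index =
          (kHeaderSize + slot * kPointerSize) / kPointerSize + 1;
      vector[count_index] += SmiFromInt(kCallCountIncrement);  // no untagging
      std::printf("call count = %d\n", SmiToInt(vector[count_index]));
      return 0;
    }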
@@ -3338,9 +3352,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
   // If index is a heap number, try converting it to an integer.
   __ JumpIfNotHeapNumber(index_, index_not_number_);
   call_helper.BeforeCall(masm);
-  if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
-    __ Push(VectorLoadICDescriptor::VectorRegister(),
-            VectorLoadICDescriptor::SlotRegister(), object_, index_);
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ Push(LoadWithVectorDescriptor::VectorRegister(),
+            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
   } else {
     // Save object_ on the stack and pass index_ as argument for runtime call.
     __ Push(object_, index_);

@@ -3355,9 +3369,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Mov(index_, x0);
-  if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
-    __ Pop(object_, VectorLoadICDescriptor::SlotRegister(),
-           VectorLoadICDescriptor::VectorRegister());
+  if (embed_mode == PART_OF_IC_HANDLER) {
+    __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
+           LoadWithVectorDescriptor::VectorRegister());
   } else {
     __ Pop(object_);
   }

@@ -3485,7 +3499,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   __ Ret();

   __ Bind(&unordered);
-  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
                      CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

@@ -4481,15 +4495,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {

 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
-  VectorRawLoadStub stub(isolate(), state());
+  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+  LoadICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }

 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
-  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
-  VectorRawKeyedLoadStub stub(isolate());
+  EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+  KeyedLoadICStub stub(isolate(), state());
   stub.GenerateForTrampoline(masm);
 }

@@ -4508,12 +4522,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
 }

-void VectorRawLoadStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
+void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }

-void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   GenerateImpl(masm, true);
 }

@@ -4610,11 +4622,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
 }

-void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = VectorLoadICDescriptor::ReceiverRegister();  // x1
-  Register name = VectorLoadICDescriptor::NameRegister();          // x2
-  Register vector = VectorLoadICDescriptor::VectorRegister();      // x3
-  Register slot = VectorLoadICDescriptor::SlotRegister();          // x0
+void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
+  Register name = LoadWithVectorDescriptor::NameRegister();          // x2
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
   Register feedback = x4;
   Register receiver_map = x5;
   Register scratch1 = x6;

@@ -4654,21 +4666,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
 }

-void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+void KeyedLoadICStub::Generate(MacroAssembler* masm) {
   GenerateImpl(masm, false);
 }

-void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
   GenerateImpl(masm, true);
 }

-void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = VectorLoadICDescriptor::ReceiverRegister();  // x1
-  Register key = VectorLoadICDescriptor::NameRegister();           // x2
-  Register vector = VectorLoadICDescriptor::VectorRegister();      // x3
-  Register slot = VectorLoadICDescriptor::SlotRegister();          // x0
+void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // x1
+  Register key = LoadWithVectorDescriptor::NameRegister();           // x2
+  Register vector = LoadWithVectorDescriptor::VectorRegister();      // x3
+  Register slot = LoadWithVectorDescriptor::SlotRegister();          // x0
   Register feedback = x4;
   Register receiver_map = x5;
   Register scratch1 = x6;

@@ -4700,7 +4712,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
   __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
                    &try_poly_name);
   Handle<Code> megamorphic_stub =
-      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

   __ Bind(&try_poly_name);

@@ -4724,6 +4736,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
 }

+void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+  VectorStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+  VectorKeyedStoreICStub stub(isolate(), state());
+  stub.GenerateForTrampoline(masm);
+}
+
+void VectorStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Label miss;
+
+  // TODO(mvstanton): Implement.
+  __ Bind(&miss);
+  StoreIC::GenerateMiss(masm);
+}
+
+void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
+  GenerateImpl(masm, false);
+}
+
+void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
+  GenerateImpl(masm, true);
+}
+
+void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+  Label miss;
+
+  // TODO(mvstanton): Implement.
+  __ Bind(&miss);
+  KeyedStoreIC::GenerateMiss(masm);
+}
+
 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
 // a "Push lr" instruction, followed by a call.
 static const unsigned int kProfileEntryHookCallSize =

@@ -5426,7 +5490,7 @@ static const int kCallApiFunctionSpillSpace = 4;

 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
-  return ref0.address() - ref1.address();
+  return static_cast<int>(ref0.address() - ref1.address());
 }
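The static_cast added to AddressOffset fixes a narrowing warning rather than behavior: on arm64 the difference of two addresses is 64 bits wide, while the function returns int. A minimal illustration (plain C++; the addresses here are stand-ins known to be close together):

    #include <cstdint>
    #include <cstdio>

    int main() {
      char block[64];
      std::intptr_t ref0 = reinterpret_cast<std::intptr_t>(&block[48]);
      std::intptr_t ref1 = reinterpret_cast<std::intptr_t>(&block[16]);
      // The raw difference is 64-bit; the cast documents that it fits in int.
      int offset = static_cast<int>(ref0 - ref1);
      std::printf("offset = %d\n", offset);  // 32
      return 0;
    }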
@@ -5765,6 +5829,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -138,8 +138,10 @@ class RecordWriteStub: public PlatformCodeStub {
     DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
     DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
     // Retrieve the offsets to the labels.
-    int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
-    int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+    auto offset_to_incremental_noncompacting =
+        static_cast<int32_t>(instr1->ImmPCOffset());
+    auto offset_to_incremental_compacting =
+        static_cast<int32_t>(instr2->ImmPCOffset());

     switch (mode) {
       case STORE_BUFFER_ONLY:
@@ -634,6 +634,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -84,6 +84,8 @@ const int64_t kXMaxInt = 0x7fffffffffffffffL;
 const int64_t kXMinInt = 0x8000000000000000L;
 const int32_t kWMaxInt = 0x7fffffff;
 const int32_t kWMinInt = 0x80000000;
+const unsigned kIp0Code = 16;
+const unsigned kIp1Code = 17;
 const unsigned kFramePointerRegCode = 29;
 const unsigned kLinkRegCode = 30;
 const unsigned kZeroRegCode = 31;
@@ -120,6 +120,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
 #endif
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -207,10 +207,8 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  RegList regs = receiver.Bit() | name.Bit();
-  if (FLAG_vector_ics) {
-    regs |= VectorLoadICTrampolineDescriptor::SlotRegister().Bit();
-  }
+  Register slot = LoadDescriptor::SlotRegister();
+  RegList regs = receiver.Bit() | name.Bit() | slot.Bit();
   Generate_DebugBreakCallHelper(masm, regs, 0, x10);
 }

@@ -220,8 +218,11 @@ void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
   Register value = StoreDescriptor::ValueRegister();
-  Generate_DebugBreakCallHelper(
-      masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
+  RegList regs = receiver.Bit() | name.Bit() | value.Bit();
+  if (FLAG_vector_stores) {
+    regs |= VectorStoreICDescriptor::SlotRegister().Bit();
+  }
+  Generate_DebugBreakCallHelper(masm, regs, 0, x10);
 }
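RegList is a bitmask with one bit per register, which is why the slot register can be ORed in conditionally. A toy version of the pattern (assumed semantics, not the V8 types):

    #include <cstdint>
    #include <cstdio>

    using RegList = uint64_t;

    struct Register {
      int code;  // e.g. x0 -> 0, x1 -> 1, ...
      constexpr RegList Bit() const { return RegList{1} << code; }
    };

    int main() {
      constexpr Register x0{0}, x1{1}, x2{2}, x4{4};
      RegList regs = x1.Bit() | x2.Bit() | x0.Bit();  // receiver | name | value
      bool vector_stores = true;                      // stand-in for the flag
      if (vector_stores) regs |= x4.Bit();            // add the slot register
      std::printf("reglist = 0x%llx\n", static_cast<unsigned long long>(regs));
      return 0;
    }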
@@ -233,11 +234,7 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {

 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC keyed store call (from ic-arm64.cc).
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Generate_DebugBreakCallHelper(
-      masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
+  GenerateStoreICDebugBreak(masm);
 }

@@ -346,6 +343,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {

 const bool LiveEdit::kFrameDropperSupported = true;

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -81,6 +81,7 @@ VISITOR_LIST(DEFINE_VISITOR_CALLERS)
 #undef DEFINE_VISITOR_CALLERS

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -193,6 +193,7 @@ void DelayedMasm::EmitPending() {
   ResetPending();
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -354,11 +354,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {

 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-  // No out-of-line constant pool support.
+  // No embedded constant pool support.
   UNREACHABLE();
 }

 #undef __

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
@@ -1369,11 +1369,12 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
   switch (format[1]) {
     case 'M': {  // IMoveImm or IMoveLSL.
       if (format[5] == 'I') {
-        uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+        uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
+                       << (16 * instr->ShiftMoveWide());
         AppendToOutput("#0x%" PRIx64, imm);
       } else {
         DCHECK(format[5] == 'L');
-        AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
+        AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
         if (instr->ShiftMoveWide() > 0) {
           AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
         }

@@ -1383,13 +1384,13 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
     case 'L': {
       switch (format[2]) {
         case 'L': {  // ILLiteral - Immediate Load Literal.
-          AppendToOutput("pc%+" PRId64,
-                         instr->ImmLLiteral() << kLoadLiteralScaleLog2);
+          AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
+                                            << kLoadLiteralScaleLog2);
           return 9;
         }
         case 'S': {  // ILS - Immediate Load/Store.
           if (instr->ImmLS() != 0) {
-            AppendToOutput(", #%" PRId64, instr->ImmLS());
+            AppendToOutput(", #%" PRId32, instr->ImmLS());
           }
           return 3;
         }

@@ -1397,14 +1398,14 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
           if (instr->ImmLSPair() != 0) {
             // format[3] is the scale value. Convert to a number.
             int scale = format[3] - 0x30;
-            AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
+            AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
           }
           return 4;
         }
         case 'U': {  // ILU - Immediate Load/Store Unsigned.
           if (instr->ImmLSUnsigned() != 0) {
-            AppendToOutput(", #%" PRIu64,
-                           instr->ImmLSUnsigned() << instr->SizeLS());
+            AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned()
+                                              << instr->SizeLS());
           }
           return 3;
         }

@@ -1427,7 +1428,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
         AppendToOutput("#%d", 64 - instr->FPScale());
         return 8;
       } else {
-        AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
+        AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
                        format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
         return 9;
       }

@@ -1538,7 +1539,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
     case 'L': {  // HLo.
       if (instr->ImmDPShift() != 0) {
         const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
-        AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
+        AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
                        instr->ImmDPShift());
       }
       return 3;
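The PRId64 to PRId32 swaps track the field getters' new int32_t return type: in a varargs call the format width must match the promoted argument, otherwise printf reads the wrong number of bytes, which is undefined behavior. A minimal, correct pairing (plain C++):

    #include <cinttypes>
    #include <cstdio>

    int main() {
      int32_t imm = -12;
      // Correct: format width matches the argument type.
      std::printf("#%" PRId32 "\n", imm);
      // To print as 64-bit, widen the argument explicitly instead:
      std::printf("#%" PRId64 "\n", static_cast<int64_t>(imm));
      return 0;
    }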
@@ -1729,7 +1730,8 @@ void PrintDisassembler::ProcessOutput(Instruction* instr) {
                       GetOutput());
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 namespace disasm {
@@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
 }

-Object*& ExitFrame::constant_pool_slot() const {
-  UNREACHABLE();
-  return Memory::Object_at(NULL);
-}
-
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
@@ -78,11 +78,10 @@ class ConstructFrameConstants : public AllStatic {
   // FP-relative.
   static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
   static const int kLengthOffset = -4 * kPointerSize;
-  static const int kConstructorOffset = -5 * kPointerSize;
-  static const int kImplicitReceiverOffset = -6 * kPointerSize;
+  static const int kImplicitReceiverOffset = -5 * kPointerSize;

   static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+      StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
 };
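A quick consistency check of the new layout (plain C++; the fixed-frame size is an assumed placeholder): removing the constructor slot moves the implicit receiver from -6 to -5 slots and shrinks the variable part of the construct frame from 4 to 3 pointers.

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;                    // assumed 64-bit build
      const int kFixedFrameSize = 4 * kPointerSize;  // assumed standard prologue
      const int kLengthOffset = -4 * kPointerSize;
      const int kImplicitReceiverOffset = -5 * kPointerSize;      // was -6
      const int kFrameSize = kFixedFrameSize + 3 * kPointerSize;  // was + 4
      std::printf("length@%d receiver@%d size=%d\n", kLengthOffset,
                  kImplicitReceiverOffset, kFrameSize);
      return 0;
    }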
File diff suppressed because it is too large
@@ -93,9 +93,9 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
 // met.
 uint64_t Instruction::ImmLogical() {
   unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
-  int64_t n = BitN();
-  int64_t imm_s = ImmSetBits();
-  int64_t imm_r = ImmRotate();
+  int32_t n = BitN();
+  int32_t imm_s = ImmSetBits();
+  int32_t imm_r = ImmRotate();

   // An integer is constructed from the n, imm_s and imm_r bits according to
   // the following table:

@@ -211,7 +211,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {

 bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
-                                     int32_t offset) {
+                                     ptrdiff_t offset) {
   return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
 }

@@ -242,7 +242,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
   ptrdiff_t target_offset = DistanceTo(target);
   Instr imm;
   if (Instruction::IsValidPCRelOffset(target_offset)) {
-    imm = Assembler::ImmPCRelAddress(target_offset);
+    imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
     SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
   } else {
     PatchingAssembler patcher(this,

@@ -254,9 +254,11 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {

 void Instruction::SetBranchImmTarget(Instruction* target) {
   DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
+  DCHECK(IsValidImmPCOffset(BranchType(),
+                            DistanceTo(target) >> kInstructionSizeLog2));
+  int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
   Instr branch_imm = 0;
   uint32_t imm_mask = 0;
-  ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
   switch (BranchType()) {
     case CondBranchType: {
       branch_imm = Assembler::ImmCondBranch(offset);
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) { void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
DCHECK(IsUnresolvedInternalReference()); DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize)); DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2; int32_t target_offset =
DCHECK(is_int32(target_offset)); static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset); uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset); uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@ -302,8 +304,9 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetImmLLiteral(Instruction* source) { void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral()); DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize)); DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2; DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(offset); Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
Instr mask = ImmLLiteral_mask; Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm); SetInstructionBits(Mask(~mask) | imm);
@ -316,7 +319,7 @@ void Instruction::SetImmLLiteral(Instruction* source) {
bool InstructionSequence::IsInlineData() const { bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr // Inline data is encoded as a single movz instruction which writes to xzr
// (x31). // (x31).
return IsMovz() && SixtyFourBits() && (Rd() == xzr.code()); return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
// TODO(all): If we extend ::InlineData() to support bigger data, we need // TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too. // to update this method too.
} }
@ -334,6 +337,7 @@ uint64_t InstructionSequence::InlineData() const {
} }
} } // namespace v8::internal } // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64 #endif // V8_TARGET_ARCH_ARM64
@@ -137,8 +137,8 @@ class Instruction {
     return following(-count);
   }

 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
-  int64_t Name() const { return Func(HighBit, LowBit); }
+  int32_t Name() const { return Func(HighBit, LowBit); }
   INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
 #undef DEFINE_GETTER
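DEFINE_GETTER plus INSTRUCTION_FIELDS_LIST is the classic X-macro pattern, which is why this one-line change retypes every generated getter at once. A self-contained miniature (field positions assumed, not the real A64 layout):

    #include <cstdint>
    #include <cstdio>

    #define FIELDS_LIST(V) \
      V(Rd, 4, 0, Bits)    \
      V(Rn, 9, 5, Bits)

    struct Instruction {
      uint32_t bits;
      // Extract bits [high:low] as an unsigned field, returned as int32_t.
      int32_t Bits(int high, int low) const {
        return static_cast<int32_t>((bits >> low) &
                                    ((1u << (high - low + 1)) - 1));
      }
    #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
      int32_t Name() const { return Func(HighBit, LowBit); }
      FIELDS_LIST(DEFINE_GETTER)
    #undef DEFINE_GETTER
    };

    int main() {
      Instruction insn{0x3e1};  // Rd = 1, Rn = 31
      std::printf("Rd=%d Rn=%d\n", insn.Rd(), insn.Rn());
      return 0;
    }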
@@ -146,8 +146,8 @@ class Instruction {
   // formed from ImmPCRelLo and ImmPCRelHi.
   int ImmPCRel() const {
     DCHECK(IsPCRelAddressing());
-    int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
-    int const width = ImmPCRelLo_width + ImmPCRelHi_width;
+    int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+    int width = ImmPCRelLo_width + ImmPCRelHi_width;
     return signed_bitextract_32(width - 1, 0, offset);
   }

@@ -369,7 +369,7 @@ class Instruction {
   // PC-relative addressing instruction.
   Instruction* ImmPCOffsetTarget();

-  static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+  static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
   bool IsTargetInImmPCOffsetRange(Instruction* target);
   // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
   // a PC-relative addressing instruction.

@@ -409,9 +409,7 @@ class Instruction {
   static const int ImmPCRelRangeBitwidth = 21;
-  static bool IsValidPCRelOffset(int offset) {
-    return is_int21(offset);
-  }
+  static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
   void SetPCRelImmTarget(Instruction* target);
   void SetBranchImmTarget(Instruction* target);
 };
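Widening IsValidPCRelOffset to ptrdiff_t moves the truncation decision to the caller; the 21-bit PC-relative range check itself is unchanged. Sketch of the predicate (the helper is re-implemented here for illustration):

    #include <cstddef>
    #include <cstdio>

    bool is_int21(std::ptrdiff_t x) {
      const std::ptrdiff_t limit = std::ptrdiff_t{1} << 20;  // 2^20
      return -limit <= x && x < limit;
    }

    int main() {
      std::printf("%d %d\n", is_int21(1048575), is_int21(1048576));  // 1 0
      return 0;
    }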
@@ -591,4 +591,5 @@ void Instrument::VisitUnimplemented(Instruction* instr) {
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
@@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 const Register LoadDescriptor::ReceiverRegister() { return x1; }
 const Register LoadDescriptor::NameRegister() { return x2; }
+const Register LoadDescriptor::SlotRegister() { return x0; }

-const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
-
-const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
+const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }

 const Register StoreDescriptor::ReceiverRegister() { return x1; }

@@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return x2; }
 const Register StoreDescriptor::ValueRegister() { return x0; }

+const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return x4; }
+
+const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
+
 const Register StoreTransitionDescriptor::MapRegister() { return x3; }

@@ -62,389 +66,338 @@ const Register MathPowIntegerDescriptor::exponent() { return x12; }
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-const Register GrowArrayElementsDescriptor::CapacityRegister() { return x2; }

-void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void FastNewClosureDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x2: function info
-  Register registers[] = {cp, x2};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void FastNewContextDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x1: function
-  Register registers[] = {cp, x1};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void ToNumberDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x0: value
-  Register registers[] = {cp, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void NumberToStringDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x0: value
-  Register registers[] = {cp, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, x3};
-  data->Initialize(arraysize(registers), registers, NULL);
+void TypeofDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {x3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void FastCloneShallowArrayDescriptor::Initialize(
+void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x3: array literals array
   // x2: array literal index
   // x1: constant elements
-  Register registers[] = {cp, x3, x2, x1};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x3, x2, x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void FastCloneShallowObjectDescriptor::Initialize(
+void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x3: object literals array
   // x2: object literal index
   // x1: constant properties
   // x0: object literal flags
-  Register registers[] = {cp, x3, x2, x1, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x3, x2, x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CreateAllocationSiteDescriptor::Initialize(
+void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x2: feedback vector
   // x3: call feedback slot
-  Register registers[] = {cp, x2, x3};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Smi()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x2, x3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void CreateWeakCellDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x2: feedback vector
   // x3: call feedback slot
   // x1: tagged value to put in the weak cell
-  Register registers[] = {cp, x2, x3, x1};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x2, x3, x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void StoreArrayLiteralElementDescriptor::Initialize(
+void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, x3, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x3, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallFunctionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x1  function  the function to call
-  Register registers[] = {cp, x1};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CallFunctionWithFeedbackDescriptor::Initialize(
+void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, x1, x3};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Smi()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x1, x3};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
+void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {cp, x1, x3, x2};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
-      Representation::Tagged()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x1, x3, x2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallConstructDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x0 : number of arguments
   // x1 : the function to call
   // x2 : feedback vector
   // x3 : slot in feedback vector (smi) (if r2 is not the megamorphic symbol)
   // TODO(turbofan): So far we don't gather type feedback and hence skip the
   // slot parameter, but ArrayConstructStub needs the vector to be undefined.
-  Register registers[] = {cp, x0, x1, x2};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x0, x1, x2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void RegExpConstructResultDescriptor::Initialize(
+void RegExpConstructResultDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x2: length
   // x1: index (of last match)
   // x0: string
-  Register registers[] = {cp, x2, x1, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x2, x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void TransitionElementsKindDescriptor::Initialize(
+void TransitionElementsKindDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x0: value (js_array)
   // x1: to_map
-  Register registers[] = {cp, x0, x1};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x0, x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void AllocateHeapNumberDescriptor::Initialize(
+void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
-  Register registers[] = {cp};
-  data->Initialize(arraysize(registers), registers, nullptr);
+  data->InitializePlatformSpecific(0, nullptr, nullptr);
 }

-void ArrayConstructorConstantArgCountDescriptor::Initialize(
+void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x1: function
   // x2: allocation site with elements kind
   // x0: number of arguments to the constructor function
-  Register registers[] = {cp, x1, x2};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1, x2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {cp, x1, x2, x0};
-  Representation representations[] = {
-      Representation::Tagged(), Representation::Tagged(),
-      Representation::Tagged(), Representation::Integer32()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x1, x2, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
-    CallInterfaceDescriptorData* data) {
-  // cp: context
+void InternalArrayConstructorConstantArgCountDescriptor::
+    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
   // x1: constructor function
   // x0: number of arguments to the constructor function
-  Register registers[] = {cp, x1};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void InternalArrayConstructorDescriptor::Initialize(
+void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {cp, x1, x0};
-  Representation representations[] = {Representation::Tagged(),
-                                      Representation::Tagged(),
-                                      Representation::Integer32()};
-  data->Initialize(arraysize(registers), registers, representations);
+  Register registers[] = {x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void CompareDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x1: left operand
   // x0: right operand
-  Register registers[] = {cp, x1, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void CompareNilDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x0: value to compare
-  Register registers[] = {cp, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void ToBooleanDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x0: value
-  Register registers[] = {cp, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void BinaryOpDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x1: left operand
   // x0: right operand
-  Register registers[] = {cp, x1, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void BinaryOpWithAllocationSiteDescriptor::Initialize(
+void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // cp: context
   // x2: allocation site
   // x1: left operand
   // x0: right operand
-  Register registers[] = {cp, x2, x1, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x2, x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
-  // cp: context
+void StringAddDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   // x1: left operand
   // x0: right operand
-  Register registers[] = {cp, x1, x0};
-  data->Initialize(arraysize(registers), registers, NULL);
+  Register registers[] = {x1, x0};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void KeyedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor noInlineDescriptor =
       PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);

   Register registers[] = {
-      cp,  // context
       x2,  // key
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // key
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &noInlineDescriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &noInlineDescriptor);
 }

-void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void NamedDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor noInlineDescriptor =
       PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);

   Register registers[] = {
-      cp,  // context
       x2,  // name
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // name
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &noInlineDescriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &noInlineDescriptor);
 }

-void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void CallHandlerDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

   Register registers[] = {
-      cp,  // context
       x0,  // receiver
   };
-  Representation representations[] = {
-      Representation::Tagged(),  // context
-      Representation::Tagged(),  // receiver
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &default_descriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &default_descriptor);
 }

-void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

   Register registers[] = {
-      cp,  // context
       x1,  // JSFunction
       x0,  // actual number of arguments
       x2,  // expected number of arguments
   };
-  Representation representations[] = {
-      Representation::Tagged(),     // context
-      Representation::Tagged(),     // JSFunction
-      Representation::Integer32(),  // actual number of arguments
-      Representation::Integer32(),  // expected number of arguments
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &default_descriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &default_descriptor);
 }

-void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiFunctionDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

   Register registers[] = {
-      cp,  // context
       x0,  // callee
       x4,  // call_data
       x2,  // holder
       x1,  // api_function_address
       x3,  // actual number of arguments
   };
-  Representation representations[] = {
-      Representation::Tagged(),     // context
-      Representation::Tagged(),     // callee
-      Representation::Tagged(),     // call_data
-      Representation::Tagged(),     // holder
-      Representation::External(),   // api_function_address
-      Representation::Integer32(),  // actual number of arguments
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &default_descriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &default_descriptor);
 }

-void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+void ApiAccessorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
   static PlatformInterfaceDescriptor default_descriptor =
       PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);

   Register registers[] = {
-      cp,  // context
       x0,  // callee
       x4,  // call_data
       x2,  // holder
       x1,  // api_function_address
   };
-  Representation representations[] = {
-      Representation::Tagged(),    // context
-      Representation::Tagged(),    // callee
-      Representation::Tagged(),    // call_data
-      Representation::Tagged(),    // holder
-      Representation::External(),  // api_function_address
-  };
-  data->Initialize(arraysize(registers), registers, representations,
-                   &default_descriptor);
+  data->InitializePlatformSpecific(arraysize(registers), registers,
+                                   &default_descriptor);
+}
+
+void MathRoundVariantDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      x1,  // math rounding function
+      x3,  // vector slot id
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
 }

-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

 #endif  // V8_TARGET_ARCH_ARM64
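Taken together, this file shows the shape of the descriptor refactor: the context register is handled by shared code instead of being listed per descriptor, and the platform hook now supplies only registers (the representation arrays moved out of the platform files). A toy sketch of the new calling convention (names assumed, not the V8 classes):

    #include <cstdio>
    #include <vector>

    struct Register { int code; };

    struct CallInterfaceDescriptorData {
      std::vector<Register> registers;
      void InitializePlatformSpecific(int count, const Register* regs) {
        registers.assign(regs, regs + count);
      }
    };

    // Old style: every platform repeated {cp, ...} plus representations.
    // New style: just the machine registers for this call's parameters.
    void ToNumberInitializePlatformSpecific(CallInterfaceDescriptorData* data) {
      Register registers[] = {{0}};  // x0: value
      data->InitializePlatformSpecific(1, registers);
    }

    int main() {
      CallInterfaceDescriptorData data;
      ToNumberInitializePlatformSpecific(&data);
      std::printf("%zu register(s)\n", data.registers.size());
      return 0;
    }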
@@ -1050,10 +1050,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());

+  // Target
   ops.Add(target, zone());
-  for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op =
-        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
     ops.Add(op, zone());
   }
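The rewritten loop encodes a small but easy-to-miss convention: operand 0 is the code target and operand 1 is the context, so descriptor register numbering starts only after those implicit entries. A toy version (the constant's value, 2, is an assumption consistent with the loop above):

    #include <cstdio>

    constexpr int kImplicitRegisterParameterCount = 2;  // target + context

    int main() {
      const int operand_count = 5;
      for (int i = kImplicitRegisterParameterCount; i < operand_count; i++) {
        int descriptor_index = i - kImplicitRegisterParameterCount;
        std::printf("operand %d -> descriptor register %d\n", i,
                    descriptor_index);
      }
      return 0;
    }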
@@ -1391,7 +1399,7 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
 LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
   LOperand* object = UseFixed(instr->value(), x0);
   LDateField* result = new(zone()) LDateField(object, instr->index());
-  return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+  return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }

@@ -1504,7 +1512,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
   inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedClosure(instr->closure());
+  chunk_->AddInlinedFunction(instr->shared());
   return NULL;
 }

@@ -1588,20 +1596,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
 }

-LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
-    HTailCallThroughMegamorphicCache* instr) {
-  LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* receiver_register =
-      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
-  LOperand* name_register =
-      UseFixed(instr->name(), LoadDescriptor::NameRegister());
-
-  // Not marked as call. It can't deoptimize, and it never returns.
-  return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
-}
-
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   // The function is required (by MacroAssembler::InvokeFunction) to be in x1.

@@ -1700,7 +1694,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
   if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   }

   LLoadGlobalGeneric* result =

@@ -1766,7 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
   if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   }

   LInstruction* result =

@@ -1788,7 +1782,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
   if (instr->HasVectorAndSlot()) {
-    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
   }

   LInstruction* result =

@@ -2028,7 +2022,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
     CallInterfaceDescriptor descriptor =
         info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor.GetEnvironmentParameterRegister(index);
+    Register reg = descriptor.GetRegisterParameter(index);
     return DefineFixed(result, reg);
   }
 }

@@ -2402,8 +2396,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   DCHECK(instr->key()->representation().IsTagged());
   DCHECK(instr->value()->representation().IsTagged());

-  return MarkAsCall(
-      new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result = new (zone())
+      LStoreKeyedGeneric(context, object, key, value, slot, vector);
+  return MarkAsCall(result, instr);
 }

@@ -2442,7 +2444,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
       UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
   LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());

-  LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
   return MarkAsCall(result, instr);
 }

@@ -2567,6 +2577,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
 }

+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseRegister(instr->object());
+  LOperand* elements = UseRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, x0);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* value = UseFixed(instr->value(), x3);

@@ -2763,4 +2788,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
 }

-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

deps/v8/src/arm64/lithium-arm64.h

@@ -125,6 +125,7 @@ class LCodeGen;
   V(MathRoundD) \
   V(MathRoundI) \
   V(MathSqrt) \
+  V(MaybeGrowElements) \
   V(ModByConstI) \
   V(ModByPowerOf2I) \
   V(ModI) \
@@ -164,7 +165,6 @@ class LCodeGen;
   V(SubI) \
   V(SubS) \
   V(TaggedToI) \
-  V(TailCallThroughMegamorphicCache) \
   V(ThisFunction) \
   V(ToFastProperties) \
   V(TransitionElementsKind) \
@@ -318,26 +318,6 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 };
 
-class LTailCallThroughMegamorphicCache final
-    : public LTemplateInstruction<0, 3, 0> {
- public:
-  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
-                                   LOperand* name) {
-    inputs_[0] = context;
-    inputs_[1] = receiver;
-    inputs_[2] = name;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* receiver() { return inputs_[1]; }
-  LOperand* name() { return inputs_[2]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
-                               "tail-call-through-megamorphic-cache")
-  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
-};
-
 class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
  public:
   bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -739,7 +719,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 
-  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+  Strength strength() { return hydrogen()->strength(); }
 
  private:
   Token::Value op_;
@@ -1181,6 +1161,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
   DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
 
+  Strength strength() { return hydrogen()->strength(); }
+
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1550,8 +1532,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
   LCallWithDescriptor(CallInterfaceDescriptor descriptor,
                       const ZoneList<LOperand*>& operands, Zone* zone)
       : descriptor_(descriptor),
-        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
-    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
     inputs_.AddAll(operands, zone);
   }
@@ -1561,6 +1547,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
+
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@@ -2563,22 +2553,24 @@ class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
 };
 
-class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
  public:
-  LStoreKeyedGeneric(LOperand* context,
-                     LOperand* obj,
-                     LOperand* key,
-                     LOperand* value) {
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
     inputs_[0] = context;
-    inputs_[1] = obj;
+    inputs_[1] = object;
     inputs_[2] = key;
     inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@@ -2615,17 +2607,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
 };
 
-class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
  public:
-  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
    inputs_[0] = context;
     inputs_[1] = object;
     inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@@ -2637,6 +2634,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
 };
 
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
 class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {

deps/v8/src/arm64/lithium-codegen-arm64.cc

@@ -224,55 +224,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
   // The translation includes one command per value in the environment.
   int translation_size = environment->translation_size();
-  // The output frame height does not include the parameters.
-  int height = translation_size - environment->parameter_count();
 
   WriteTranslation(environment->outer(), translation);
-  bool has_closure_id = !info()->closure().is_null() &&
-      !info()->closure().is_identical_to(environment->closure());
-  int closure_id = has_closure_id
-      ? DefineDeoptimizationLiteral(environment->closure())
-      : Translation::kSelfLiteralId;
-
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case JS_GETTER:
-      DCHECK(translation_size == 1);
-      DCHECK(height == 0);
-      translation->BeginGetterStubFrame(closure_id);
-      break;
-    case JS_SETTER:
-      DCHECK(translation_size == 2);
-      DCHECK(height == 0);
-      translation->BeginSetterStubFrame(closure_id);
-      break;
-    case STUB:
-      translation->BeginCompiledStubFrame();
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  WriteTranslationFrame(environment, translation);
 
   int object_index = 0;
   int dematerialized_index = 0;
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
-    AddToTranslation(environment,
-                     translation,
-                     value,
-                     environment->HasTaggedValueAt(i),
-                     environment->HasUint32ValueAt(i),
-                     &object_index,
-                     &dematerialized_index);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
   }
 }
@@ -345,16 +307,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
 }
 
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
-  int result = deoptimization_literals_.length();
-  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
-    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
-  }
-  deoptimization_literals_.Add(literal, zone());
-  return result;
-}
-
 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                     Safepoint::DeoptMode mode) {
   environment->set_has_been_used();
@@ -435,6 +387,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
     CallFunctionStub stub(isolate(), arity, flags);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
@@ -449,6 +402,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
   CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 
   DCHECK(ToRegister(instr->result()).is(x0));
 }
@@ -504,6 +458,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
     CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 
   DCHECK(ToRegister(instr->result()).is(x0));
 }
@@ -525,7 +480,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
   if (context->IsRegister()) {
     __ Mov(cp, ToRegister(context));
   } else if (context->IsStackSlot()) {
-    __ Ldr(cp, ToMemOperand(context));
+    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
   } else if (context->IsConstantOperand()) {
     HConstant* constant =
         chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -669,7 +624,7 @@ bool LCodeGen::GeneratePrologue() {
     // global proxy when called as functions (without an explicit receiver
     // object).
     if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
-        !info_->is_native()) {
+        !info()->is_native() && info()->scope()->has_this_declaration()) {
       Label ok;
       int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
       __ Peek(x10, receiver_offset);
@@ -728,8 +683,9 @@ bool LCodeGen::GeneratePrologue() {
     __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
     // Copy any necessary parameters into the context.
     int num_parameters = scope()->num_parameters();
-    for (int i = 0; i < num_parameters; i++) {
-      Variable* var = scope()->parameter(i);
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
       if (var->IsContextSlot()) {
         Register value = x0;
         Register scratch = x3;
@@ -743,8 +699,9 @@ bool LCodeGen::GeneratePrologue() {
         __ Str(value, target);
         // Update the write barrier. This clobbers value and scratch.
         if (need_write_barrier) {
-          __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
-                                    GetLinkRegisterState(), kSaveFPRegs);
+          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
                                    value, scratch, GetLinkRegisterState(),
+                                    kSaveFPRegs);
         } else if (FLAG_debug_code) {
           Label done;
           __ JumpIfInNewSpace(cp, &done);
@@ -995,15 +952,10 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  DCHECK(deoptimization_literals_.length() == 0);
-
-  const ZoneList<Handle<JSFunction> >* inlined_closures =
-      chunk()->inlined_closures();
-
-  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
-    DefineDeoptimizationLiteral(inlined_closures->at(i));
-  }
-
+  DCHECK_EQ(0, deoptimization_literals_.length());
+  for (auto function : chunk()->inlined_functions()) {
+    DefineDeoptimizationLiteral(function);
+  }
   inlined_function_count_ = deoptimization_literals_.length();
 }
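The deleted DefineDeoptimizationLiteral (and the frame-type switch replaced by WriteTranslationFrame above) appears to move into the shared Lithium code; the matching declarations are also dropped from the header further down. The dedup itself is a linear scan, sketched here in plain C++ rather than V8's ZoneList/Handle types:

#include <vector>

// Minimal sketch of the removed dedup logic, using std::vector instead of
// V8's ZoneList<Handle<Object>>. Returns the existing index when the
// literal is already known, otherwise appends it.
template <typename T>
int DefineDeoptimizationLiteralSketch(std::vector<T>* literals,
                                      const T& literal) {
  for (size_t i = 0; i < literals->size(); ++i) {
    if ((*literals)[i] == literal) return static_cast<int>(i);
  }
  literals->push_back(literal);
  return static_cast<int>(literals->size() - 1);
}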
@@ -1281,13 +1233,37 @@ static int64_t ArgumentsOffsetWithoutFrame(int index) {
 }
 
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
   DCHECK(op != NULL);
   DCHECK(!op->IsRegister());
   DCHECK(!op->IsDoubleRegister());
   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
-    return MemOperand(fp, StackSlotOffset(op->index()));
+    int fp_offset = StackSlotOffset(op->index());
+    // Loads and stores have a bigger reach in positive offset than negative.
+    // We try to access using jssp (positive offset) first, then fall back to
+    // fp (negative offset) if that fails.
+    //
+    // We can reference a stack slot from jssp only if we know how much we've
+    // put on the stack. We don't know this in the following cases:
+    // - stack_mode != kCanUseStackPointer: this is the case when deferred
+    //   code has saved the registers.
+    // - saves_caller_doubles(): some double registers have been pushed, jssp
+    //   references the end of the double registers and not the end of the
+    //   stack slots.
+    // In both of the cases above, we _could_ add the tracking information
+    // required so that we can use jssp here, but in practice it isn't worth
+    // it.
+    if ((stack_mode == kCanUseStackPointer) &&
+        !info()->saves_caller_doubles()) {
+      int jssp_offset_to_fp =
+          StandardFrameConstants::kFixedFrameSizeFromFp +
+          (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
+      int jssp_offset = fp_offset + jssp_offset_to_fp;
+      if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
+        return MemOperand(masm()->StackPointer(), jssp_offset);
+      }
+    }
+    return MemOperand(fp, fp_offset);
   } else {
     // Retrieve parameter without eager stack-frame relative to the
     // stack-pointer.
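The jssp path above only pays off when the translated offset fits a scaled load/store immediate. A worked sketch of the arithmetic, with illustrative values rather than the real frame constants:

// Hypothetical numbers: 16-byte fixed frame above fp, 3 spill slots,
// 2 pushed arguments, 8-byte pointers.
int FpToJsspOffset(int fp_offset) {
  const int kFixedFrameSizeFromFp = 16;  // illustrative, not the V8 constant
  const int pushed_arguments = 2;
  const int stack_slot_count = 3;
  const int kPointerSize = 8;
  int jssp_offset_to_fp = kFixedFrameSizeFromFp +
                          (pushed_arguments + stack_slot_count) * kPointerSize;
  return fp_offset + jssp_offset_to_fp;  // e.g. -24 + 56 = 32, now positive
}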
@@ -1772,8 +1748,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   DCHECK(ToRegister(instr->right()).is(x0));
   DCHECK(ToRegister(instr->result()).is(x0));
 
-  Handle<Code> code = CodeFactory::BinaryOpIC(
-      isolate(), instr->op(), instr->language_mode()).code();
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
   CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
@@ -2021,29 +1997,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
 }
 
-void LCodeGen::DoTailCallThroughMegamorphicCache(
-    LTailCallThroughMegamorphicCache* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register name = ToRegister(instr->name());
-  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(name.is(LoadDescriptor::NameRegister()));
-  DCHECK(receiver.is(x1));
-  DCHECK(name.is(x2));
-  Register scratch = x4;
-  Register extra = x5;
-  Register extra2 = x6;
-  Register extra3 = x7;
-
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(
-      masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
-      scratch, extra, extra2, extra3);
-
-  // Tail call to miss if we ended up here.
-  LoadIC::GenerateMiss(masm());
-}
-
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(ToRegister(instr->result()).Is(x0));
@@ -2085,6 +2038,8 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
     }
     generator.AfterCall();
   }
+
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
@@ -2104,11 +2059,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   __ Call(x10);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   CallRuntime(instr->function(), instr->arity(), instr);
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
@@ -2134,6 +2091,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
     default:
      UNREACHABLE();
   }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
@@ -2554,7 +2512,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
   DCHECK(ToRegister(instr->left()).Is(x1));
   DCHECK(ToRegister(instr->right()).Is(x0));
 
-  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // Signal that we don't inline smi code before this stub.
   InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -2653,18 +2612,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
   Register temp1 = x10;
   Register temp2 = x11;
   Smi* index = instr->index();
-  Label runtime, done;
 
   DCHECK(object.is(result) && object.Is(x0));
   DCHECK(instr->IsMarkedAsCall());
 
-  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
-  __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
-  DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
-
   if (index->value() == 0) {
     __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
   } else {
+    Label runtime, done;
     if (index->value() < JSDate::kFirstUncachedField) {
       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
       __ Mov(temp1, Operand(stamp));
@@ -2680,9 +2635,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
     __ Bind(&runtime);
     __ Mov(x1, Operand(index));
     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ Bind(&done);
   }
-
-  __ Bind(&done);
 }
@@ -3196,6 +3150,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
                       instr->hydrogen()->formal_parameter_count(),
                       instr->arity(), instr);
   }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
 }
@@ -3309,6 +3264,16 @@ void LCodeGen::DoLabel(LLabel* label) {
              label->block_id(),
             LabelType(label));
 
+  // Inherit pushed_arguments_ from the predecessor's argument count.
+  if (label->block()->HasPredecessor()) {
+    pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
+#ifdef DEBUG
+    for (auto p : *label->block()->predecessors()) {
+      DCHECK_EQ(p->argument_count(), pushed_arguments_);
+    }
+#endif
+  }
+
   __ Bind(label->label());
   current_block_ = label->block_id();
   DoGap(label);
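The DEBUG loop above asserts that every predecessor agrees on the argument count, which is what makes inheriting from predecessor 0 sound. The same invariant restated in isolation:

#include <cassert>
#include <vector>

// Standalone restatement of the DoLabel invariant: a block's incoming
// argument count is well defined only if every predecessor left the same
// number of arguments on the stack.
int InheritedArgumentCount(const std::vector<int>& predecessor_counts) {
  int count = predecessor_counts.at(0);
  for (int c : predecessor_counts) assert(c == count);
  return count;
}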
@@ -3361,10 +3326,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
 
 template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
-  DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
-  Register slot_register = VectorLoadICDescriptor::SlotRegister();
-  DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
   DCHECK(slot_register.is(x0));
 
   AllowDeferredHandleDereference vector_structure_check;
@@ -3377,17 +3341,29 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
 }
 
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Mov(vector_register, vector);
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Mov(slot_register, Smi::FromInt(index));
+}
+
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   DCHECK(ToRegister(instr->global_object())
              .is(LoadDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->result()).Is(x0));
   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
-  }
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
-                                                       PREMONOMORPHIC).code();
+  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
+                                                       PREMONOMORPHIC).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -3657,9 +3633,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
   }
 
-  Handle<Code> ic =
-      CodeFactory::KeyedLoadICInOptimizedCode(
-          isolate(), instr->hydrogen()->initialization_state()).code();
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3712,13 +3688,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   // LoadIC expects name and receiver in registers.
   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
-  if (FLAG_vector_ics) {
-    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
-  }
-
-  Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
-      isolate(), NOT_CONTEXTUAL,
-      instr->hydrogen()->initialization_state()).code();
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   DCHECK(ToRegister(instr->result()).is(x0));
@@ -4754,6 +4728,8 @@ void LCodeGen::DoPushArguments(LPushArguments* instr) {
 
   // The preamble was done by LPreparePushArguments.
   args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
+
+  RecordPushedArgumentsDelta(instr->ArgumentCount());
 }
@@ -5137,14 +5113,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
     SmiCheck check_needed =
         instr->hydrogen()->value()->type().IsHeapObject()
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    __ RecordWriteContextSlot(context,
-                              target.offset(),
-                              value,
-                              scratch,
-                              GetLinkRegisterState(),
-                              kSaveFPRegs,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
+    __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
+                              scratch, GetLinkRegisterState(), kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
   }
   __ Bind(&skip_assignment);
 }
@@ -5322,6 +5293,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                         isolate(), instr->language_mode(),
                         instr->hydrogen()->initialization_state()).code();
@@ -5329,6 +5304,91 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
 }
 
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = x0;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ B(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ Cmp(ToRegister(current_capacity), Operand(constant_key));
+    __ B(le, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ Cmp(ToRegister(key), Operand(constant_capacity));
+    __ B(ge, deferred->entry());
+  } else {
+    __ Cmp(ToRegister(key), ToRegister(current_capacity));
+    __ B(ge, deferred->entry());
+  }
+
+  __ Mov(result, ToRegister(instr->elements()));
+
+  __ Bind(deferred->exit());
+}
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = x0;
+  __ Mov(result, 0);
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    __ Move(result, ToRegister(instr->object()));
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
+    } else {
+      __ Mov(x3, ToRegister(key));
+      __ SmiTag(x3);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
+}
+
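All four operand shapes above encode the same predicate: take the deferred (grow) path exactly when the key reaches the current capacity. The B(le) branch looks inverted only because its Cmp operands are swapped. Restated:

#include <cstdint>

// The comparisons emitted above (B(ge), and B(le) with swapped operands)
// all reduce to this test.
bool NeedsGrowth(int32_t key, int32_t current_capacity) {
  return key >= current_capacity;
}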
 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   Representation representation = instr->representation();
@@ -5433,10 +5493,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
   __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
-  Handle<Code> ic =
-      StoreIC::initialize_stub(isolate(), instr->language_mode(),
-                               instr->hydrogen()->initialization_state());
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -5548,7 +5612,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -6054,5 +6119,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
 }
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

deps/v8/src/arm64/lithium-codegen-arm64.h

@@ -28,7 +28,6 @@ class LCodeGen: public LCodeGenBase {
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
         jump_table_(4, info->zone()),
-        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         translations_(info->zone()),
@@ -37,7 +36,8 @@ class LCodeGen: public LCodeGenBase {
         frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
-        expected_safepoint_kind_(Safepoint::kSimple) {
+        expected_safepoint_kind_(Safepoint::kSimple),
+        pushed_arguments_(0) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
@@ -81,7 +81,9 @@ class LCodeGen: public LCodeGenBase {
   Register ToRegister32(LOperand* op) const;
   Operand ToOperand(LOperand* op);
   Operand ToOperand32(LOperand* op);
-  MemOperand ToMemOperand(LOperand* op) const;
+  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
+  MemOperand ToMemOperand(LOperand* op,
+                          StackMode stack_mode = kCanUseStackPointer) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;
 
   template <class LI>
@@ -114,6 +116,7 @@ class LCodeGen: public LCodeGenBase {
   // Deferred code support.
   void DoDeferredNumberTagD(LNumberTagD* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
@@ -190,6 +193,8 @@ class LCodeGen: public LCodeGenBase {
   template <class T>
   void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
 
   // Emits optimized code for %_IsString(x). Preserves input register.
   // Returns the condition on which a final split to
@@ -197,7 +202,6 @@ class LCodeGen: public LCodeGenBase {
   Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                          SmiCheck check_needed);
 
-  int DefineDeoptimizationLiteral(Handle<Object> literal);
   void PopulateDeoptimizationData(Handle<Code> code);
   void PopulateDeoptimizationLiteralsWithInlinedFunctions();
 
@@ -341,7 +345,6 @@ class LCodeGen: public LCodeGenBase {
   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
-  ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
   TranslationBuffer translations_;
@@ -358,6 +361,15 @@ class LCodeGen: public LCodeGenBase {
 
   Safepoint::Kind expected_safepoint_kind_;
 
+  // The number of arguments pushed onto the stack, either by this block or
+  // by a predecessor.
+  int pushed_arguments_;
+
+  void RecordPushedArgumentsDelta(int delta) {
+    pushed_arguments_ += delta;
+    DCHECK(pushed_arguments_ >= 0);
+  }
+
   int old_position_;
 
   class PushSafepointRegistersScope BASE_EMBEDDED {
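pushed_arguments_ is what makes the jssp-relative ToMemOperand path safe: every instruction that pushes or consumes arguments reports its delta, as the RecordPushedArgumentsDelta calls scattered through the .cc changes show. A toy model of the bookkeeping, not the LCodeGen member itself:

#include <cassert>

// Track the running count of pushed arguments and reject underflow,
// mirroring the DCHECK in RecordPushedArgumentsDelta.
struct ArgumentTracker {
  int pushed = 0;
  void Record(int delta) {
    pushed += delta;
    assert(pushed >= 0);  // a call can consume at most what was pushed
  }
};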

deps/v8/src/arm64/lithium-gap-resolver-arm64.cc

@@ -292,4 +292,5 @@ void LGapResolver::EmitMove(int index) {
   moves_[index].Eliminate();
 }
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8

deps/v8/src/arm64/macro-assembler-arm64.cc

@@ -926,8 +926,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
     masm_->PushPreamble(size_);
   }
 
-  int count = queued_.size();
-  int index = 0;
+  size_t count = queued_.size();
+  size_t index = 0;
   while (index < count) {
     // PushHelper can only handle registers with the same size and type, and it
     // can handle only four at a time. Batch them up accordingly.
@@ -949,8 +949,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
 void MacroAssembler::PushPopQueue::PopQueued() {
   if (queued_.empty()) return;
 
-  int count = queued_.size();
-  int index = 0;
+  size_t count = queued_.size();
+  size_t index = 0;
   while (index < count) {
     // PopHelper can only handle registers with the same size and type, and it
     // can handle only four at a time. Batch them up accordingly.
@@ -1263,7 +1263,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
   // system stack pointer (csp).
   DCHECK(csp.Is(StackPointer()));
 
-  MemOperand tos(csp, -2 * kXRegSize, PreIndex);
+  MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
 
   stp(d14, d15, tos);
   stp(d12, d13, tos);
@@ -3928,6 +3928,7 @@ void MacroAssembler::GetNumberHash(Register key, Register scratch) {
   Add(key, key, scratch);
   // hash = hash ^ (hash >> 16);
   Eor(key, key, Operand(key, LSR, 16));
+  Bic(key, key, Operand(0xc0000000u));
 }
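The added Bic clears the two top bits of the computed hash. A plausible reading (an assumption, not stated in the diff) is that this keeps the value inside the positive Smi range, matching the 0x3fffffff masking other ports apply. In plain C++:

#include <cstdint>

// Equivalent of Bic(key, key, 0xc0000000): bit-clear the top two bits,
// i.e. hash & 0x3fffffff.
uint32_t ClampNumberHash(uint32_t hash) {
  return hash & ~UINT32_C(0xc0000000);
}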
@@ -4693,7 +4694,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
   // Check that the function's map is the same as the expected cached map.
   Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
-  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
+  int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
   Ldr(scratch2, FieldMemOperand(scratch1, offset));
   Cmp(map_in_out, scratch2);
   B(ne, no_map_match);
@@ -5115,7 +5116,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
     // 'check' in the other bits. The possible offset is limited in that we
     // use BitField to pack the data, and the underlying data type is a
     // uint32_t.
-    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
+    uint32_t delta =
+        static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
     __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
   } else {
     DCHECK(!smi_check->is_bound());
@@ -5136,9 +5138,10 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
   // 32-bit values.
   DCHECK(is_uint32(payload));
   if (payload != 0) {
-    int reg_code = RegisterBits::decode(payload);
+    uint32_t payload32 = static_cast<uint32_t>(payload);
+    int reg_code = RegisterBits::decode(payload32);
     reg_ = Register::XRegFromCode(reg_code);
-    uint64_t smi_check_delta = DeltaBits::decode(payload);
+    int smi_check_delta = DeltaBits::decode(payload32);
     DCHECK(smi_check_delta != 0);
     smi_check_ = inline_data->preceding(smi_check_delta);
   }
@@ -5149,6 +5152,7 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
 #undef __
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_ARM64

deps/v8/src/arm64/macro-assembler-arm64.h

@@ -886,8 +886,8 @@ class MacroAssembler : public Assembler {
   template<typename Field>
   void DecodeField(Register dst, Register src) {
-    static const uint64_t shift = Field::kShift;
-    static const uint64_t setbits = CountSetBits(Field::kMask, 32);
+    static const int shift = Field::kShift;
+    static const int setbits = CountSetBits(Field::kMask, 32);
     Ubfx(dst, src, shift, setbits);
   }

deps/v8/src/arm64/regexp-macro-assembler-arm64.cc

@@ -1611,6 +1611,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
 
 #endif  // V8_INTERPRETED_REGEXP
 
-}}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_ARM64

deps/v8/src/arm64/regexp-macro-assembler-arm64.h

@@ -20,6 +20,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
   RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
                             int registers_to_save);
   virtual ~RegExpMacroAssemblerARM64();
+  virtual void AbortedCodeGeneration() { masm_->AbortedCodeGeneration(); }
   virtual int stack_limit_slack();
   virtual void AdvanceCurrentPosition(int by);
   virtual void AdvanceRegister(int reg, int by);

deps/v8/src/arm64/simulator-arm64.cc

@@ -490,7 +490,7 @@ class Redirection {
   static Redirection* FromHltInstruction(Instruction* redirect_call) {
     char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
     char* addr_of_redirection =
-        addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
+        addr_of_hlt - offsetof(Redirection, redirect_call_);
     return reinterpret_cast<Redirection*>(addr_of_redirection);
   }
@@ -500,6 +500,14 @@ class Redirection {
     return redirection->external_function<void*>();
   }
 
+  static void DeleteChain(Redirection* redirection) {
+    while (redirection != nullptr) {
+      Redirection* next = redirection->next_;
+      delete redirection;
+      redirection = next;
+    }
+  }
+
  private:
   void* external_function_;
   Instruction redirect_call_;
@@ -508,6 +516,12 @@ class Redirection {
 };
 
+// static
+void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+  Redirection::DeleteChain(first);
+}
+
 // Calls into the V8 runtime are based on this very simple interface.
 // Note: To be able to return two values from some calls the code in runtime.cc
 // uses the ObjectPair structure.
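Redirections form a singly linked list that previously was never freed; the new TearDown hook walks and deletes it, presumably (an assumption here) during isolate teardown. The list walk in isolation, as a generic sketch:

// Generic restatement of Redirection::DeleteChain: save the successor
// before deleting the current node so the traversal survives the delete.
template <typename Node>
void DeleteChainSketch(Node* node) {
  while (node != nullptr) {
    Node* next = node->next_;
    delete node;
    node = next;
  }
}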
@@ -903,10 +917,11 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
       return static_cast<unsignedT>(value) >> amount;
     case ASR:
       return value >> amount;
-    case ROR:
+    case ROR: {
+      unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
       return (static_cast<unsignedT>(value) >> amount) |
-             ((value & ((1L << amount) - 1L)) <<
-              (sizeof(unsignedT) * 8 - amount));
+             ((value & mask) << (sizeof(mask) * 8 - amount));
+    }
     default:
       UNIMPLEMENTED();
       return 0;
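The old ROR expression built its mask with a `1L` literal, which mixes signedness and is too narrow for the 32-bit instantiation; the fix builds the mask in the unsigned operand type. The same computation specialized to 32 bits, assuming (as the simulator appears to guarantee for rotates) a non-zero amount below the register width:

#include <cstdint>

uint32_t RotateRight32(uint32_t value, unsigned amount) {
  // Precondition: 0 < amount < 32.
  uint32_t mask = (UINT32_C(1) << amount) - 1;  // low `amount` bits
  return (value >> amount) | ((value & mask) << (32 - amount));
}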
@@ -1399,7 +1414,8 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
     int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
     AddSubHelper(instr, op2);
   } else {
-    int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
+    int32_t op2 = static_cast<int32_t>(
+        ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount));
     AddSubHelper(instr, op2);
   }
 }
@@ -1410,7 +1426,7 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
   if (instr->SixtyFourBits()) {
     AddSubHelper<int64_t>(instr, op2);
   } else {
-    AddSubHelper<int32_t>(instr, op2);
+    AddSubHelper<int32_t>(instr, static_cast<int32_t>(op2));
   }
 }
@@ -1457,7 +1473,7 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
   if (instr->SixtyFourBits()) {
     LogicalHelper<int64_t>(instr, instr->ImmLogical());
   } else {
-    LogicalHelper<int32_t>(instr, instr->ImmLogical());
+    LogicalHelper<int32_t>(instr, static_cast<int32_t>(instr->ImmLogical()));
   }
 }
@@ -1879,7 +1895,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
 
   // Get the shifted immediate.
   int64_t shift = instr->ShiftMoveWide() * 16;
-  int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
+  int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
 
   // Compute the new value.
   switch (mov_op) {
@@ -1912,25 +1928,32 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
 
 void Simulator::VisitConditionalSelect(Instruction* instr) {
+  uint64_t new_val = xreg(instr->Rn());
   if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
-    uint64_t new_val = xreg(instr->Rm());
+    new_val = xreg(instr->Rm());
     switch (instr->Mask(ConditionalSelectMask)) {
-      case CSEL_w: set_wreg(instr->Rd(), new_val); break;
-      case CSEL_x: set_xreg(instr->Rd(), new_val); break;
-      case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
-      case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
-      case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
-      case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
-      case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
-      case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
+      case CSEL_w:
+      case CSEL_x:
+        break;
+      case CSINC_w:
+      case CSINC_x:
+        new_val++;
+        break;
+      case CSINV_w:
+      case CSINV_x:
+        new_val = ~new_val;
+        break;
+      case CSNEG_w:
+      case CSNEG_x:
+        new_val = -new_val;
+        break;
       default: UNIMPLEMENTED();
     }
+  }
+  if (instr->SixtyFourBits()) {
+    set_xreg(instr->Rd(), new_val);
   } else {
-    if (instr->SixtyFourBits()) {
-      set_xreg(instr->Rd(), xreg(instr->Rn()));
-    } else {
-      set_wreg(instr->Rd(), wreg(instr->Rn()));
-    }
+    set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
   }
 }
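The rewrite computes one value and writes it back once, truncating for W-form destinations; previously each case wrote the register itself and the condition-holds path was duplicated. A condensed model of the new flow:

#include <cstdint>

enum class CselOp { kSel, kInc, kInv, kNeg };

// Keep Rn when the condition holds; otherwise derive the result from Rm.
// The caller truncates to 32 bits for the W forms, as above.
uint64_t CselResult(bool condition_failed, uint64_t rn, uint64_t rm,
                    CselOp op) {
  uint64_t new_val = rn;
  if (condition_failed) {
    new_val = rm;
    switch (op) {
      case CselOp::kSel: break;
      case CselOp::kInc: new_val += 1; break;
      case CselOp::kInv: new_val = ~new_val; break;
      case CselOp::kNeg: new_val = 0 - new_val; break;
    }
  }
  return new_val;
}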
@@ -1940,13 +1963,27 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
   unsigned src = instr->Rn();
 
   switch (instr->Mask(DataProcessing1SourceMask)) {
-    case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
-    case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
-    case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
-    case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
-    case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
-    case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
-    case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
+    case RBIT_w:
+      set_wreg(dst, ReverseBits(wreg(src)));
+      break;
+    case RBIT_x:
+      set_xreg(dst, ReverseBits(xreg(src)));
+      break;
+    case REV16_w:
+      set_wreg(dst, ReverseBytes(wreg(src), 1));
+      break;
+    case REV16_x:
+      set_xreg(dst, ReverseBytes(xreg(src), 1));
+      break;
+    case REV_w:
+      set_wreg(dst, ReverseBytes(wreg(src), 2));
+      break;
+    case REV32_x:
+      set_xreg(dst, ReverseBytes(xreg(src), 2));
+      break;
+    case REV_x:
+      set_xreg(dst, ReverseBytes(xreg(src), 3));
+      break;
     case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
       break;
     case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
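The enum modes give way to small integers; judging from the deleted permutation tables below, mode n selects byte reversal within 2^n-byte lanes (1 = REV16, 2 = REV32/REV_w, 3 = full REV_x). A hedged sketch of that reading:

#include <cstdint>
#include <cstring>

// Sketch only: reverse bytes within 2^mode-byte lanes of a 64-bit value.
// Assumes a little-endian host, like the byte-array decomposition in the
// deleted simulator helper.
uint64_t ReverseBytesSketch(uint64_t value, int mode) {
  const int lane = 1 << mode;  // 2, 4 or 8 bytes
  uint8_t b[8];
  std::memcpy(b, &value, 8);
  for (int base = 0; base < 8; base += lane) {
    for (int i = 0; i < lane / 2; ++i) {
      uint8_t tmp = b[base + i];
      b[base + i] = b[base + lane - 1 - i];
      b[base + lane - 1 - i] = tmp;
    }
  }
  std::memcpy(&value, b, 8);
  return value;
}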
@@ -1964,44 +2001,6 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
 }
 
-uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
-  DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
-  uint64_t result = 0;
-  for (unsigned i = 0; i < num_bits; i++) {
-    result = (result << 1) | (value & 1);
-    value >>= 1;
-  }
-  return result;
-}
-
-uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
-  // Split the 64-bit value into an 8-bit array, where b[0] is the least
-  // significant byte, and b[7] is the most significant.
-  uint8_t bytes[8];
-  uint64_t mask = 0xff00000000000000UL;
-  for (int i = 7; i >= 0; i--) {
-    bytes[i] = (value & mask) >> (i * 8);
-    mask >>= 8;
-  }
-
-  // Permutation tables for REV instructions.
-  //  permute_table[Reverse16] is used by REV16_x, REV16_w
-  //  permute_table[Reverse32] is used by REV32_x, REV_w
-  //  permute_table[Reverse64] is used by REV_x
-  DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
-  static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
-                                               {4, 5, 6, 7, 0, 1, 2, 3},
-                                               {0, 1, 2, 3, 4, 5, 6, 7} };
-  uint64_t result = 0;
-  for (int i = 0; i < 8; i++) {
-    result <<= 8;
-    result |= bytes[permute_table[mode][i]];
-  }
-  return result;
-}
-
 template <typename T>
 void Simulator::DataProcessing2Source(Instruction* instr) {
   Shift shift_op = NO_SHIFT;
@@ -2121,7 +2120,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
   if (instr->SixtyFourBits()) {
     set_xreg(instr->Rd(), result);
   } else {
-    set_wreg(instr->Rd(), result);
+    set_wreg(instr->Rd(), static_cast<int32_t>(result));
   }
 }
@@ -2138,8 +2137,9 @@ void Simulator::BitfieldHelper(Instruction* instr) {
     mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
                                : static_cast<T>(-1);
   } else {
-    mask = ((1L << (S + 1)) - 1);
-    mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+    uint64_t umask = ((1L << (S + 1)) - 1);
+    umask = (umask >> R) | (umask << (reg_size - R));
+    mask = static_cast<T>(umask);
     diff += reg_size;
   }
@@ -2563,7 +2563,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
 
   // Bail out early for zero inputs.
   if (mantissa == 0) {
-    return sign << sign_offset;
+    return static_cast<T>(sign << sign_offset);
   }
 
   // If all bits in the exponent are set, the value is infinite or NaN.
@@ -2580,9 +2580,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
       //  FPTieEven rounding mode handles overflows using infinities.
       exponent = infinite_exponent;
       mantissa = 0;
-      return (sign << sign_offset) |
-             (exponent << exponent_offset) |
-             (mantissa << mantissa_offset);
+      return static_cast<T>((sign << sign_offset) |
+                            (exponent << exponent_offset) |
+                            (mantissa << mantissa_offset));
     }
 
   // Calculate the shift required to move the top mantissa bit to the proper
@@ -2605,7 +2605,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
     // non-zero result after rounding.
     if (shift > (highest_significant_bit + 1)) {
      // The result will always be +/-0.0.
-      return sign << sign_offset;
+      return static_cast<T>(sign << sign_offset);
     }
 
     // Properly encode the exponent for a subnormal output.
@@ -2624,9 +2624,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
     uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
     T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
 
-    T result = (sign << sign_offset) |
-               (exponent << exponent_offset) |
-               ((mantissa >> shift) << mantissa_offset);
+    T result =
+        static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+                       ((mantissa >> shift) << mantissa_offset));
 
     // A very large mantissa can overflow during rounding. If this happens, the
     // exponent should be incremented and the mantissa set to 1.0 (encoded as
@@ -2641,9 +2641,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
     // We have to shift the mantissa to the left (or not at all). The input
     // mantissa is exactly representable in the output mantissa, so apply no
     // rounding correction.
-    return (sign << sign_offset) |
-           (exponent << exponent_offset) |
-           ((mantissa << -shift) << mantissa_offset);
+    return static_cast<T>((sign << sign_offset) |
+                          (exponent << exponent_offset) |
+                          ((mantissa << -shift) << mantissa_offset));
   }
 }
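FPRound assembles the IEEE-754 fields in 64-bit arithmetic while the result type T is the narrower bit-pattern integer (e.g. a 32-bit value for a float), so every return now needs the explicit narrowing cast. The packing step for single precision, with the standard field offsets:

#include <cstdint>

// Single-precision layout: sign at bit 31, 8 exponent bits at bit 23,
// 23 mantissa bits at bit 0.
uint32_t PackFloat32Bits(uint64_t sign, uint64_t exponent, uint64_t mantissa) {
  return static_cast<uint32_t>((sign << 31) | (exponent << 23) | mantissa);
}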
@@ -2838,7 +2838,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
       uint32_t sign = raw >> 63;
       uint32_t exponent = (1 << 8) - 1;
-      uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
+      uint32_t payload =
+          static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
       payload |= (1 << 22);  // Force a quiet NaN.
       return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
@@ -2859,7 +2860,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
       // Extract the IEEE-754 double components.
       uint32_t sign = raw >> 63;
       // Extract the exponent and remove the IEEE-754 encoding bias.
-      int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
+      int32_t exponent =
+          static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
       // Extract the mantissa and add the implicit '1' bit.
       uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
       if (std::fpclassify(value) == FP_NORMAL) {
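The bit positions used here are the standard IEEE-754 double layout: bit 63
is the sign, bits 62..52 the biased exponent, bits 51..0 the mantissa. The
same exponent extraction in plain JavaScript with a DataView (a sketch, not
V8 code):

    function exponentOf(x) {
      var view = new DataView(new ArrayBuffer(8));
      view.setFloat64(0, x);                // big-endian by default
      var hi = view.getUint32(0);           // sign, exponent, top mantissa
      return ((hi >>> 20) & 0x7ff) - 1023;  // bits 62..52, minus the bias
    }
    exponentOf(1.0);  // 0
    exponentOf(8.0);  // 3
    exponentOf(0.5);  // -1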
@@ -3210,11 +3212,11 @@ void Simulator::VisitSystem(Instruction* instr) {
     case MSR: {
       switch (instr->ImmSystemRegister()) {
         case NZCV:
-          nzcv().SetRawValue(xreg(instr->Rt()));
+          nzcv().SetRawValue(wreg(instr->Rt()));
           LogSystemRegister(NZCV);
           break;
         case FPCR:
-          fpcr().SetRawValue(xreg(instr->Rt()));
+          fpcr().SetRawValue(wreg(instr->Rt()));
           LogSystemRegister(FPCR);
           break;
         default: UNIMPLEMENTED();
@@ -3835,6 +3837,7 @@ void Simulator::DoPrintf(Instruction* instr) {
 #endif  // USE_SIMULATOR
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 #endif  // V8_TARGET_ARCH_ARM64
@@ -72,12 +72,6 @@ class SimulatorStack : public v8::internal::AllStatic {
 #else  // !defined(USE_SIMULATOR)
-enum ReverseByteMode {
-  Reverse16 = 0,
-  Reverse32 = 1,
-  Reverse64 = 2
-};
 // The proper way to initialize a simulated system register (such as NZCV) is as
 // follows:
@@ -169,6 +163,8 @@ class Simulator : public DecoderVisitor {
   static void Initialize(Isolate* isolate);
+  static void TearDown(HashMap* i_cache, Redirection* first);
   static Simulator* current(v8::internal::Isolate* isolate);
   class CallArgument;
@@ -706,9 +702,6 @@ class Simulator : public DecoderVisitor {
   template <typename T>
   void BitfieldHelper(Instruction* instr);
-  uint64_t ReverseBits(uint64_t value, unsigned num_bits);
-  uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
   template <typename T>
   T FPDefaultNaN() const;
@@ -884,10 +877,10 @@ class Simulator : public DecoderVisitor {
                                             FUNCTION_ADDR(entry), \
                                             p0, p1, p2, p3, p4))
 #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  Simulator::current(Isolate::Current())->CallRegExp( \
-      entry, \
-      p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+  static_cast<int>( \
+      Simulator::current(Isolate::Current()) \
+          ->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
 // The simulator has its own stack. Thus it has a different stack limit from
@@ -74,7 +74,7 @@ int CountSetBits(uint64_t value, int width) {
   value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
   value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
-  return value;
+  return static_cast<int>(value);
 }
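CountSetBits is the classic SWAR population count: each step sums adjacent
fields of doubled width until one field holds the whole count. The same
technique for 32-bit values in plain JavaScript (a sketch, not V8 code):

    function countSetBits32(v) {
      v = ((v >>> 1) & 0x55555555) + (v & 0x55555555);   // pairs of bits
      v = ((v >>> 2) & 0x33333333) + (v & 0x33333333);   // nibbles
      v = ((v >>> 4) & 0x0f0f0f0f) + (v & 0x0f0f0f0f);   // bytes
      v = ((v >>> 8) & 0x00ff00ff) + (v & 0x00ff00ff);   // halfwords
      v = ((v >>> 16) & 0x0000ffff) + (v & 0x0000ffff);  // whole word
      return v;
    }
    countSetBits32(0xf0f0f0f0);  // 16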
@@ -89,6 +89,7 @@ int MaskToBit(uint64_t mask) {
 }
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 #endif  // V8_TARGET_ARCH_ARM64
@@ -61,6 +61,49 @@ uint64_t LargestPowerOf2Divisor(uint64_t value);
 int MaskToBit(uint64_t mask);
+template <typename T>
+T ReverseBits(T value) {
+  DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
+         (sizeof(value) == 8));
+  T result = 0;
+  for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+    result = (result << 1) | (value & 1);
+    value >>= 1;
+  }
+  return result;
+}
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+  DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
+  DCHECK((1U << block_bytes_log2) <= sizeof(value));
+  // Split the 64-bit value into an 8-bit array, where b[0] is the least
+  // significant byte, and b[7] is the most significant.
+  uint8_t bytes[8];
+  uint64_t mask = 0xff00000000000000;
+  for (int i = 7; i >= 0; i--) {
+    bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+    mask >>= 8;
+  }
+  // Permutation tables for REV instructions.
+  //  permute_table[0] is used by REV16_x, REV16_w
+  //  permute_table[1] is used by REV32_x, REV_w
+  //  permute_table[2] is used by REV_x
+  DCHECK((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+                                              {4, 5, 6, 7, 0, 1, 2, 3},
+                                              {0, 1, 2, 3, 4, 5, 6, 7}};
+  T result = 0;
+  for (int i = 0; i < 8; i++) {
+    result <<= 8;
+    result |= bytes[permute_table[block_bytes_log2 - 1][i]];
+  }
+  return result;
+}
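Each permutation row maps output byte i (most significant first) to a source
byte index, with b[0] the least significant. A plain-JavaScript sketch using
BigInt for the 64-bit value (illustrative only, not V8 code), with worked
results for the three REV variants:

    function reverseBytes(value, blockBytesLog2) {
      var bytes = [];
      for (var i = 0; i < 8; i++) bytes.push((value >> BigInt(i * 8)) & 0xffn);
      var permute = [
        [6, 7, 4, 5, 2, 3, 0, 1],  // REV16: swap bytes in each halfword
        [4, 5, 6, 7, 0, 1, 2, 3],  // REV32: reverse bytes in each word
        [0, 1, 2, 3, 4, 5, 6, 7],  // REV:   reverse all eight bytes
      ][blockBytesLog2 - 1];
      var result = 0n;
      for (var i = 0; i < 8; i++) result = (result << 8n) | bytes[permute[i]];
      return result;
    }
    var v = 0x0102030405060708n;
    reverseBytes(v, 1);  // 0x0201040306050807n (REV16)
    reverseBytes(v, 2);  // 0x0403020108070605n (REV32)
    reverseBytes(v, 3);  // 0x0807060504030201n (REV)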
 // NaN tests.
 inline bool IsSignallingNaN(double num) {
   uint64_t raw = double_to_rawbits(num);
@@ -2,17 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-var $iteratorCreateResultObject;
 var $arrayValues;
-(function(global, shared, exports) {
+(function(global, utils) {
 "use strict";
 %CheckIsBootstrapping();
 var GlobalArray = global.Array;
-var GlobalObject = global.Object;
 macro TYPED_ARRAYS(FUNCTION)
 FUNCTION(Uint8Array)
@@ -122,19 +120,19 @@ function ArrayKeys() {
 }
-%FunctionSetPrototype(ArrayIterator, new GlobalObject());
+%FunctionSetPrototype(ArrayIterator, {__proto__: $iteratorPrototype});
 %FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
-$installFunctions(ArrayIterator.prototype, DONT_ENUM, [
+utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
   'next', ArrayIteratorNext
 ]);
-$setFunctionName(ArrayIteratorIterator, symbolIterator);
+utils.SetFunctionName(ArrayIteratorIterator, symbolIterator);
 %AddNamedProperty(ArrayIterator.prototype, symbolIterator,
                   ArrayIteratorIterator, DONT_ENUM);
 %AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
                   "Array Iterator", READ_ONLY | DONT_ENUM);
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
   // No 'values' since it breaks webcompat: http://crbug.com/409858
   'entries', ArrayEntries,
   'keys', ArrayKeys
@@ -153,7 +151,13 @@ endmacro
 TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
-$iteratorCreateResultObject = CreateIteratorResultObject;
+// -------------------------------------------------------------------
+// Exports
+utils.Export(function(to) {
+  to.ArrayIteratorCreateResultObject = CreateIteratorResultObject;
+});
 $arrayValues = ArrayValues;
 })
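Giving ArrayIterator.prototype a parent of $iteratorPrototype wires it into
the ES6 %IteratorPrototype% chain, which is observable from user code:

    var it = [][Symbol.iterator]();
    var ArrayIteratorPrototype = Object.getPrototypeOf(it);
    var IteratorPrototype = Object.getPrototypeOf(ArrayIteratorPrototype);
    // %IteratorPrototype%[Symbol.iterator] returns the receiver itself,
    // so every built-in iterator is also iterable.
    IteratorPrototype[Symbol.iterator].call(it) === it;  // true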
283
deps/v8/src/array.js vendored
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 var $arrayConcat;
-var $arrayJoin;
 var $arrayPush;
 var $arrayPop;
 var $arrayShift;
@@ -11,13 +10,34 @@ var $arraySlice;
 var $arraySplice;
 var $arrayUnshift;
-(function(global, shared, exports) {
+(function(global, utils) {
 "use strict";
 %CheckIsBootstrapping();
+// -------------------------------------------------------------------
+// Imports
 var GlobalArray = global.Array;
+var InternalArray = utils.InternalArray;
+var InternalPackedArray = utils.InternalPackedArray;
+var Delete;
+var MathMin;
+var ObjectHasOwnProperty;
+var ObjectIsFrozen;
+var ObjectIsSealed;
+var ObjectToString;
+utils.Import(function(from) {
+  Delete = from.Delete;
+  MathMin = from.MathMin;
+  ObjectHasOwnProperty = from.ObjectHasOwnProperty;
+  ObjectIsFrozen = from.ObjectIsFrozen;
+  ObjectIsSealed = from.ObjectIsSealed;
+  ObjectToString = from.ObjectToString;
+});
 // -------------------------------------------------------------------
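The utils object replaces the previous $-prefixed globals with explicit
imports and exports between natives files. A simplified sketch of the
registration pattern (illustrative only; the real container and post-natives
hook live in V8's bootstrapper, and all names below are made up):

    var exportsContainer = {};
    var pendingImports = [];
    function Export(f) { f(exportsContainer); }
    function Import(f) { pendingImports.push(f); }
    // Run after every natives file has executed, so file order is irrelevant.
    function PostNatives() {
      pendingImports.forEach(function(f) { f(exportsContainer); });
    }
    // Producer (v8natives.js-style):
    Export(function(to) { to.ObjectToString = Object.prototype.toString; });
    // Consumer (array.js-style):
    var ObjectToString;
    Import(function(from) { ObjectToString = from.ObjectToString; });
    PostNatives();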
@@ -223,7 +243,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
     for (var i = start_i; i < limit; ++i) {
       var current = array[i];
       if (!IS_UNDEFINED(current) || i in array) {
-        %AddElement(deleted_elements, i - start_i, current, NONE);
+        %AddElement(deleted_elements, i - start_i, current);
       }
     }
   } else {
@@ -234,7 +254,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
       if (key >= start_i) {
         var current = array[key];
         if (!IS_UNDEFINED(current) || key in array) {
-          %AddElement(deleted_elements, key - start_i, current, NONE);
+          %AddElement(deleted_elements, key - start_i, current);
         }
       }
     }
@@ -251,7 +271,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
   // Move data to new array.
   var new_array = new InternalArray(
       // Clamp array length to 2^32-1 to avoid early RangeError.
-      $min(len - del_count + num_additional_args, 0xffffffff));
+      MathMin(len - del_count + num_additional_args, 0xffffffff));
   var big_indices;
   var indices = %GetArrayKeys(array, len);
   if (IS_NUMBER(indices)) {
@@ -283,7 +303,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
       if (!IS_UNDEFINED(current) || key in array) {
         var new_key = key - del_count + num_additional_args;
         new_array[new_key] = current;
-        if (new_key > 0xffffffff) {
+        if (new_key > 0xfffffffe) {
           big_indices = big_indices || new InternalArray();
           big_indices.push(new_key);
         }
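The 0xffffffff to 0xfffffffe fix matters because the largest valid array
index is 2^32 - 2: length itself is capped at 2^32 - 1, so a key of exactly
0xffffffff is already an ordinary property, not an element:

    var a = [];
    a[0xfffffffe] = 'x';  // largest real index
    a.length;             // 4294967295 (2^32 - 1)
    a[0xffffffff] = 'y';  // stored as a plain property
    a.length;             // still 4294967295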
@@ -316,7 +336,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
       var current = array[index];
       // The spec requires [[DefineOwnProperty]] here, %AddElement is close
       // enough (in that it ignores the prototype).
-      %AddElement(deleted_elements, i, current, NONE);
+      %AddElement(deleted_elements, i, current);
     }
   }
 }
@@ -372,26 +392,27 @@ function ArrayToString() {
     func = array.join;
   }
   if (!IS_SPEC_FUNCTION(func)) {
-    return %_CallFunction(array, $objectToString);
+    return %_CallFunction(array, ObjectToString);
   }
   return %_CallFunction(array, func);
 }
+function InnerArrayToLocaleString(array, length) {
+  var len = TO_UINT32(length);
+  if (len === 0) return "";
+  return Join(array, len, ',', ConvertToLocaleString);
+}
 function ArrayToLocaleString() {
   var array = $toObject(this);
   var arrayLen = array.length;
-  var len = TO_UINT32(arrayLen);
-  if (len === 0) return "";
-  return Join(array, len, ',', ConvertToLocaleString);
+  return InnerArrayToLocaleString(array, arrayLen);
 }
-function ArrayJoin(separator) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
-  var array = TO_OBJECT_INLINE(this);
-  var length = TO_UINT32(array.length);
+function InnerArrayJoin(separator, array, length) {
   if (IS_UNDEFINED(separator)) {
     separator = ',';
   } else if (!IS_STRING(separator)) {
@@ -413,6 +434,16 @@ function ArrayJoin(separator) {
 }
+function ArrayJoin(separator) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
+  var array = TO_OBJECT_INLINE(this);
+  var length = TO_UINT32(array.length);
+  return InnerArrayJoin(separator, array, length);
+}
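This split is the recurring refactor in this file: the spec-facing wrapper
keeps the receiver check and length extraction, while the Inner* core works
on an already-validated (array, length) pair so the typed-array natives can
reuse it. A standalone sketch of the shape in plain JavaScript (not the
natives dialect; names are illustrative):

    function InnerJoin(separator, array, length) {
      if (separator === undefined) separator = ',';
      var out = '';
      for (var i = 0; i < length; i++) {
        if (i > 0) out += separator;
        var e = array[i];
        if (e !== undefined && e !== null) out += String(e);
      }
      return out;
    }
    function join(separator) {
      if (this == null) throw new TypeError('join called on null/undefined');
      var array = Object(this);
      var length = array.length >>> 0;
      return InnerJoin(separator, array, length);
    }
    join.call(['a', , 'c'], '-');  // "a--c"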
 function ObservedArrayPop(n) {
   n--;
   var value = this[n];
@@ -447,7 +478,7 @@ function ArrayPop() {
   n--;
   var value = array[n];
-  $delete(array, $toName(n), true);
+  Delete(array, $toName(n), true);
   array.length = n;
   return value;
 }
@@ -557,18 +588,7 @@ function SparseReverse(array, len) {
 }
-function ArrayReverse() {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
-  var array = TO_OBJECT_INLINE(this);
-  var len = TO_UINT32(array.length);
-  if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
-    %NormalizeElements(array);
-    SparseReverse(array, len);
-    return array;
-  }
+function InnerArrayReverse(array, len) {
   var j = len - 1;
   for (var i = 0; i < j; i++, j--) {
     var current_i = array[i];
} }
function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT_INLINE(this);
var len = TO_UINT32(array.length);
if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
%NormalizeElements(array);
SparseReverse(array, len);
return array;
}
return InnerArrayReverse(array, len);
}
function ObservedArrayShift(len) { function ObservedArrayShift(len) {
var first = this[0]; var first = this[0];
@ -620,7 +656,7 @@ function ArrayShift() {
return; return;
} }
if ($objectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed); if (ObjectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (%IsObserved(array)) if (%IsObserved(array))
return ObservedArrayShift.call(array, len); return ObservedArrayShift.call(array, len);
@ -671,7 +707,7 @@ function ArrayUnshift(arg1) { // length == 1
var num_arguments = %_ArgumentsLength(); var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) && if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
!$objectIsSealed(array)) { !ObjectIsSealed(array)) {
SparseMove(array, 0, 0, len, num_arguments); SparseMove(array, 0, 0, len, num_arguments);
} else { } else {
SimpleMove(array, 0, 0, len, num_arguments); SimpleMove(array, 0, 0, len, num_arguments);
@ -817,9 +853,9 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count; deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0; var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
if (del_count != num_elements_to_add && $objectIsSealed(array)) { if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
throw MakeTypeError(kArrayFunctionsOnSealed); throw MakeTypeError(kArrayFunctionsOnSealed);
} else if (del_count > 0 && $objectIsFrozen(array)) { } else if (del_count > 0 && ObjectIsFrozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen); throw MakeTypeError(kArrayFunctionsOnFrozen);
} }
@ -854,9 +890,7 @@ function ArraySplice(start, delete_count) {
} }
function ArraySort(comparefn) { function InnerArraySort(length, comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
// In-place QuickSort algorithm. // In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency. // For short (length <= 22) arrays, insertion sort is used for efficiency.
@ -1101,7 +1135,6 @@ function ArraySort(comparefn) {
return first_undefined; return first_undefined;
}; };
var length = TO_UINT32(this.length);
if (length < 2) return this; if (length < 2) return this;
var is_array = IS_ARRAY(this); var is_array = IS_ARRAY(this);
@ -1140,17 +1173,19 @@ function ArraySort(comparefn) {
} }
function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
var array = $toObject(this);
var length = TO_UINT32(array.length);
return %_CallFunction(array, length, comparefn, InnerArraySort);
}
 // The following functions cannot be made efficient on sparse arrays while
 // preserving the semantics, since the calls to the receiver function can add
 // or delete elements from the array.
-function ArrayFilter(f, receiver) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = $toObject(this);
-  var length = $toUint32(array.length);
+function InnerArrayFilter(f, receiver, array, length) {
   if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
   var needs_wrapper = false;
   if (IS_NULL(receiver)) {
@@ -1159,7 +1194,6 @@ function ArrayFilter(f, receiver) {
     needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
   }
-  var result = new GlobalArray();
   var accumulator = new InternalArray();
   var accumulator_length = 0;
   var is_array = IS_ARRAY(array);
@@ -1175,19 +1209,23 @@ function ArrayFilter(f, receiver) {
       }
     }
   }
-  %MoveArrayContents(accumulator, result);
-  return result;
+  return accumulator;
 }
-function ArrayForEach(f, receiver) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
+function ArrayFilter(f, receiver) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = $toObject(this);
-  var length = TO_UINT32(array.length);
+  var length = $toUint32(array.length);
+  var accumulator = InnerArrayFilter(f, receiver, array, length);
+  var result = new GlobalArray();
+  %MoveArrayContents(accumulator, result);
+  return result;
+}
+function InnerArrayForEach(f, receiver, array, length) {
   if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
   var needs_wrapper = false;
   if (IS_NULL(receiver)) {
@@ -1209,17 +1247,18 @@ function ArrayForEach(f, receiver) {
   }
 }
-// Executes the function once for each element present in the
-// array until it finds one where callback returns true.
-function ArraySome(f, receiver) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
+function ArrayForEach(f, receiver) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = $toObject(this);
   var length = TO_UINT32(array.length);
+  InnerArrayForEach(f, receiver, array, length);
+}
+function InnerArraySome(f, receiver, array, length) {
   if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
   var needs_wrapper = false;
   if (IS_NULL(receiver)) {
@@ -1243,14 +1282,20 @@ function ArraySome(f, receiver) {
 }
-function ArrayEvery(f, receiver) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
+// Executes the function once for each element present in the
+// array until it finds one where callback returns true.
+function ArraySome(f, receiver) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = $toObject(this);
   var length = TO_UINT32(array.length);
+  return InnerArraySome(f, receiver, array, length);
+}
+function InnerArrayEvery(f, receiver, array, length) {
   if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
   var needs_wrapper = false;
   if (IS_NULL(receiver)) {
@@ -1273,15 +1318,18 @@ function ArrayEvery(f, receiver) {
   return true;
 }
-function ArrayMap(f, receiver) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
+function ArrayEvery(f, receiver) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping and side effects are visible.
   var array = $toObject(this);
   var length = TO_UINT32(array.length);
+  return InnerArrayEvery(f, receiver, array, length);
+}
+function InnerArrayMap(f, receiver, array, length) {
   if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
   var needs_wrapper = false;
   if (IS_NULL(receiver)) {
@@ -1290,7 +1338,6 @@ function ArrayMap(f, receiver) {
     needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
   }
-  var result = new GlobalArray();
   var accumulator = new InternalArray(length);
   var is_array = IS_ARRAY(array);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@@ -1303,15 +1350,29 @@ function ArrayMap(f, receiver) {
       accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
     }
   }
+  return accumulator;
+}
+function ArrayMap(f, receiver) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
+  var array = $toObject(this);
+  var length = TO_UINT32(array.length);
+  var accumulator = InnerArrayMap(f, receiver, array, length);
+  var result = new GlobalArray();
   %MoveArrayContents(accumulator, result);
   return result;
 }
-function ArrayIndexOf(element, index) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
-  var length = TO_UINT32(this.length);
+// For .indexOf, we don't need to pass in the number of arguments
+// at the callsite since ToInteger(undefined) == 0; however, for
+// .lastIndexOf, we need to pass it, since the behavior for passing
+// undefined is 0 but for not including the argument is length-1.
+function InnerArrayIndexOf(element, index, length) {
   if (length == 0) return -1;
   if (IS_UNDEFINED(index)) {
     index = 0;
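The distinction in the comment above is observable from user code: passing
an explicit undefined is not the same as omitting the argument for
lastIndexOf, while indexOf cannot tell the two apart:

    [1, 2, 1].indexOf(1, undefined);      // 0  (fromIndex coerces to 0)
    [1, 2, 1].lastIndexOf(1);             // 2  (defaults to length - 1)
    [1, 2, 1].lastIndexOf(1, undefined);  // 0  (ToInteger(undefined) == 0)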
@@ -1365,12 +1426,17 @@ function ArrayIndexOf(element, index) {
 }
-function ArrayLastIndexOf(element, index) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
+function ArrayIndexOf(element, index) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
   var length = TO_UINT32(this.length);
+  return %_CallFunction(this, element, index, length, InnerArrayIndexOf);
+}
+function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
   if (length == 0) return -1;
-  if (%_ArgumentsLength() < 2) {
+  if (argumentsLength < 2) {
     index = length - 1;
   } else {
     index = TO_INTEGER(index);
@@ -1418,21 +1484,23 @@ function ArrayLastIndexOf(element, index) {
 }
-function ArrayReduce(callback, current) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = $toObject(this);
-  var length = $toUint32(array.length);
+function ArrayLastIndexOf(element, index) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
+  var length = TO_UINT32(this.length);
+  return %_CallFunction(this, element, index, length,
+                        %_ArgumentsLength(), InnerArrayLastIndexOf);
+}
+function InnerArrayReduce(callback, current, array, length, argumentsLength) {
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError(kCalledNonCallable, callback);
   }
   var is_array = IS_ARRAY(array);
   var i = 0;
-  find_initial: if (%_ArgumentsLength() < 2) {
+  find_initial: if (argumentsLength < 2) {
    for (; i < length; i++) {
      if (HAS_INDEX(array, i, is_array)) {
        current = array[i++];
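The same argument-count sensitivity applies to reduce: an explicit undefined
initial value takes part in the fold rather than being treated as omitted:

    [1, 2, 3].reduce(function(a, b) { return a + b; });             // 6
    [1, 2, 3].reduce(function(a, b) { return a + b; }, undefined);  // NaN
    [].reduce(function(a, b) { return a + b; });  // TypeError: no initial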
@@ -1455,21 +1523,27 @@ function ArrayReduce(callback, current) {
 }
-function ArrayReduceRight(callback, current) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
-  // Pull out the length so that side effects are visible before the
-  // callback function is checked.
+function ArrayReduce(callback, current) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping and side effects are visible.
   var array = $toObject(this);
   var length = $toUint32(array.length);
+  return InnerArrayReduce(callback, current, array, length,
+                          %_ArgumentsLength());
+}
+function InnerArrayReduceRight(callback, current, array, length,
+                               argumentsLength) {
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError(kCalledNonCallable, callback);
   }
   var is_array = IS_ARRAY(array);
   var i = length - 1;
-  find_initial: if (%_ArgumentsLength() < 2) {
+  find_initial: if (argumentsLength < 2) {
     for (; i >= 0; i--) {
       if (HAS_INDEX(array, i, is_array)) {
         current = array[i--];
@@ -1491,6 +1565,18 @@ function ArrayReduceRight(callback, current) {
   return current;
 }
+function ArrayReduceRight(callback, current) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
+  // Pull out the length so that side effects are visible before the
+  // callback function is checked.
+  var array = $toObject(this);
+  var length = $toUint32(array.length);
+  return InnerArrayReduceRight(callback, current, array, length,
+                               %_ArgumentsLength());
+}
 // ES5, 15.4.3.2
 function ArrayIsArray(obj) {
   return IS_ARRAY(obj);
@@ -1519,7 +1605,7 @@ var unscopables = {
                   DONT_ENUM | READ_ONLY);
 // Set up non-enumerable functions on the Array object.
-$installFunctions(GlobalArray, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray, DONT_ENUM, [
   "isArray", ArrayIsArray
 ]);
@@ -1540,7 +1626,7 @@ var getFunction = function(name, jsBuiltin, len) {
 // set their names.
 // Manipulate the length of some of the functions to meet
 // expectations set by ECMA-262 or Mozilla.
-$installFunctions(GlobalArray.prototype, DONT_ENUM, [
+utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
   "toString", getFunction("toString", ArrayToString),
   "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
   "join", getFunction("join", ArrayJoin),
@@ -1569,7 +1655,7 @@ $installFunctions(GlobalArray.prototype, DONT_ENUM, [
 // The internal Array prototype doesn't need to be fancy, since it's never
 // exposed to user code.
 // Adding only the functions that are actually used.
-$setUpLockedPrototype(InternalArray, GlobalArray(), [
+utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
   "concat", getFunction("concat", ArrayConcatJS),
   "indexOf", getFunction("indexOf", ArrayIndexOf),
   "join", getFunction("join", ArrayJoin),
@@ -1579,15 +1665,36 @@ $setUpLockedPrototype(InternalArray, GlobalArray(), [
   "splice", getFunction("splice", ArraySplice)
 ]);
-$setUpLockedPrototype(InternalPackedArray, GlobalArray(), [
+utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
   "join", getFunction("join", ArrayJoin),
   "pop", getFunction("pop", ArrayPop),
   "push", getFunction("push", ArrayPush),
   "shift", getFunction("shift", ArrayShift)
 ]);
+// -------------------------------------------------------------------
+// Exports
+utils.Export(function(to) {
+  to.ArrayIndexOf = ArrayIndexOf;
+  to.ArrayJoin = ArrayJoin;
+  to.ArrayToString = ArrayToString;
+  to.InnerArrayEvery = InnerArrayEvery;
+  to.InnerArrayFilter = InnerArrayFilter;
+  to.InnerArrayForEach = InnerArrayForEach;
+  to.InnerArrayIndexOf = InnerArrayIndexOf;
+  to.InnerArrayJoin = InnerArrayJoin;
+  to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
+  to.InnerArrayMap = InnerArrayMap;
+  to.InnerArrayReduce = InnerArrayReduce;
+  to.InnerArrayReduceRight = InnerArrayReduceRight;
+  to.InnerArrayReverse = InnerArrayReverse;
+  to.InnerArraySome = InnerArraySome;
+  to.InnerArraySort = InnerArraySort;
+  to.InnerArrayToLocaleString = InnerArrayToLocaleString;
+});
 $arrayConcat = ArrayConcatJS;
-$arrayJoin = ArrayJoin;
 $arrayPush = ArrayPush;
 $arrayPop = ArrayPop;
 $arrayShift = ArrayShift;
@@ -1595,4 +1702,4 @@ $arraySlice = ArraySlice;
 $arraySplice = ArraySplice;
 $arrayUnshift = ArrayUnshift;
-})
+});
Some files were not shown because too many files have changed in this diff.