deps: upgrade v8 to 4.5.92

This commit includes two fix-ups to src/node_contextify.cc and
lib/module.js to force-load the debugger when necessary.

PR-URL: https://github.com/nodejs/io.js/pull/2091
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Authored by Ben Noordhuis on 2015-07-02 16:32:19 +02:00; committed by Ali Ijaz Sheikh
parent a5745aa151
commit 41e63fb088
1238 changed files with 81934 additions and 41207 deletions

deps/v8/.gitignore (2 lines changed)

@@ -60,6 +60,8 @@ shell_g
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
/test/promises-aplus/sinon
/test/simdjs/ecmascript_simd*
/test/simdjs/data*
/test/test262/data
/test/test262/data.old
/test/test262/tc39-test262-*

deps/v8/BUILD.gn (69 lines changed)

@@ -52,7 +52,7 @@ config("internal_config") {
include_dirs = [ "." ]
if (component_mode == "shared_library") {
if (is_component_build) {
defines = [
"V8_SHARED",
"BUILDING_V8_SHARED",
@@ -204,6 +204,7 @@ action("js2c") {
"src/macros.py",
"src/messages.h",
"src/runtime.js",
"src/prologue.js",
"src/v8natives.js",
"src/symbol.js",
"src/array.js",
@@ -215,6 +216,7 @@ action("js2c") {
"src/regexp.js",
"src/arraybuffer.js",
"src/typedarray.js",
"src/iterator-prototype.js",
"src/generator.js",
"src/object-observe.js",
"src/collection.js",
@@ -267,6 +269,7 @@ action("js2c_experimental") {
"src/messages.h",
"src/proxy.js",
"src/generator.js",
"src/harmony-atomics.js",
"src/harmony-array.js",
"src/harmony-array-includes.js",
"src/harmony-typedarray.js",
@@ -274,7 +277,8 @@ action("js2c_experimental") {
"src/harmony-regexp.js",
"src/harmony-reflect.js",
"src/harmony-spread.js",
"src/harmony-object.js"
"src/harmony-object.js",
"src/harmony-sharedarraybuffer.js"
]
outputs = [
@@ -474,9 +478,13 @@ source_set("v8_snapshot") {
":js2c",
":js2c_experimental",
":js2c_extras",
":run_mksnapshot",
":v8_base",
]
public_deps = [
# This should be public so downstream targets can declare the snapshot
# output file as their inputs.
":run_mksnapshot",
]
sources = [
"$target_gen_dir/libraries.cc",
@@ -502,9 +510,11 @@ if (v8_use_external_startup_data) {
":js2c",
":js2c_experimental",
":js2c_extras",
":run_mksnapshot",
":v8_base",
]
public_deps = [
":natives_blob",
":run_mksnapshot",
]
sources = [
@@ -526,6 +536,14 @@ source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
"include/v8-debug.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
"include/v8-util.h",
"include/v8-version.h",
"include/v8.h",
"include/v8config.h",
"src/accessors.cc",
"src/accessors.h",
"src/allocation.cc",
@@ -544,6 +562,8 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/ast-literal-reindexer.cc",
"src/ast-literal-reindexer.h",
"src/ast-numbering.cc",
"src/ast-numbering.h",
"src/ast-value-factory.cc",
@@ -602,6 +622,8 @@ source_set("v8_base") {
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/coalesced-live-ranges.cc",
"src/compiler/coalesced-live-ranges.h",
"src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc",
"src/compiler/code-generator.h",
@@ -617,8 +639,8 @@ source_set("v8_base") {
"src/compiler/control-equivalence.h",
"src/compiler/control-flow-optimizer.cc",
"src/compiler/control-flow-optimizer.h",
"src/compiler/control-reducer.cc",
"src/compiler/control-reducer.h",
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
"src/compiler/diamond.h",
"src/compiler/frame.h",
"src/compiler/frame-elider.cc",
@@ -632,10 +654,14 @@ source_set("v8_base") {
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
"src/compiler/graph-replay.h",
"src/compiler/graph-trimmer.cc",
"src/compiler/graph-trimmer.h",
"src/compiler/graph-visualizer.cc",
"src/compiler/graph-visualizer.h",
"src/compiler/graph.cc",
"src/compiler/graph.h",
"src/compiler/greedy-allocator.cc",
"src/compiler/greedy-allocator.h",
"src/compiler/instruction-codes.h",
"src/compiler/instruction-selector-impl.h",
"src/compiler/instruction-selector.cc",
@@ -703,8 +729,6 @@ source_set("v8_base") {
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
"src/compiler/register-allocator-verifier.cc",
@@ -774,6 +798,7 @@ source_set("v8_base") {
"src/elements.h",
"src/execution.cc",
"src/execution.h",
"src/expression-classifier.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -958,12 +983,11 @@ source_set("v8_base") {
"src/optimizing-compile-dispatcher.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/pattern-rewriter.cc",
"src/parser.cc",
"src/parser.h",
"src/pending-compilation-error-handler.cc",
"src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/preparse-data-format.h",
"src/preparse-data.cc",
"src/preparse-data.h",
@@ -992,11 +1016,13 @@ source_set("v8_base") {
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-classes.cc",
"src/runtime/runtime-collections.cc",
"src/runtime/runtime-compiler.cc",
"src/runtime/runtime-date.cc",
"src/runtime/runtime-debug.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc",
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc",
@@ -1032,6 +1058,7 @@ source_set("v8_base") {
"src/scopes.cc",
"src/scopes.h",
"src/signature.h",
"src/simulator.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot/natives.h",
@@ -1040,6 +1067,8 @@ source_set("v8_base") {
"src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
"src/splay-tree.h",
"src/splay-tree-inl.h",
"src/snapshot/snapshot.h",
"src/string-builder.cc",
"src/string-builder.h",
@@ -1089,6 +1118,8 @@ source_set("v8_base") {
"src/vm-state.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
"src/zone-containers.h",
"src/third_party/fdlibm/fdlibm.cc",
"src/third_party/fdlibm/fdlibm.h",
]
@@ -1201,6 +1232,7 @@ source_set("v8_base") {
"src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/simulator-arm.h",
"src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc",
@@ -1295,6 +1327,7 @@ source_set("v8_base") {
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/simulator-mips.h",
"src/compiler/mips/code-generator-mips.cc",
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-selector-mips.cc",
@@ -1336,6 +1369,7 @@ source_set("v8_base") {
"src/mips64/regexp-macro-assembler-mips64.cc",
"src/mips64/regexp-macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/mips64/simulator-mips64.h",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
@@ -1399,6 +1433,8 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_atomicword_compat.h",
"src/base/atomicops_internals_mac.h",
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
@@ -1558,7 +1594,7 @@ if (current_toolchain == snapshot_toolchain) {
# Public targets
#
if (component_mode == "shared_library") {
if (is_component_build) {
component("v8") {
sources = [
"src/v8dll-main.cc",
@@ -1567,11 +1603,17 @@ if (component_mode == "shared_library") {
if (v8_use_snapshot && v8_use_external_startup_data) {
deps = [
":v8_base",
]
public_deps = [
":v8_external_snapshot",
]
} else if (v8_use_snapshot) {
deps = [
":v8_base",
]
# v8_snapshot should be public so downstream targets can declare the
# snapshot file as their input.
public_deps = [
":v8_snapshot",
]
} else {
@@ -1607,6 +1649,8 @@ if (component_mode == "shared_library") {
} else if (v8_use_snapshot) {
deps = [
":v8_base",
]
public_deps = [
":v8_snapshot",
]
} else {
@@ -1657,9 +1701,10 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
sources += [ "src/d8-windows.cc" ]
}
if (component_mode != "shared_library") {
if (!is_component_build) {
sources += [
"src/d8-debug.cc",
"src/d8-debug.h",
"$target_gen_dir/d8-js.cc",
]
}
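The public_deps comments in the hunks above state the rationale for this BUILD.gn change: a plain GN dep is private, so a target depending on v8_snapshot could not list the mksnapshot output among its own inputs; moving :run_mksnapshot into public_deps re-exports it to dependents. A toy Python model of that propagation rule, with hypothetical output names (this sketches GN's visibility semantics; it is not GN):

```python
# Toy model: outputs reachable through public_deps chains are visible to a
# dependent target; outputs behind a dep's plain (private) deps are not.
TARGETS = {
    "run_mksnapshot": {"deps": [], "public_deps": [], "outputs": ["snapshot.cc"]},
    "v8_snapshot":    {"deps": [], "public_deps": ["run_mksnapshot"], "outputs": []},
    "d8":             {"deps": ["v8_snapshot"], "public_deps": [], "outputs": []},
}

def visible_outputs(name):
    target = TARGETS[name]
    outs = list(target["outputs"])
    for dep in target["deps"] + target["public_deps"]:
        outs += TARGETS[dep]["outputs"]          # a direct dep's own outputs are visible
        for pub in TARGETS[dep]["public_deps"]:  # public_deps are re-exported onward
            outs += visible_outputs(pub)
    return outs

print(visible_outputs("d8"))  # ['snapshot.cc'] -- only because of public_deps
```

Had v8_snapshot kept :run_mksnapshot in plain deps, the same walk would return an empty list for d8, which is exactly the breakage the comment describes.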

deps/v8/ChangeLog (612 lines changed)

@@ -1,3 +1,615 @@
2015-07-02: Version 4.5.92
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.91
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.90
Performance and stability improvements on all platforms.
2015-07-01: Version 4.5.89
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.88
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.87
Performance and stability improvements on all platforms.
2015-06-30: Version 4.5.86
Ensure mjsunit tests use dashes not underscores in flags directives
(Chromium issue 505228).
Performance and stability improvements on all platforms.
2015-06-29: Version 4.5.85
Fix flag convention in handle count tests and comment (Chromium issue
505228).
Performance and stability improvements on all platforms.
2015-06-29: Version 4.5.84
Performance and stability improvements on all platforms.
2015-06-27: Version 4.5.83
Performance and stability improvements on all platforms.
2015-06-26: Version 4.5.82
Performance and stability improvements on all platforms.
2015-06-26: Version 4.5.81
Remove obsolete options in ScriptCompiler::CompileOptions (Chromium
issue 399580).
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.80
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.79
Performance and stability improvements on all platforms.
2015-06-25: Version 4.5.78
Serializer: clear next link in weak cells (Chromium issue 503552).
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.77
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.76
Performance and stability improvements on all platforms.
2015-06-24: Version 4.5.75
Date() should not depend on Date.prototype.toString (issue 4225).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.74
Expose Map/Set methods through the API (issue 3340).
[turbofan] NaN is never truish (issue 4207).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.73
Re-ship Harmony Array/TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.72
Performance and stability improvements on all platforms.
2015-06-23: Version 4.5.71
Performance and stability improvements on all platforms.
2015-06-20: Version 4.5.70
Ship Harmony Array/TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-20: Version 4.5.69
Ship arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.68
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.67
Performance and stability improvements on all platforms.
2015-06-19: Version 4.5.66
Ship arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.65
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.64
Performance and stability improvements on all platforms.
2015-06-18: Version 4.5.63
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.62
Hydrogen object literals: always initialize in-object properties
(Chromium issue 500497).
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.61
Add %TypedArray% to proto chain (issue 4085).
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.60
Performance and stability improvements on all platforms.
2015-06-17: Version 4.5.59
[crankshaft] Fix wrong bailout points in for-in loop body (Chromium
issue 500435).
Performance and stability improvements on all platforms.
2015-06-16: Version 4.5.58
Performance and stability improvements on all platforms.
2015-06-16: Version 4.5.57
Inline code generation for %_IsTypedArray (issue 4085).
Allow TypedArrays to be initialized with iterables (issue 4090).
Performance and stability improvements on all platforms.
2015-06-15: Version 4.5.56
Performance and stability improvements on all platforms.
2015-06-15: Version 4.5.55
Performance and stability improvements on all platforms.
2015-06-14: Version 4.5.54
Performance and stability improvements on all platforms.
2015-06-13: Version 4.5.53
Performance and stability improvements on all platforms.
2015-06-12: Version 4.5.52
Map::TryUpdate() must be in sync with Map::Update() (issue 4173).
Add ToObject call in Array.prototype.sort (issue 4125).
In Array.of and Array.from, fall back to DefineOwnProperty (issue 4168).
Performance and stability improvements on all platforms.
2015-06-12: Version 4.5.51
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.50
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.49
Performance and stability improvements on all platforms.
2015-06-11: Version 4.5.48
Support rest parameters in arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-10: Version 4.5.47
Implement %TypedArray%.prototype.slice (issue 3578).
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.46
Stage ES6 arrow functions (issue 2700).
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.45
Performance and stability improvements on all platforms.
2015-06-09: Version 4.5.44
Performance and stability improvements on all platforms.
2015-06-08: Version 4.5.43
[for-in] Make ForInNext and ForInFilter deal properly with exceptions
(Chromium issue 496331).
Performance and stability improvements on all platforms.
2015-06-08: Version 4.5.42
Performance and stability improvements on all platforms.
2015-06-06: Version 4.5.41
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.40
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.39
Stage ES6 Array and TypedArray methods (issue 3578).
Performance and stability improvements on all platforms.
2015-06-05: Version 4.5.38
Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
478811).
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.37
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.36
Performance and stability improvements on all platforms.
2015-06-04: Version 4.5.35
Flatten the Arrays returned and consumed by the v8::Map API (Chromium
issue 478263).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.34
Also allocate small typed arrays on heap when initialized from an array-
like (issue 3996).
Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.33
Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
478811).
Implement %TypedArray%.prototype.{toString,toLocaleString,join} (issue
3578).
Performance and stability improvements on all platforms.
2015-06-03: Version 4.5.32
Performance and stability improvements on all platforms.
2015-06-02: Version 4.5.31
Performance and stability improvements on all platforms.
2015-06-02: Version 4.5.30
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.29
Reland "Re-enable on-heap typed array allocation" (issue 3996).
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.28
Re-enable on-heap typed array allocation (issue 3996).
Also expose DefineOwnProperty (Chromium issue 475206).
Performance and stability improvements on all platforms.
2015-06-01: Version 4.5.27
Performance and stability improvements on all platforms.
2015-05-31: Version 4.5.26
Performance and stability improvements on all platforms.
2015-05-30: Version 4.5.25
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.24
Debugger: consider try-finally scopes not catching wrt debug events
(Chromium issue 492522).
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.23
Performance and stability improvements on all platforms.
2015-05-29: Version 4.5.22
Do not eagerly convert exception to string when creating a message
object (Chromium issue 490680).
Performance and stability improvements on all platforms.
2015-05-28: Version 4.5.21
Performance and stability improvements on all platforms.
2015-05-28: Version 4.5.20
Introduce v8::Object::CreateDataProperty (Chromium issue 475206).
Performance and stability improvements on all platforms.
2015-05-27: Version 4.5.19
Performance and stability improvements on all platforms.
2015-05-27: Version 4.5.18
Add {Map,Set}::FromArray to the API (issue 3340).
Add {Map,Set}::AsArray to the API (issue 3340).
Add basic API support for Map & Set (issue 3340).
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.17
Correctly hook up materialized receiver into the evaluation context
chain (Chromium issue 491943).
Implement bookmarks for ExternalStreamingStream (Chromium issue 470930).
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.16
Performance and stability improvements on all platforms.
2015-05-26: Version 4.5.15
Performance and stability improvements on all platforms.
2015-05-23: Version 4.5.14
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.13
Remove v8::Private.
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.12
Performance and stability improvements on all platforms.
2015-05-22: Version 4.5.11
Performance and stability improvements on all platforms.
2015-05-21: Version 4.5.10
Re-land %TypedArray%.prototype.{map,filter,some} (issue 3578).
Performance and stability improvements on all platforms.
2015-05-21: Version 4.5.9
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.8
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.7
Implement %TypedArray%.{lastI,i}ndexOf (issue 3578).
Implement %TypedArray%.prototype.sort (issue 3578).
Implement %TypedArray%.reverse (issue 3578).
Implement %TypedArray%.prototype.{map,filter,some,reduce,reduceRight}
(issue 3578).
Fix has_pending_exception logic in API's Array::CloneElementAt (issue
4103).
Adding api to get last gc object statistics for chrome://tracing
(Chromium issue 476013).
Fix harmless HGraph verification failure after hoisting inlined bounds
checks (Chromium issue 487608).
Performance and stability improvements on all platforms.
2015-05-20: Version 4.5.6
Add TypedArray.from method (issue 3578).
Performance and stability improvements on all platforms.
2015-05-19: Version 4.5.5
ARM64: Propagate notification about aborted compilation from
RegExpEngine to MacroAssembler (Chromium issue 489290).
Performance and stability improvements on all platforms.
2015-05-18: Version 4.5.4
Performance and stability improvements on all platforms.
2015-05-18: Version 4.5.3
Performance and stability improvements on all platforms.
2015-05-17: Version 4.5.2
Performance and stability improvements on all platforms.
2015-05-16: Version 4.5.1
Test that TypedArray methods don't read length (issue 3578).
Implement %TypedArray%.{fill,find,findIndex} (issue 3578).
TypedArray.prototype.copyWithin method (issue 3578).
Provide accessor for object internal properties that doesn't require
debugger to be active (Chromium issue 481845).
Don't create debug context if debug listener is not set (Chromium issue
482290).
Performance and stability improvements on all platforms.
2015-05-13: Version 4.4.65
Deprecate Isolate::New.
Factor out core of Array.forEach and .every, for use in TypedArrays
(issue 3578).
Performance and stability improvements on all platforms.
2015-05-12: Version 4.4.64
Performance and stability improvements on all platforms.
2015-05-11: Version 4.4.63
Let Runtime_GrowArrayElements accept non-Smi numbers as |key| (Chromium

deps/v8/DEPS (12 lines changed)

@@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "0bb67471bca068996e15b56738fa4824dfa19de0",
Var("git_url") + "/external/gyp.git" + "@" + "5122240c5e5c4d8da12c543d82b03d6089eb77c5",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "f8c0e585b0a046d83d72b5d37356cb50d5b2031a",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "1b697da5c2c0112e2b70e7e75d3e3d985f464a8f",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "b0ede9c89f9d5fbe5387d961ad4c0ec665b6c821",
Var("git_url") + "/chromium/buildtools.git" + "@" + "ecc8e253abac3b6186a97573871a084f4c0ca3ae",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
Var("git_url") + "/external/googletest.git" + "@" + "23574bf2333f834ff665f894c97bef8a5b33a0a9",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5bab78c6ced45a71a8e095a09697ca80492e57e1",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "73ec8804ed395b0886d6edf82a9f33583f4a7902",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("git_url") + "/android_tools.git" + "@" + "4f723e2a5fa5b7b8a198072ac19b92344be2b271",
Var("git_url") + "/android_tools.git" + "@" + "21f4bcbd6cd927e4b4227cfde7d5f13486be1236",
},
"win": {
"v8/third_party/cygwin":

deps/v8/LICENSE (7 lines changed)

@@ -3,12 +3,12 @@ maintained libraries. The externally maintained libraries used by V8
are:
- PCRE test suite, located in
test/mjsunit/third_party/regexp-pcre.js. This is based on the
test/mjsunit/third_party/regexp-pcre/regexp-pcre.js. This is based on the
test suite from PCRE-7.3, which is copyrighted by the University
of Cambridge and Google, Inc. The copyright notice and license
are embedded in regexp-pcre.js.
- Layout tests, located in test/mjsunit/third_party. These are
- Layout tests, located in test/mjsunit/third_party/object-keys. These are
based on layout tests from webkit.org which are copyrighted by
Apple Computer, Inc. and released under a 3-clause BSD license.
@@ -26,6 +26,9 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
Further license information can be found in LICENSE files located in
sub-directories.
Copyright 2014, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

deps/v8/Makefile (21 lines changed)

@@ -31,9 +31,7 @@ OUTDIR ?= out
TESTJOBS ?=
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
ANDROID_NDK_HOST_ARCH ?=
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/tmp/v8
NACL_SDK_ROOT ?=
@@ -145,10 +143,14 @@ ifeq ($(i18nsupport), off)
GYPFLAGS += -Dv8_enable_i18n_support=0
TESTFLAGS += --noi18n
endif
# deprecation_warnings=on
# deprecationwarnings=on
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
# imminentdeprecationwarnings=on
ifeq ($(imminentdeprecationwarnings), on)
GYPFLAGS += -Dv8_imminent_deprecation_warnings=1
endif
# asan=on
ifeq ($(asan), on)
GYPFLAGS += -Dasan=1 -Dclang=1
@@ -246,7 +248,7 @@ NACL_ARCHES = nacl_ia32 nacl_x64
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
build/shim_headers.gypi build/features.gypi build/standalone.gypi \
build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
build/android.gypi test/cctest/cctest.gyp \
test/cctest/cctest.gyp \
test/unittests/unittests.gyp tools/gyp/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
buildtools/third_party/libc++abi/libc++abi.gyp \
@@ -277,7 +279,6 @@ ENVFILE = $(OUTDIR)/environment
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN \
$(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
must-set-NACL_SDK_ROOT
@@ -311,8 +312,7 @@ native: $(OUTDIR)/Makefile.native
$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN Makefile.android
$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
@$(MAKE) -f Makefile.android $@ \
ARCH="$(basename $@)" \
MODE="$(subst .,,$(suffix $@))" \
@@ -448,13 +448,6 @@ $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN:
ifndef ANDROID_NDK_ROOT
ifndef ANDROID_TOOLCHAIN
$(error ANDROID_NDK_ROOT or ANDROID_TOOLCHAIN must be set))
endif
endif
# Note that NACL_SDK_ROOT must be set to point to an appropriate
# Native Client SDK before using this makefile. You can download
# an SDK here:

deps/v8/Makefile.android

@@ -35,75 +35,28 @@ MODES = release debug
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
else ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
else
$(error Host platform "${HOST_OS}" is not supported)
endif
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm android_target_platform=14
DEFINES += arm_neon=0 arm_version=7
TOOLCHAIN_ARCH = arm-linux-androideabi
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=arm v8_target_arch=arm
else ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=21
TOOLCHAIN_ARCH = aarch64-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.9
DEFINES = target_arch=arm64 v8_target_arch=arm64
else ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=mipsel v8_target_arch=mipsel
else ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=ia32 v8_target_arch=ia32
else ifeq ($(ARCH), android_x64)
DEFINES = target_arch=x64 v8_target_arch=x64 android_target_arch=x86_64 android_target_platform=21
TOOLCHAIN_ARCH = x86_64
TOOLCHAIN_PREFIX = x86_64-linux-android
TOOLCHAIN_VER = 4.9
DEFINES = target_arch=x64 v8_target_arch=x64
else ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
DEFINES = target_arch=ia32 v8_target_arch=x87
else
$(error Target architecture "${ARCH}" is not supported)
endif
TOOLCHAIN_PATH = \
${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
correctly)
endif
# For mksnapshot host generation.
DEFINES += host_os=${HOST_OS}
# Common flags.
DEFINES += OS=android
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
@@ -113,9 +66,7 @@ ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-Ibuild/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

deps/v8/build/android.gypi (deleted)

@@ -1,266 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Definitions for building standalone V8 binaries to run on Android.
# This is mostly excerpted from:
# http://src.chromium.org/viewvc/chrome/trunk/src/build/common.gypi
{
'variables': {
# Location of Android NDK.
'variables': {
'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
},
'conditions': [
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
'android_stlport_library': 'stlport_static',
}, # variables
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
'cflags': [
'-fomit-frame-pointer',
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
'USE_STLPORT=1',
'_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
# Not supported by Android toolchain.
# Where do these come from? Can't find references in
# any Chromium gyp or gypi file. Maybe they come from
# gyp itself?
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
'-l<(android_stlport_library)',
# Manually link the libgcc.a that the cross compiler uses.
'<!($CC -print-libgcc-file-name)',
'-lc',
'-ldl',
'-lstdc++',
'-lm',
],
'conditions': [
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64") and component!="shared_library"', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
'-Wl,-O1',
'-Wl,--as-needed',
],
}],
], # target_conditions
}, # target_defaults
}

deps/v8/build/features.gypi

@@ -59,6 +59,9 @@
# Enable compiler warnings when using V8_DEPRECATED apis.
'v8_deprecation_warnings%': 0,
# Enable compiler warnings when using V8_DEPRECATE_SOON apis.
'v8_imminent_deprecation_warnings%': 0,
# Set to 1 to enable DCHECKs in release builds.
'dcheck_always_on%': 0,
},
@@ -88,6 +91,9 @@
['v8_deprecation_warnings==1', {
'defines': ['V8_DEPRECATION_WARNINGS',],
}],
['v8_imminent_deprecation_warnings==1', {
'defines': ['V8_IMMINENT_DEPRECATION_WARNINGS',],
}],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
@@ -112,7 +118,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {

deps/v8/build/get_landmines.py

@@ -21,6 +21,8 @@ def main():
print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
print 'Clobber after ICU roll.'
print 'Moar clobbering...'
print 'Remove build/android.gypi'
return 0
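Each print line added above is a "landmine": the build compares the script's current output with the output recorded during the previous build and clobbers the output directory when they differ, which is why appending any new line forces a clean rebuild. A hedged sketch of that mechanism (the `.landmines` record path here is illustrative, not the actual Chromium plumbing):

```python
import os
import shutil

def get_landmines():
    # One clobber reason per line, in the style of get_landmines.py above.
    return ['Clobber after ICU roll.',
            'Moar clobbering...',
            'Remove build/android.gypi']

def clobber_if_necessary(out_dir):
    """Wipe out_dir when the landmine list changed since the last build."""
    record = os.path.join(out_dir, '.landmines')  # illustrative location
    current = '\n'.join(get_landmines())
    previous = None
    if os.path.exists(record):
        with open(record) as f:
            previous = f.read()
    if previous is not None and previous != current:
        shutil.rmtree(out_dir)  # the list changed: clobber everything
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    with open(record, 'w') as f:
        f.write(current)  # remember the current list for the next build
```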

deps/v8/build/gyp_v8 (11 lines changed)

@@ -130,7 +130,7 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
gyp_generators = os.environ.get('GYP_GENERATORS')
gyp_generators = os.environ.get('GYP_GENERATORS', '')
if platform.system() == 'Linux' and gyp_generators != 'ninja':
# Work around for crbug.com/331475.
for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')):
@@ -140,4 +140,13 @@ if __name__ == '__main__':
# -Goutput_dir defines where the build output goes, relative to the
# Makefile. Set it to . so that the build output doesn't end up in out/out.
gyp_args.append('-Goutput_dir=.')
gyp_defines = os.environ.get('GYP_DEFINES', '')
# Automatically turn on crosscompile support for platforms that need it.
if all(('ninja' in gyp_generators,
'OS=android' in gyp_defines,
'GYP_CROSSCOMPILE' not in os.environ)):
os.environ['GYP_CROSSCOMPILE'] = '1'
run_gyp(gyp_args)
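Two things change above: GYP_GENERATORS now defaults to the empty string, and GYP_CROSSCOMPILE=1 is set automatically for ninja Android builds. The default matters because the new check uses an `in` test; without it an unset variable yields None, and `'ninja' in None` raises a TypeError. A minimal repro of the difference:

```python
env = {}  # stand-in for os.environ with GYP_GENERATORS unset

broken = env.get('GYP_GENERATORS')     # None when the variable is unset
fixed = env.get('GYP_GENERATORS', '')  # '' when the variable is unset

print('ninja' in fixed)  # False, as intended
try:
    'ninja' in broken
except TypeError as exc:
    print('TypeError: ' + str(exc))  # argument of type 'NoneType' is not iterable
```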

deps/v8/build/standalone.gypi

@@ -33,16 +33,17 @@
'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
'clang_dir%': 'third_party/llvm-build/Release+Asserts',
'clang_xcode%': 0,
# Track where uninitialized memory originates from. From fastest to
# slowest: 0 - no tracking, 1 - track only the initial allocation site, 2
# - track the chain of stores leading from allocation site to use site.
'msan_track_origins%': 1,
'msan_track_origins%': 2,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
# TODO(jochen): Turn this on.
'v8_imminent_deprecation_warnings%': 0,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'release_extra_cflags%': '',
@@ -66,7 +67,9 @@
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
@@ -74,6 +77,16 @@
'lsan%': 0,
'msan%': 0,
'tsan%': 0,
# Enable coverage gathering instrumentation in sanitizer tools. This flag
# also controls coverage granularity (1 for function-level, 2 for
# block-level, 3 for edge-level).
'sanitizer_coverage%': 0,
# Use libc++ (buildtools/third_party/libc++ and
# buildtools/third_party/libc++abi) instead of stdlibc++ as standard
# library. This is intended to be used for instrumented builds.
'use_custom_libcxx%': 0,
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
# goma settings.
# 1 to use goma.
@@ -87,9 +100,17 @@
}, {
'gomadir': '<!(/bin/echo -n ${HOME}/goma)',
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
'host_clang%': '1',
}, {
'host_clang%': '0',
}],
],
},
'base_dir%': '<(base_dir)',
'clang_dir%': '<(clang_dir)',
'host_arch%': '<(host_arch)',
'host_clang%': '<(host_clang)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(v8_target_arch)',
'werror%': '-Werror',
@@ -99,6 +120,11 @@
'lsan%': '<(lsan)',
'msan%': '<(msan)',
'tsan%': '<(tsan)',
'sanitizer_coverage%': '<(sanitizer_coverage)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
# Add a simple extra solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
# .gyp files or targets should set v8_code to 1 if they build V8 specific
# code, as opposed to external code. This variable is used to control such
@@ -160,20 +186,132 @@
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87")', {
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
'clang%': 1,
}, {
'clang%': 0,
}],
['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
'host_clang%': '1',
}, {
'host_clang%': '0',
}],
['asan==1 or lsan==1 or msan==1 or tsan==1', {
'clang%': 1,
'use_allocator%': 'none',
}],
['asan==1 and OS=="linux"', {
'use_custom_libcxx%': 1,
}],
['tsan==1', {
'use_custom_libcxx%': 1,
}],
['msan==1', {
# Use a just-built, MSan-instrumented libc++ instead of the system-wide
# libstdc++. This is required to avoid false positive reports whenever
# the C++ standard library is used.
'use_custom_libcxx%': 1,
}],
['OS=="linux"', {
# Gradually roll out v8_use_external_startup_data.
# Should eventually be default enabled on all platforms.
'v8_use_external_startup_data%': 1,
}],
['OS=="android"', {
# Location of Android NDK.
'variables': {
'variables': {
# The Android toolchain needs to use the absolute path to the NDK
# because it is used at different levels in the GYP files.
'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
'host_os%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')",
},
# Copy conditionally-set variables out one scope.
'android_ndk_root%': '<(android_ndk_root)',
'host_os%': '<(host_os)',
'conditions': [
['target_arch == "ia32"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'x86',
'android_target_platform%': '16',
}],
['target_arch == "x64"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'x86_64',
'android_target_platform%': '21',
}],
['target_arch=="arm"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'arm',
'android_target_platform%': '16',
'arm_version%': 7,
}],
['target_arch == "arm64"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'arm64',
'android_target_platform%': '21',
'arm_version%': 'default',
}],
['target_arch == "mipsel"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'mips',
'android_target_platform%': '16',
}],
['target_arch == "mips64el"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_target_arch%': 'mips64',
'android_target_platform%': '21',
}],
],
},
# Copy conditionally-set variables out one scope.
'android_target_arch%': '<(android_target_arch)',
'android_target_platform%': '<(android_target_platform)',
'android_toolchain%': '<(android_toolchain)',
'arm_version%': '<(arm_version)',
'host_os%': '<(host_os)',
'conditions': [
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
}, {
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
],
'android_stlport_library': 'stlport_static',
}], # OS=="android"
['host_clang==1', {
'host_cc': '<(clang_dir)/bin/clang',
'host_cxx': '<(clang_dir)/bin/clang++',
}, {
'host_cc': '<!(which gcc)',
'host_cxx': '<!(which g++)',
}],
],
# Default ARM variable settings.
'arm_version%': 'default',
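Every per-architecture block in the hunk above composes android_toolchain the same way: NDK root, a GCC 4.9 toolchain directory named after the target, then a prebuilt/<host_os>-<host_arch>/bin suffix. A condensed Python sketch of that composition, with the mapping copied from the conditions above (the function itself is illustrative, not part of the build):

```python
import platform
import re

# target_arch -> toolchain directory, taken from the conditions above.
TOOLCHAIN_DIR = {
    'ia32':     'x86-4.9',
    'x64':      'x86_64-4.9',
    'arm':      'arm-linux-androideabi-4.9',
    'arm64':    'aarch64-linux-android-4.9',
    'mipsel':   'mipsel-linux-android-4.9',
    'mips64el': 'mips64el-linux-android-4.9',
}

def android_toolchain(ndk_root, target_arch):
    # Same normalizations as the uname/sed expressions above.
    host_os = platform.system().replace('Linux', 'linux').replace('Darwin', 'mac')
    host_arch = re.sub(r'i[3456]86', 'x86', platform.machine())
    return '%s/toolchains/%s/prebuilt/%s-%s/bin' % (
        ndk_root, TOOLCHAIN_DIR[target_arch], host_os, host_arch)

print(android_toolchain('third_party/android_tools/ndk', 'arm'))
```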
@@ -194,6 +332,11 @@
'target_defaults': {
'variables': {
'v8_code%': '<(v8_code)',
'conditions':[
['OS=="android"', {
'host_os%': '<(host_os)',
}],
],
},
'default_configuration': 'Debug',
'configurations': {
@@ -283,96 +426,148 @@
],
},
'conditions': [
['asan==1 and OS!="mac"', {
['os_posix==1 and OS!="mac"', {
'target_defaults': {
'cflags_cc+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
'cflags!': [
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=address',
],
},
}],
['tsan==1 and OS!="mac"', {
'target_defaults': {
'cflags+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=thread',
'-fPIC',
'-Wno-c++11-extensions',
],
'cflags!': [
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=thread',
'-pie',
],
'defines': [
'THREAD_SANITIZER',
],
},
}],
['msan==1 and OS!="mac"', {
'target_defaults': {
'cflags_cc+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
'-fPIC',
],
'cflags+': [
'-fPIC',
],
'cflags!': [
'-fno-exceptions',
'-fomit-frame-pointer',
],
'ldflags': [
'-fsanitize=memory',
],
'defines': [
'MEMORY_SANITIZER',
],
'dependencies': [
# Use libc++ (third_party/libc++ and third_party/libc++abi) instead of
# stdlibc++ as standard library. This is intended to use for instrumented
# builds.
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
],
},
}],
['asan==1 and OS=="mac"', {
'target_defaults': {
'xcode_settings': {
'OTHER_CFLAGS+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
'OTHER_CFLAGS!': [
'-fomit-frame-pointer',
],
},
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-fsanitize=address']},
'conditions': [
# Common options for AddressSanitizer, LeakSanitizer,
# ThreadSanitizer and MemorySanitizer.
['asan==1 or lsan==1 or tsan==1 or msan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
],
'cflags!': [
'-fomit-frame-pointer',
],
}],
],
}],
['asan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=address',
],
'ldflags': [
'-fsanitize=address',
],
'defines': [
'ADDRESS_SANITIZER',
],
}],
],
}],
['lsan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=leak',
],
'ldflags': [
'-fsanitize=leak',
],
'defines': [
'LEAK_SANITIZER',
],
}],
],
}],
['tsan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=thread',
],
'ldflags': [
'-fsanitize=thread',
],
'defines': [
'THREAD_SANITIZER',
],
}],
],
}],
['msan==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=memory',
'-fsanitize-memory-track-origins=<(msan_track_origins)',
],
'ldflags': [
'-fsanitize=memory',
],
'defines': [
'MEMORY_SANITIZER',
],
}],
],
}],
['use_custom_libcxx==1', {
'dependencies': [
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
],
}],
['sanitizer_coverage!=0', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize-coverage=<(sanitizer_coverage)',
],
'defines': [
'SANITIZER_COVERAGE',
],
}],
],
}],
],
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
],
},
}],
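The restructuring above replaces four copy-pasted sanitizer blocks with one shared block (frame pointers and line tables kept for usable reports) plus one small -fsanitize=... block per flag, so combinations such as asan=1 lsan=1 compose instead of duplicating flags. A Python sketch of the resulting cflags merge (a model of the condition evaluation, not gyp itself):

```python
def sanitizer_cflags(asan=0, lsan=0, msan=0, tsan=0, msan_track_origins=2):
    cflags = []
    if asan or lsan or msan or tsan:
        # Shared block, mirroring the first condition above.
        cflags += ['-fno-omit-frame-pointer', '-gline-tables-only']
    if asan:
        cflags.append('-fsanitize=address')
    if lsan:
        cflags.append('-fsanitize=leak')
    if tsan:
        cflags.append('-fsanitize=thread')
    if msan:
        cflags += ['-fsanitize=memory',
                   '-fsanitize-memory-track-origins=%d' % msan_track_origins]
    return cflags

print(sanitizer_cflags(asan=1, lsan=1))
# ['-fno-omit-frame-pointer', '-gline-tables-only',
#  '-fsanitize=address', '-fsanitize=leak']
```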
['OS=="mac"', {
'target_defaults': {
'conditions': [
['asan==1', {
'xcode_settings': {
# FIXME(machenbach): This is outdated compared to common.gypi.
'OTHER_CFLAGS+': [
'-fno-omit-frame-pointer',
'-gline-tables-only',
'-fsanitize=address',
'-w', # http://crbug.com/162783
],
'OTHER_CFLAGS!': [
'-fomit-frame-pointer',
],
'defines': [
'ADDRESS_SANITIZER',
],
},
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-fsanitize=address']},
}],
],
}],
['sanitizer_coverage!=0', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize-coverage=<(sanitizer_coverage)',
],
'defines': [
'SANITIZER_COVERAGE',
],
}],
],
}],
],
}, # target_defaults
}], # OS=="mac"
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="aix"', {
'target_defaults': {
@@ -382,17 +577,20 @@
'-Wno-unused-parameter',
'-Wno-long-long',
'-pthread',
'-fno-exceptions',
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'cflags_cc': [
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
],
'ldflags': [ '-pthread', ],
'conditions': [
# TODO(arm64): It'd be nice to enable this for arm64 as well,
# but the Assembler requires some serious fixing first.
[ 'clang==1 and v8_target_arch=="x64"', {
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
'cflags': [ '-Wshorten-64-to-32' ],
}],
[ 'host_arch=="ppc64" and OS!="aix"', {
@@ -415,11 +613,15 @@
'-Wall',
'<(werror)',
'-Wno-unused-parameter',
'-fno-exceptions',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'cflags_cc': [
'-Wnon-virtual-dtor',
'-fno-exceptions',
'-fno-rtti',
'-std=gnu++0x',
],
'conditions': [
[ 'visibility=="hidden"', {
'cflags': [ '-fvisibility=hidden' ],
@@ -581,10 +783,214 @@
], # target_conditions
}, # target_defaults
}], # OS=="mac"
['OS=="android"', {
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
'cflags': [
'-fomit-frame-pointer',
],
}, # Release
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
# Note: Using -std=c++0x will define __STRICT_ANSI__, which
# in turn will leave out some template stuff for 'long
# long'. What we want is -std=c++11, but this is not
# supported by GCC 4.6 or Xcode 4.2
'-std=gnu++0x' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
'-pthread', # Not supported by Android toolchain.
],
'cflags': [
'-ffunction-sections',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
# Note: This include is in cflags to ensure that it comes after
# all of the includes.
'-I<(android_include)',
'-I<(android_stlport_include)',
],
'cflags_cc': [
'-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
'USE_STLPORT=1',
'_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
'-Wl,-rpath-link=<(android_lib)',
'-L<(android_lib)',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
# Not supported by Android toolchain.
# Where do these come from? Can't find references in
# any Chromium gyp or gypi file. Maybe they come from
# gyp itself?
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
'-l<(android_stlport_library)',
# Manually link the libgcc.a that the cross compiler uses.
'<!(<(android_toolchain)/*-gcc -print-libgcc-file-name)',
'-lc',
'-ldl',
'-lstdc++',
'-lm',
],
'conditions': [
['target_arch == "arm"', {
'ldflags': [
# Enable identical code folding to reduce size.
'-Wl,--icf=safe',
],
}],
['target_arch=="arm" and arm_version==7', {
'cflags': [
'-march=armv7-a',
'-mtune=cortex-a8',
'-mfpu=vfp3',
],
'ldflags': [
'-L<(android_stlport_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
'-L<(android_stlport_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
'-L<(android_stlport_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
'-L<(android_stlport_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/x86',
],
}],
['target_arch=="mipsel"', {
# The mips toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
'-U__linux__'
],
'cflags': [
'-fno-stack-protector',
],
'ldflags': [
'-L<(android_stlport_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64" or target_arch=="ia32") and component!="shared_library"', {
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
}],
],
'target_conditions': [
['_type=="executable"', {
'conditions': [
['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
}, {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker',
],
}]
],
'ldflags': [
'-Bdynamic',
'-Wl,-z,nocopyreloc',
# crtbegin_dynamic.o should be the last item in ldflags.
'<(android_lib)/crtbegin_dynamic.o',
],
'libraries': [
# crtend_android.o needs to be the last item in libraries.
# Do not add any libraries after this!
'<(android_lib)/crtend_android.o',
],
}],
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],
}], # _toolset=="target"
# Settings for building host targets using the system toolchain.
['_toolset=="host"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
'ldflags!': [
'-Wl,-z,noexecstack',
'-Wl,--gc-sections',
'-Wl,-O1',
'-Wl,--as-needed',
],
}],
], # target_conditions
}, # target_defaults
}], # OS=="android"
['OS=="android" and clang==0', {
# Hardcode the compiler names in the Makefile so that
# it won't depend on the environment at make time.
'make_global_settings': [
['CC', '<!(/bin/echo -n <(android_toolchain)/*-gcc)'],
['CXX', '<!(/bin/echo -n <(android_toolchain)/*-g++)'],
['CC.host', '<(host_cc)'],
['CXX.host', '<(host_cxx)'],
],
}],
['clang!=1 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
'make_global_settings': [
['CC.host', '../<(clang_dir)/bin/clang'],
['CXX.host', '../<(clang_dir)/bin/clang++'],
['CC.host', '<(clang_dir)/bin/clang'],
['CXX.host', '<(clang_dir)/bin/clang++'],
],
}],
['clang==0 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
@@ -609,8 +1015,8 @@
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
'and OS!="win" and "<(GENERATOR)"=="make"', {
'make_global_settings': [
['CC', '../<(clang_dir)/bin/clang'],
['CXX', '../<(clang_dir)/bin/clang++'],
['CC', '<(clang_dir)/bin/clang'],
['CXX', '<(clang_dir)/bin/clang++'],
['CC.host', '$(CC)'],
['CXX.host', '$(CXX)'],
],
@@ -627,7 +1033,7 @@
['clang==1 and OS=="win"', {
'make_global_settings': [
# On Windows, gyp's ninja generator only looks at CC.
['CC', '../<(clang_dir)/bin/clang-cl'],
['CC', '<(clang_dir)/bin/clang-cl'],
],
}],
# TODO(yyanagisawa): supports GENERATOR==make

deps/v8/build/toolchain.gypi

@@ -338,6 +338,26 @@
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64el") and v8_target_arch==target_arch', {
'target_conditions': [
['_toolset=="target"', {
# Target built with a Mips CXX compiler.
'variables': {
'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
},
'conditions': [
['ldso_path!=""', {
'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
}],
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
],
}],
],
}],
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
@ -384,11 +404,7 @@
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
'conditions': [
@ -571,11 +587,7 @@
],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r2"', {
'conditions': [
@ -770,20 +782,12 @@
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
'ldflags': [
'-mips64r6', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
'ldflags': [
'-mips64r2', '-mabi=64',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
'ldflags': ['-mips64r2', '-mabi=64'],
}],
],
}, {


@ -248,7 +248,8 @@ class V8_EXPORT Debug {
* The debugger runs in its own context, which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
* debugger context. Note that the content of the debugger context is subject
* to change.
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext();
@ -259,6 +260,14 @@ class V8_EXPORT Debug {
* unexpectedly used. LiveEdit is enabled by default.
*/
static void SetLiveEditEnabled(Isolate* isolate, bool enable);
/**
* Returns an array of internal properties specific to the value type. The
* result has the format [<name>, <value>, ..., <name>, <value>], and the
* result array will be allocated in the current context.
*/
static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
Local<Value> value);
};
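A hedged sketch of how an embedder might consume the new accessor, assuming a live isolate, an entered context, and some handle `value`; only GetInternalProperties itself comes from the header above, the rest is illustrative:

  // Walk the alternating [<name>, <value>, ...] pairs documented above.
  v8::Local<v8::Array> props;
  if (v8::Debug::GetInternalProperties(isolate, value).ToLocal(&props)) {
    for (uint32_t i = 0; i + 1 < props->Length(); i += 2) {
      v8::Local<v8::Value> name = props->Get(i);      // <name>
      v8::Local<v8::Value> val = props->Get(i + 1);   // <value>
      // ... inspect the pair ...
    }
  }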


@ -56,6 +56,17 @@ class Platform {
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
* |isolate| after the given number of seconds |delay_in_seconds|.
* Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) {
// TODO(ulan): Make this function abstract after V8 roll in Chromium.
}
/**
* Monotonically increasing time in seconds from an arbitrary fixed point in
* the past. This function is expected to return at least


@ -275,7 +275,8 @@ class V8_EXPORT HeapGraphNode {
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11, // Sliced string. A fragment of another string.
kSymbol = 12 // A Symbol (ES6).
kSymbol = 12, // A Symbol (ES6).
kSimdValue = 13 // A SIMD value stored in the heap (Proposed ES7).
};
/** Returns node type (see HeapGraphNode::Type). */


@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 4
#define V8_BUILD_NUMBER 63
#define V8_PATCH_LEVEL 12
#define V8_MINOR_VERSION 5
#define V8_BUILD_NUMBER 92
#define V8_PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

616
deps/v8/include/v8.h vendored

File diff suppressed because it is too large.


@ -5,6 +5,8 @@
#ifndef V8CONFIG_H_
#define V8CONFIG_H_
// clang-format off
// Platform headers for feature detection below.
#if defined(__ANDROID__)
# include <sys/cdefs.h>
@ -183,6 +185,7 @@
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_BUILTIN_UADD_OVERFLOW - __builtin_uadd_overflow() supported
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
@ -199,8 +202,6 @@
#if defined(__GNUC__) // Clang in gcc mode.
# define V8_CC_GNU 1
#elif defined(_MSC_VER) // Clang in cl mode.
# define V8_CC_MSVC 1
#endif
// Clang defines __alignof__ as alias for __alignof
@ -223,6 +224,7 @@
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_BUILTIN_UADD_OVERFLOW (__has_builtin(__builtin_uadd_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
@ -230,10 +232,15 @@
#elif defined(__GNUC__)
# define V8_CC_GNU 1
// Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL (defined(__INTEL_COMPILER))
# define V8_CC_MINGW32 (defined(__MINGW32__))
# define V8_CC_MINGW64 (defined(__MINGW64__))
# if defined(__INTEL_COMPILER) // Intel C++ also masquerades as GCC 3.2.0
# define V8_CC_INTEL 1
# endif
# if defined(__MINGW32__)
# define V8_CC_MINGW32 1
# endif
# if defined(__MINGW64__)
# define V8_CC_MINGW64 1
# endif
# define V8_CC_MINGW (V8_CC_MINGW32 || V8_CC_MINGW64)
# define V8_HAS___ALIGNOF__ (V8_GNUC_PREREQ(4, 3, 0))
@ -268,11 +275,10 @@
# define V8_HAS_CXX11_ALIGNOF (V8_GNUC_PREREQ(4, 8, 0))
# define V8_HAS_CXX11_STATIC_ASSERT (V8_GNUC_PREREQ(4, 3, 0))
# endif
#endif
#elif defined(_MSC_VER)
#if defined(_MSC_VER)
# define V8_CC_MSVC 1
# define V8_HAS___ALIGNOF 1
# define V8_HAS_DECLSPEC_ALIGN 1
@ -313,22 +319,33 @@
#endif
// A macro to mark classes or functions as deprecated.
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
# define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated(message)))
#define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
# define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated))
#define V8_DEPRECATED(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
# define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#define V8_DEPRECATED(message, declarator) __declspec(deprecated) declarator
#else
# define V8_DEPRECATED(message, declarator) declarator
#define V8_DEPRECATED(message, declarator) declarator
#endif
// a macro to make it easier to see what will be deprecated.
// A macro (V8_DEPRECATE_SOON) to make it easier to see what will be deprecated.
#if defined(V8_IMMINENT_DEPRECATION_WARNINGS) && \
V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
#define V8_DEPRECATE_SOON(message, declarator) \
declarator __attribute__((deprecated(message)))
#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED
#define V8_DEPRECATE_SOON(message, declarator) \
declarator __attribute__((deprecated))
#elif defined(V8_IMMINENT_DEPRECATION_WARNINGS) && V8_HAS_DECLSPEC_DEPRECATED
#define V8_DEPRECATE_SOON(message, declarator) __declspec(deprecated) declarator
#else
#define V8_DEPRECATE_SOON(message, declarator) declarator
#endif
// A macro to provide the compiler with branch prediction information.
@ -402,4 +419,6 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
// clang-format on
#endif // V8CONFIG_H_

3
deps/v8/infra/OWNERS vendored Normal file

@ -0,0 +1,3 @@
machenbach@chromium.org
sergiyb@chromium.org
tandrii@chromium.org

1
deps/v8/infra/README.md vendored Normal file

@ -0,0 +1 @@
This directory contains infra-specific files.

51
deps/v8/infra/config/cq.cfg vendored Normal file

@ -0,0 +1,51 @@
# See http://luci-config.appspot.com/schemas/projects/refs:cq.cfg for the
# documentation of this file format.
version: 1
cq_name: "v8"
cq_status_url: "https://chromium-cq-status.appspot.com"
hide_ref_in_committed_msg: true
commit_burst_delay: 60
max_commit_burst: 1
target_ref: "refs/pending/heads/master"
rietveld {
url: "https://codereview.chromium.org"
}
verifiers {
reviewer_lgtm {
committer_list: "v8"
}
tree_status {
tree_status_url: "https://v8-status.appspot.com"
}
try_job {
buckets {
name: "tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
builders { name: "v8_linux64_rel" }
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg" }
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
builders { name: "v8_linux_rel" }
builders { name: "v8_mac_rel" }
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel" }
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
builders { name: "v8_win_rel" }
}
}
sign_cla {}
}


@ -0,0 +1 @@
This directory contains v8 project-wide configurations for infra services.


@ -0,0 +1,23 @@
# Defines buckets on cr-buildbucket.appspot.com, used to schedule builds
# on buildbot. In particular, CQ uses some of these buckets to schedule tryjobs.
#
# See http://luci-config.appspot.com/schemas/projects:buildbucket.cfg for
# schema of this file and documentation.
#
# Please keep this list sorted by bucket name.
buckets {
name: "master.tryserver.v8"
acls {
role: READER
group: "all"
}
acls {
role: SCHEDULER
group: "service-account-cq"
}
acls {
role: WRITER
group: "service-account-v8-master"
}
}

69
deps/v8/samples/hello-world.cc vendored Normal file

@ -0,0 +1,69 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
using namespace v8;
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
int main(int argc, char* argv[]) {
// Initialize V8.
V8::InitializeICU();
Platform* platform = platform::CreateDefaultPlatform();
V8::InitializePlatform(platform);
V8::Initialize();
// Create a new Isolate and make it the current one.
ArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &allocator;
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope isolate_scope(isolate);
// Create a stack-allocated handle scope.
HandleScope handle_scope(isolate);
// Create a new context.
Local<Context> context = Context::New(isolate);
// Enter the context for compiling and running the hello world script.
Context::Scope context_scope(context);
// Create a string containing the JavaScript source code.
Local<String> source = String::NewFromUtf8(isolate, "'Hello' + ', World!'");
// Compile the source code.
Local<Script> script = Script::Compile(source);
// Run the script to get the result.
Local<Value> result = script->Run();
// Convert the result to a UTF-8 string and print it.
String::Utf8Value utf8(result);
printf("%s\n", *utf8);
}
// Dispose the isolate and tear down V8.
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
delete platform;
return 0;
}
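The sample is wired into the build by the samples.gyp hunk further down, which adds a matching hello-world target.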


@ -220,7 +220,7 @@ bool JsHttpRequestProcessor::ExecuteScript(Handle<String> script) {
// We're just about to compile the script; set up an error handler to
// catch any exceptions the script might throw.
TryCatch try_catch;
TryCatch try_catch(GetIsolate());
// Compile the script and check for errors.
Handle<Script> compiled_script = Script::Compile(script);
@ -281,7 +281,7 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
Handle<Object> request_obj = WrapRequest(request);
// Set up an exception handler before calling the Process function
TryCatch try_catch;
TryCatch try_catch(GetIsolate());
// Invoke the process function, giving the global object as 'this'
// and one argument, the request.
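Both samples switch to the isolate-scoped TryCatch constructor; the pattern, sketched for an assumed live `isolate` and script `source`:

  // Catch compile/run exceptions with the new isolate-scoped TryCatch.
  v8::TryCatch try_catch(isolate);  // was: v8::TryCatch try_catch;
  v8::Local<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty()) {
    v8::String::Utf8Value error(try_catch.Exception());
    // ... report *error ...
  }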


@ -61,6 +61,12 @@
'shell.cc',
],
},
{
'target_name': 'hello-world',
'sources': [
'hello-world.cc',
],
},
{
'target_name': 'process',
'sources': [


@ -325,7 +325,7 @@ bool ExecuteString(v8::Isolate* isolate,
bool print_result,
bool report_exceptions) {
v8::HandleScope handle_scope(isolate);
v8::TryCatch try_catch;
v8::TryCatch try_catch(isolate);
v8::ScriptOrigin origin(name);
v8::Handle<v8::Script> script = v8::Script::Compile(source, &origin);
if (script.IsEmpty()) {


@ -32,6 +32,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
Handle<Object> set = v8::FromCData(isolate, setter);
@ -126,31 +127,6 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
}
bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::Name> name, Handle<Object> value) {
Handle<Object> holder = Utils::OpenHandle(*info.Holder());
Handle<Object> receiver = Utils::OpenHandle(*info.This());
if (*holder == *receiver) return false;
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
// This behaves sloppy since we lost the actual strict-mode.
// TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
// properties.
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
if (iter.IsAtEnd()) return true;
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
}
if (!object->map()->is_extensible()) return true;
JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
value, NONE).Check();
}
return true;
}
//
// Accessors::ArgumentsIterator
//
@ -174,8 +150,6 @@ void Accessors::ArgumentsIteratorSetter(
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
LookupIterator it(object, Utils::OpenHandle(*name));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
@ -199,21 +173,6 @@ Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
//
// The helper function will 'flatten' Number objects.
Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
Handle<Object> value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
has_initial_map());
if (wrapper->map() == isolate->number_function()->initial_map()) {
return handle(wrapper->value(), isolate);
}
return value;
}
void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@ -226,44 +185,55 @@ void Accessors::ArrayLengthGetter(
}
// Tries to non-observably convert |value| to a valid array length.
// Returns false if it fails.
static bool FastAsArrayLength(Isolate* isolate, Handle<Object> value,
uint32_t* length) {
if (value->ToArrayLength(length)) return true;
// We don't support AsArrayLength, so use AsArrayIndex for now. This just
// misses out on kMaxUInt32.
if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
return false;
}
void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
uint32_t length = 0;
if (!FastAsArrayLength(isolate, length_obj, &length)) {
Handle<Object> uint32_v;
if (!Execution::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> number_v;
if (!Execution::ToNumber(isolate, length_obj).ToHandle(&number_v)) {
isolate->OptionalRescheduleException(false);
return;
}
if (uint32_v->Number() != number_v->Number()) {
Handle<Object> exception = isolate->factory()->NewRangeError(
MessageTemplate::kInvalidArrayLength);
return isolate->ScheduleThrow(*exception);
}
CHECK(uint32_v->ToArrayLength(&length));
}
value = FlattenNumber(isolate, value);
Handle<JSArray> array_handle = Handle<JSArray>::cast(object);
MaybeHandle<Object> maybe;
Handle<Object> uint32_v;
maybe = Execution::ToUint32(isolate, value);
if (!maybe.ToHandle(&uint32_v)) {
if (JSArray::ObservableSetLength(array, length).is_null()) {
isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> number_v;
maybe = Execution::ToNumber(isolate, value);
if (!maybe.ToHandle(&number_v)) {
isolate->OptionalRescheduleException(false);
return;
}
if (uint32_v->Number() == number_v->Number()) {
maybe = JSArray::SetElementsLength(array_handle, uint32_v);
if (maybe.is_null()) isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> exception =
isolate->factory()->NewRangeError(MessageTemplate::kInvalidArrayLength);
isolate->ScheduleThrow(*exception);
}
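The rewritten setter first tries the fast conversion, then falls back to the spec-style check: ToUint32 and ToNumber of the new length must agree, otherwise a RangeError is scheduled. Reduced to a standalone sketch over an already-converted number:

  #include <cstdint>

  // Illustrative: a double is a valid array length iff ToUint32(v) equals
  // ToNumber(v), i.e. it is an integer in [0, 2^32 - 1] (and not NaN).
  bool IsValidArrayLength(double v, uint32_t* out) {
    if (!(v >= 0 && v <= 4294967295.0)) return false;  // RangeError case
    uint32_t u = static_cast<uint32_t>(v);
    if (static_cast<double>(u) != v) return false;     // non-integer
    *out = u;
    return true;
  }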
@ -706,8 +676,9 @@ void Accessors::ScriptIsEmbedderDebugScriptGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
bool is_embedder_debug_script =
Script::cast(JSValue::cast(object)->value())->is_embedder_debug_script();
bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
->origin_options()
.IsEmbedderDebugScript();
Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@ -970,9 +941,6 @@ void Accessors::FunctionPrototypeSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
return;
}
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionPrototype(isolate, object, value).is_null()) {
@ -1061,8 +1029,6 @@ void Accessors::FunctionLengthSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionLength(isolate, object, value).is_null()) {
@ -1120,8 +1086,6 @@ void Accessors::FunctionNameSetter(
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionName(isolate, object, value).is_null()) {
@ -1151,22 +1115,41 @@ static Handle<Object> ArgumentsForInlinedFunction(
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Factory* factory = isolate->factory();
SlotRefValueBuilder slot_refs(
frame, inlined_frame_index,
inlined_function->shared()->internal_formal_parameter_count());
int args_count = slot_refs.args_length();
TranslatedState translated_values(frame);
translated_values.Prepare(false, frame->fp());
int argument_count = 0;
TranslatedFrame* translated_frame =
translated_values.GetArgumentsInfoFromJSFrameIndex(inlined_frame_index,
&argument_count);
TranslatedFrame::iterator iter = translated_frame->begin();
// Skip the function.
iter++;
// Skip the receiver.
iter++;
argument_count--;
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
slot_refs.Prepare(isolate);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = slot_refs.GetNext(isolate, 0);
factory->NewArgumentsObject(inlined_function, argument_count);
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
// If we materialize any object, we should deopt because we might alias
// an object that was eliminated by escape analysis.
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
array->set(i, *value);
iter++;
}
slot_refs.Finish(isolate);
arguments->set_elements(*array);
if (should_deoptimize) {
translated_values.StoreMaterializedValuesAndDeopt();
}
// Return the freshly allocated arguments object.
return arguments;
}
@ -1437,9 +1420,19 @@ static void ModuleGetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
Object* value = context->get(slot);
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
@ -1459,9 +1452,18 @@ static void ModuleSetExport(
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
return;
}
Object* old_value = context->get(slot);
Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
@ -1493,4 +1495,5 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
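Both module accessors now fetch the slot through the Maybe-based Int32Value and bounds-check it before touching the context; the pattern, reduced to an illustrative helper:

  // Returns the slot index, or -1 when conversion fails or the index is
  // out of range (the callers above then schedule a ReferenceError).
  int ExtractSlot(v8::Local<v8::Context> context, v8::Local<v8::Value> data,
                  int context_length) {
    int slot = data->Int32Value(context).FromMaybe(-1);
    if (slot < 0 || slot >= context_length) return -1;
    return slot;
  }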


@ -98,11 +98,6 @@ class Accessors : public AllStatic {
static Handle<ExecutableAccessorInfo> CloneAccessor(
Isolate* isolate,
Handle<ExecutableAccessorInfo> accessor);
private:
// Helper functions.
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
};
} } // namespace v8::internal


@ -76,4 +76,5 @@ bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
return false;
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -337,4 +337,5 @@ void AllocationTracker::UnresolvedLocation::HandleWeakScript(
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -108,4 +108,5 @@ void AlignedFree(void *ptr) {
#endif
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/isolate.h"
#include "src/lookup.h"
#include "src/messages.h"
namespace v8 {
namespace internal {
@ -66,7 +67,7 @@ MaybeHandle<Object> DefineAccessorProperty(
MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key,
Handle<Name> name,
Handle<Object> prop_data,
Smi* unchecked_attributes) {
DCHECK((unchecked_attributes->value() &
@ -77,35 +78,24 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
Instantiate(isolate, prop_data, key), Object);
Instantiate(isolate, prop_data, name), Object);
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
#ifdef DEBUG
bool duplicate;
if (key->IsName()) {
LookupIterator it(object, Handle<Name>::cast(key),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
duplicate = it.IsFound();
} else {
uint32_t index = 0;
key->ToArrayIndex(&index);
Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
if (!maybe.IsJust()) return MaybeHandle<Object>();
duplicate = maybe.FromJust();
}
if (duplicate) {
Handle<Object> args[1] = {key};
THROW_NEW_ERROR(isolate, NewTypeError("duplicate_template_property",
HandleVector(args, 1)),
Object);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
if (it.IsFound()) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kDuplicateTemplateProperty, name),
Object);
}
#endif
RETURN_ON_EXCEPTION(
isolate, Runtime::DefineObjectProperty(object, key, value, attributes),
Object);
return object;
return Object::AddDataProperty(&it, value, attributes, STRICT,
Object::CERTAINLY_NOT_STORE_FROM_KEYED);
}

896
deps/v8/src/api.cc vendored

File diff suppressed because it is too large.

15
deps/v8/src/api.h vendored

@ -95,6 +95,7 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
return reinterpret_cast<T>(
reinterpret_cast<intptr_t>(
v8::internal::Foreign::cast(obj)->foreign_address()));
@ -105,6 +106,7 @@ template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(
v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == nullptr) return handle(v8::internal::Smi::FromInt(0), isolate);
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
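Together these two guards let a null pointer round-trip through FromCData/ToCData as Smi 0 instead of wrapping a null address in a Foreign.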
@ -146,6 +148,8 @@ class RegisteredExtension {
V(RegExp, JSRegExp) \
V(Object, JSObject) \
V(Array, JSArray) \
V(Map, JSMap) \
V(Set, JSSet) \
V(ArrayBuffer, JSArrayBuffer) \
V(ArrayBufferView, JSArrayBufferView) \
V(TypedArray, JSTypedArray) \
@ -159,6 +163,7 @@ class RegisteredExtension {
V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \
V(SharedArrayBuffer, JSArrayBuffer) \
V(Name, Name) \
V(String, String) \
V(Symbol, Symbol) \
@ -202,6 +207,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<Map> ToLocal(
v8::internal::Handle<v8::internal::JSMap> obj);
static inline Local<Set> ToLocal(
v8::internal::Handle<v8::internal::JSSet> obj);
static inline Local<ArrayBuffer> ToLocal(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<ArrayBufferView> ToLocal(
@ -230,6 +239,9 @@ class Utils {
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
@ -356,10 +368,13 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, JSMap, Map)
MAKE_TO_LOCAL(ToLocal, JSSet, Set)
MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer)
MAKE_TO_LOCAL(ToLocal, JSArrayBufferView, ArrayBufferView)
MAKE_TO_LOCAL(ToLocal, JSDataView, DataView)
MAKE_TO_LOCAL(ToLocal, JSTypedArray, TypedArray)
MAKE_TO_LOCAL(ToLocalShared, JSArrayBuffer, SharedArrayBuffer)
TYPED_ARRAYS(MAKE_TO_LOCAL_TYPED_ARRAY)


@ -102,4 +102,5 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -118,10 +118,11 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
if (FLAG_enable_ool_constant_pool ||
if (FLAG_enable_embedded_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
// We return the PC for ool constant pool since this function is used by the
// serializer and expects the address to reside within the code object.
// We return the PC for embedded constant pool since this function is used
// by the serializer and expects the address to reside within the code
// object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@ -545,7 +546,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
set_target_address_at(constant_pool_entry, code, target);
} else {
Memory::Address_at(constant_pool_entry) = target;
@ -562,21 +563,21 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
(FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
(FLAG_enable_embedded_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
}
}
Address Assembler::constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool) {
if (FLAG_enable_ool_constant_pool) {
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
DCHECK(constant_pool != NULL);
int cp_offset;
if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
@ -604,7 +605,7 @@ Address Assembler::constant_pool_entry_address(
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
}
return reinterpret_cast<Address>(constant_pool) + cp_offset;
return constant_pool + cp_offset;
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
@ -613,8 +614,7 @@ Address Assembler::constant_pool_entry_address(
}
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
@ -645,8 +645,7 @@ Address Assembler::target_address_at(Address pc,
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
if (is_constant_pool_load(pc)) {


@ -234,9 +234,9 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded.  Being
// specially coded on ARM means that it is a movw/movt instruction, or is an
// out of line constant pool entry.  These only occur if
// FLAG_enable_ool_constant_pool is true.
return FLAG_enable_ool_constant_pool;
// embedded constant pool entry.  These only occur if
// FLAG_enable_embedded_constant_pool is true.
return FLAG_enable_embedded_constant_pool;
}
@ -449,11 +449,11 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
constant_pool_builder_(),
constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
num_pending_32_bit_constants_ = 0;
num_pending_64_bit_constants_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@ -471,23 +471,30 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.Finish();
if (!FLAG_enable_ool_constant_pool) {
// Emit constant pool if necessary.
// Emit constant pool if necessary.
int constant_pool_offset = 0;
if (FLAG_enable_embedded_constant_pool) {
constant_pool_offset = EmitEmbeddedConstantPool();
} else {
CheckConstPool(true, false);
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
}
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->constant_pool_size =
(constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
}
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@ -623,7 +630,7 @@ Register Assembler::GetRm(Instr instr) {
Instr Assembler::GetConsantPoolLoadPattern() {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
return kLdrPpImmedPattern;
} else {
return kLdrPCImmedPattern;
@ -632,7 +639,7 @@ Instr Assembler::GetConsantPoolLoadPattern() {
Instr Assembler::GetConsantPoolLoadMask() {
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
return kLdrPpImmedMask;
} else {
return kLdrPCImmedMask;
@ -1044,8 +1051,8 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
if (FLAG_enable_ool_constant_pool && assembler != NULL &&
!assembler->is_ool_constant_pool_available()) {
if (FLAG_enable_embedded_constant_pool && assembler != NULL &&
!assembler->is_constant_pool_available()) {
return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) {
@ -1074,8 +1081,9 @@ int Operand::instructions_required(const Assembler* assembler,
if (use_mov_immediate_load(*this, assembler)) {
// A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
} else if (assembler != NULL && assembler->use_extended_constant_pool()) {
// An extended constant pool load.
} else if (assembler != NULL &&
assembler->ConstantPoolAccessIsInOverflow()) {
// An overflowed constant pool load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
// A small constant pool load.
@ -1100,23 +1108,23 @@ int Operand::instructions_required(const Assembler* assembler,
void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
if (x.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo);
RecordRelocInfo(x.rmode_);
}
if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) {
if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
if (!FLAG_enable_embedded_constant_pool &&
x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2);
}
movw(target, imm32 & 0xffff, cond);
movt(target, imm32 >> 16, cond);
} else {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
@ -1126,10 +1134,11 @@ void Assembler::move_32_bit_immediate(Register rd,
mov(rd, target, LeaveCC, cond);
}
} else {
DCHECK(!FLAG_enable_ool_constant_pool || is_ool_constant_pool_available());
ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
if (section == ConstantPoolArray::EXTENDED_SECTION) {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
ConstantPoolEntry::Access access =
ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
if (access == ConstantPoolEntry::OVERFLOWED) {
DCHECK(FLAG_enable_embedded_constant_pool);
Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset.
if (CpuFeatures::IsSupported(ARMv7)) {
@ -1144,8 +1153,9 @@ void Assembler::move_32_bit_immediate(Register rd,
// Load from constant pool at offset.
ldr(rd, MemOperand(pp, target), cond);
} else {
DCHECK(section == ConstantPoolArray::SMALL_SECTION);
ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
DCHECK(access == ConstantPoolEntry::REGULAR);
ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
cond);
}
}
}
@ -2554,7 +2564,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm && is_ool_constant_pool_available()) {
} else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
@ -2570,18 +2580,17 @@ void Assembler::vmov(const DwVfpRegister dst,
// The code could also randomize the order of values, though
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
RelocInfo rinfo(pc_, imm);
ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
if (section == ConstantPoolArray::EXTENDED_SECTION) {
DCHECK(FLAG_enable_ool_constant_pool);
ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
if (access == ConstantPoolEntry::OVERFLOWED) {
DCHECK(FLAG_enable_embedded_constant_pool);
// Emit instructions to load constant pool offset.
movw(ip, 0);
movt(ip, 0);
// Load from constant pool at offset.
vldr(dst, MemOperand(pp, ip));
} else {
DCHECK(section == ConstantPoolArray::SMALL_SECTION);
vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
DCHECK(access == ConstantPoolEntry::REGULAR);
vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
}
} else {
// Synthesise the double from ARM immediates.
@ -2596,7 +2605,8 @@ void Assembler::vmov(const DwVfpRegister dst,
} else if (scratch.is(no_reg)) {
mov(ip, Operand(lo));
vmov(dst, VmovIndexLo, ip);
if ((lo & 0xffff) == (hi & 0xffff)) {
if (((lo & 0xffff) == (hi & 0xffff)) &&
CpuFeatures::IsSupported(ARMv7)) {
movt(ip, hi >> 16);
} else {
mov(ip, Operand(hi));
@ -3555,22 +3565,6 @@ void Assembler::GrowBuffer() {
// None of our relocation types are pc relative pointing outside the code
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
rinfo.set_pc(rinfo.pc() + pc_delta);
}
}
for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
DCHECK(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
constant_pool_builder_.Relocate(pc_delta);
}
@ -3578,8 +3572,8 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@ -3590,14 +3584,26 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
}
void Assembler::dq(uint64_t value) {
// No relocation info should be pending while using dq. dq is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dq.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
}
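The new dq() rounds out db/dd for 64-bit data; the rewritten pool emitter below uses it for unmerged 64-bit entries (dq(entry.value64())).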
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) =
@ -3607,64 +3613,73 @@ void Assembler::emit_code_stub_address(Code* stub) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
(rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
!emit_debug_code())) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
data = RecordedAstId().ToInt();
ClearRecordedAstId();
}
RelocInfo rinfo(pc_, rmode, data, NULL);
RecordRelocInfo(rinfo);
reloc_info_writer.Write(&rinfo);
}
void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
RelocInfo reloc_info_with_ast_id(rinfo.pc(),
rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
reloc_info_writer.Write(&rinfo);
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
!(serializer_enabled() || rmode < RelocInfo::CELL);
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value, sharing_ok);
} else {
DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
if (num_pending_32_bit_constants_ == 0) {
first_const_pool_32_use_ = position;
}
ConstantPoolEntry entry(position, value, sharing_ok);
pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
return ConstantPoolEntry::REGULAR;
}
}
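The sharing_ok flag recorded here is what the emitter consults when folding duplicates (see the entry.sharing_ok() loop further down). In isolation, the merge is a quadratic scan over earlier pending entries, roughly:

  #include <cstdint>
  #include <vector>

  // Standalone sketch; Entry is a simplified stand-in for ConstantPoolEntry.
  struct Entry {
    int position;
    intptr_t value;
    bool sharing_ok;
    int merged_index;  // index of an earlier identical entry, or -1
  };

  void MergeSharedEntries(std::vector<Entry>* entries) {
    for (size_t i = 0; i < entries->size(); ++i) {
      Entry& e = (*entries)[i];
      e.merged_index = -1;
      if (!e.sharing_ok) continue;
      for (size_t j = 0; j < i; ++j) {
        if ((*entries)[j].sharing_ok && (*entries)[j].value == e.value) {
          e.merged_index = static_cast<int>(j);  // reuse the earlier slot
          break;
        }
      }
    }
  }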
ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
const RelocInfo& rinfo) {
if (FLAG_enable_ool_constant_pool) {
return constant_pool_builder_.AddEntry(this, rinfo);
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
double value) {
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value);
} else {
if (rinfo.rmode() == RelocInfo::NONE64) {
DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
if (num_pending_64_bit_reloc_info_ == 0) {
first_const_pool_64_use_ = pc_offset();
}
pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
} else {
DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
if (num_pending_32_bit_reloc_info_ == 0) {
first_const_pool_32_use_ = pc_offset();
}
pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
if (num_pending_64_bit_constants_ == 0) {
first_const_pool_64_use_ = position;
}
ConstantPoolEntry entry(position, value);
pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
return ConstantPoolArray::SMALL_SECTION;
return ConstantPoolEntry::REGULAR;
}
}
void Assembler::BlockConstPoolFor(int instructions) {
if (FLAG_enable_ool_constant_pool) {
// Should be a no-op if using an out-of-line constant pool.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
return;
}
@ -3673,10 +3688,11 @@ void Assembler::BlockConstPoolFor(int instructions) {
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
DCHECK((num_pending_32_bit_constants_ == 0) ||
(start - first_const_pool_32_use_ +
num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
num_pending_64_bit_constants_ * kDoubleSize <
kMaxDistToIntPool));
DCHECK((num_pending_64_bit_constants_ == 0) ||
(start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
no_const_pool_before_ = pc_limit;
@ -3689,10 +3705,10 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (FLAG_enable_ool_constant_pool) {
// Should be a no-op if using an out-of-line constant pool.
DCHECK(num_pending_32_bit_reloc_info_ == 0);
DCHECK(num_pending_64_bit_reloc_info_ == 0);
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
return;
}
@ -3706,8 +3722,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
if ((num_pending_32_bit_reloc_info_ == 0) &&
(num_pending_64_bit_reloc_info_ == 0)) {
if ((num_pending_32_bit_constants_ == 0) &&
(num_pending_64_bit_constants_ == 0)) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@ -3718,15 +3734,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int size_after_marker = num_pending_32_bit_reloc_info_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
int size_after_marker = num_pending_32_bit_constants_ * kPointerSize;
bool has_fp_values = (num_pending_64_bit_constants_ > 0);
bool require_64_bit_align = false;
if (has_fp_values) {
require_64_bit_align = (((uintptr_t)pc_ + size_up_to_marker) & 0x7);
if (require_64_bit_align) {
size_after_marker += kInstrSize;
}
size_after_marker += num_pending_64_bit_reloc_info_ * kDoubleSize;
size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
}
int size = size_up_to_marker + size_after_marker;
@ -3743,9 +3759,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
bool need_emit = false;
if (has_fp_values) {
int dist64 = pc_offset() +
size -
num_pending_32_bit_reloc_info_ * kPointerSize -
int dist64 = pc_offset() + size -
num_pending_32_bit_constants_ * kPointerSize -
first_const_pool_64_use_;
if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
(!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
@ -3787,60 +3802,52 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
for (int i = 0; i < num_pending_64_bit_constants_; i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment.
Instr instr = instr_at(rinfo.pc());
Instr instr = instr_at(entry.position());
// Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
DCHECK((IsVldrDPcImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
int delta = pc_offset() - entry.position() - kPcLoadDelta;
DCHECK(is_uint10(delta));
bool found = false;
uint64_t value = rinfo.raw_data64();
uint64_t value = entry.value64();
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
if (value == rinfo2.raw_data64()) {
ConstantPoolEntry& entry2 = pending_64_bit_constants_[j];
if (value == entry2.value64()) {
found = true;
DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
Instr instr2 = instr_at(rinfo2.pc());
Instr instr2 = instr_at(entry2.position());
DCHECK(IsVldrDPcImmediateOffset(instr2));
delta = GetVldrDRegisterImmediateOffset(instr2);
delta += rinfo2.pc() - rinfo.pc();
delta += entry2.position() - entry.position();
break;
}
}
instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
instr_at_put(entry.position(),
SetVldrDRegisterImmediateOffset(instr, delta));
if (!found) {
uint64_t uint_data = rinfo.raw_data64();
emit(uint_data & 0xFFFFFFFF);
emit(uint_data >> 32);
dq(entry.value64());
}
}
// Emit 32-bit constant pool entries.
for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
rinfo.rmode() != RelocInfo::CONST_POOL &&
rinfo.rmode() != RelocInfo::NONE64);
Instr instr = instr_at(rinfo.pc());
for (int i = 0; i < num_pending_32_bit_constants_; i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
Instr instr = instr_at(entry.position());
// 64-bit loads shouldn't get here.
DCHECK(!IsVldrDPcImmediateOffset(instr));
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
int delta = pc_offset() - entry.position() - kPcLoadDelta;
DCHECK(is_uint12(delta));
// 0 is the smallest delta:
// ldr rd, [pc, #0]
@ -3848,16 +3855,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// data
bool found = false;
if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) {
if (entry.sharing_ok()) {
for (int j = 0; j < i; j++) {
RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
ConstantPoolEntry& entry2 = pending_32_bit_constants_[j];
if ((rinfo2.data() == rinfo.data()) &&
(rinfo2.rmode() == rinfo.rmode())) {
Instr instr2 = instr_at(rinfo2.pc());
if (entry2.value() == entry.value()) {
Instr instr2 = instr_at(entry2.position());
if (IsLdrPcImmediateOffset(instr2)) {
delta = GetLdrRegisterImmediateOffset(instr2);
delta += rinfo2.pc() - rinfo.pc();
delta += entry2.position() - entry.position();
found = true;
break;
}
@ -3865,18 +3871,19 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
instr_at_put(entry.position(),
SetLdrRegisterImmediateOffset(instr, delta));
if (!found) {
emit(rinfo.data());
emit(entry.value());
}
} else {
DCHECK(IsMovW(instr));
}
}
num_pending_32_bit_reloc_info_ = 0;
num_pending_64_bit_reloc_info_ = 0;
num_pending_32_bit_constants_ = 0;
num_pending_64_bit_constants_ = 0;
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
@ -3893,229 +3900,61 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
if (!FLAG_enable_ool_constant_pool) {
return isolate->factory()->empty_constant_pool_array();
}
return constant_pool_builder_.New(isolate);
}
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
DCHECK(FLAG_enable_embedded_constant_pool);
Address pc = buffer_ + pc_offset;
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
constant_pool_builder_.Populate(this, constant_pool);
}
ConstantPoolBuilder::ConstantPoolBuilder()
: entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
bool ConstantPoolBuilder::IsEmpty() {
return entries_.size() == 0;
}
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
RelocInfo::Mode rmode) {
if (rmode == RelocInfo::NONE64) {
return ConstantPoolArray::INT64;
} else if (!RelocInfo::IsGCRelocMode(rmode)) {
return ConstantPoolArray::INT32;
} else if (RelocInfo::IsCodeTarget(rmode)) {
return ConstantPoolArray::CODE_PTR;
} else {
DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
return ConstantPoolArray::HEAP_PTR;
}
}
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
Assembler* assm, const RelocInfo& rinfo) {
RelocInfo::Mode rmode = rinfo.rmode();
DCHECK(rmode != RelocInfo::COMMENT &&
rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL);
// Try to merge entries which won't be patched.
int merged_index = -1;
ConstantPoolArray::LayoutSection entry_section = current_section_;
if (RelocInfo::IsNone(rmode) ||
(!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
size_t i;
std::vector<ConstantPoolEntry>::const_iterator it;
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
// Merge with found entry.
merged_index = i;
entry_section = entries_[i].section_;
break;
}
}
}
DCHECK(entry_section <= current_section_);
entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
if (merged_index == -1) {
// Not merged, so update the appropriate count.
number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
}
// Check if we still have room for another entry in the small section
// given Arm's ldr and vldr immediate offset range.
if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
!(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
is_uint10(ConstantPoolArray::MaxInt64Offset(
small_entries()->count_of(ConstantPoolArray::INT64))))) {
current_section_ = ConstantPoolArray::EXTENDED_SECTION;
}
return entry_section;
}
void ConstantPoolBuilder::Relocate(int pc_delta) {
for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
entry != entries_.end(); entry++) {
DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
}
}
Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
if (IsEmpty()) {
return isolate->factory()->empty_constant_pool_array();
} else if (extended_entries()->is_empty()) {
return isolate->factory()->NewConstantPoolArray(*small_entries());
} else {
DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
return isolate->factory()->NewExtendedConstantPoolArray(
*small_entries(), *extended_entries());
}
}
void ConstantPoolBuilder::Populate(Assembler* assm,
ConstantPoolArray* constant_pool) {
DCHECK_EQ(extended_entries()->is_empty(),
!constant_pool->is_extended_layout());
DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
constant_pool, ConstantPoolArray::SMALL_SECTION)));
if (constant_pool->is_extended_layout()) {
DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
}
// Set up initial offsets.
int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
[ConstantPoolArray::NUMBER_OF_TYPES];
for (int section = 0; section <= constant_pool->final_section(); section++) {
int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
? small_entries()->total_count()
: 0;
for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
if (number_of_entries_[section].count_of(type) != 0) {
offsets[section][type] = constant_pool->OffsetOfElementAt(
number_of_entries_[section].base_of(type) + section_start);
}
}
}
for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
entry != entries_.end(); entry++) {
RelocInfo rinfo = entry->rinfo_;
RelocInfo::Mode rmode = entry->rinfo_.rmode();
ConstantPoolArray::Type type = GetConstantPoolType(rmode);
// Update constant pool if necessary and get the entry's offset.
int offset;
if (entry->merged_index_ == -1) {
offset = offsets[entry->section_][type];
offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
if (type == ConstantPoolArray::INT64) {
constant_pool->set_at_offset(offset, rinfo.data64());
} else if (type == ConstantPoolArray::INT32) {
constant_pool->set_at_offset(offset,
static_cast<int32_t>(rinfo.data()));
} else if (type == ConstantPoolArray::CODE_PTR) {
constant_pool->set_at_offset(offset,
reinterpret_cast<Address>(rinfo.data()));
} else {
DCHECK(type == ConstantPoolArray::HEAP_PTR);
constant_pool->set_at_offset(offset,
reinterpret_cast<Object*>(rinfo.data()));
}
offset -= kHeapObjectTag;
entry->merged_index_ = offset; // Stash offset for merged entries.
// Patch vldr/ldr instruction with correct offset.
Instr instr = instr_at(pc);
if (access == ConstantPoolEntry::OVERFLOWED) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
Instr next_instr = instr_at(pc + kInstrSize);
DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
DCHECK((IsMovT(next_instr) &&
Instruction::ImmedMovwMovtValue(next_instr) == 0));
instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
instr_at_put(pc + kInstrSize,
PatchMovwImmediate(next_instr, offset >> 16));
} else {
DCHECK(entry->merged_index_ < (entry - entries_.begin()));
offset = entries_[entry->merged_index_].merged_index_;
}
// Patch vldr/ldr instruction with correct offset.
Instr instr = assm->instr_at(rinfo.pc());
if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]'.
Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
DCHECK((Assembler::IsMovW(instr) &&
Instruction::ImmedMovwMovtValue(instr) == 0));
DCHECK((Assembler::IsMovT(next_instr) &&
Instruction::ImmedMovwMovtValue(next_instr) == 0));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchMovwImmediate(next_instr, offset >> 16));
} else {
// Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
DCHECK((Assembler::IsMovImmed(instr) &&
Instruction::Immed8Value(instr) == 0));
DCHECK((Assembler::IsOrrImmed(instr_2) &&
Instruction::Immed8Value(instr_2) == 0) &&
Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
DCHECK((Assembler::IsOrrImmed(instr_3) &&
Instruction::Immed8Value(instr_3) == 0) &&
Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
DCHECK((Assembler::IsOrrImmed(instr_4) &&
Instruction::Immed8Value(instr_4) == 0) &&
Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
assm->instr_at_put(
rinfo.pc() + 2 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
assm->instr_at_put(
rinfo.pc() + 3 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
} else if (type == ConstantPoolArray::INT64) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint10(offset));
assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
instr, offset));
} else {
// Instruction to patch must be 'ldr rd, [pp, #0]'.
DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint12(offset));
assm->instr_at_put(
rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
// Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0]'.
Instr instr_2 = instr_at(pc + kInstrSize);
Instr instr_3 = instr_at(pc + 2 * kInstrSize);
Instr instr_4 = instr_at(pc + 3 * kInstrSize);
DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
GetRn(instr_2).is(GetRd(instr_2)));
DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
GetRn(instr_3).is(GetRd(instr_3)));
DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
GetRn(instr_4).is(GetRd(instr_4)));
instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
instr_at_put(pc + kInstrSize,
PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
instr_at_put(pc + 2 * kInstrSize,
PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
instr_at_put(pc + 3 * kInstrSize,
PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
} else if (type == ConstantPoolEntry::DOUBLE) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((IsVldrDPpImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint10(offset));
instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
} else {
// Instruction to patch must be 'ldr rd, [pp, #0]'.
DCHECK((IsLdrPpImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0));
DCHECK(is_uint12(offset));
instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
}
}
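The two patching shapes above split a 32-bit constant pool offset differently: the ARMv7 movw/movt pair carries 16 bits each, while the pre-ARMv7 mov/orr chain carries four positioned 8-bit chunks. A minimal standalone sketch of the split, mirroring the PatchMovwImmediate and PatchShiftImm calls above (kImm8Mask is assumed to be 0xff):

#include <cstdint>

// ARMv7: movw receives the low half of the offset, movt the high half.
void SplitMovwMovt(uint32_t offset, uint32_t* movw_imm, uint32_t* movt_imm) {
  *movw_imm = offset & 0xffff;  // low 16 bits -> movw
  *movt_imm = offset >> 16;     // high 16 bits -> movt
}

// Pre-ARMv7: one mov plus three orrs, each keeping its 8-bit chunk in place.
void SplitMovOrr(uint32_t offset, uint32_t chunks[4]) {
  const uint32_t kImm8Mask = 0xff;  // assumed value of the mask used above
  for (int i = 0; i < 4; ++i) {
    chunks[i] = offset & (kImm8Mask << (8 * i));
  }
}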
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -94,7 +94,7 @@ const int kRegister_pc_Code = 15;
struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters =
FLAG_enable_ool_constant_pool ? 8 : 9;
FLAG_enable_embedded_constant_pool ? 8 : 9;
static const int kSizeInBytes = 4;
inline static int NumAllocatableRegisters();
@ -122,7 +122,7 @@ struct Register {
"r7",
"r8",
};
if (FLAG_enable_ool_constant_pool && (index >= 7)) {
if (FLAG_enable_embedded_constant_pool && (index >= 7)) {
return names[index + 1];
}
return names[index];
@ -164,7 +164,7 @@ const Register r5 = { kRegister_r5_Code };
const Register r6 = { kRegister_r6_Code };
// Used as context register.
const Register r7 = {kRegister_r7_Code};
// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
// Used as constant pool pointer register if FLAG_enable_embedded_constant_pool.
const Register r8 = { kRegister_r8_Code };
// Used as lithium codegen scratch register.
const Register r9 = { kRegister_r9_Code };
@ -651,52 +651,6 @@ class NeonListOperand BASE_EMBEDDED {
};
// Class used to build a constant pool.
class ConstantPoolBuilder BASE_EMBEDDED {
public:
ConstantPoolBuilder();
ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
const RelocInfo& rinfo);
void Relocate(int pc_delta);
bool IsEmpty();
Handle<ConstantPoolArray> New(Isolate* isolate);
void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
inline ConstantPoolArray::LayoutSection current_section() const {
return current_section_;
}
inline ConstantPoolArray::NumberOfEntries* number_of_entries(
ConstantPoolArray::LayoutSection section) {
return &number_of_entries_[section];
}
inline ConstantPoolArray::NumberOfEntries* small_entries() {
return number_of_entries(ConstantPoolArray::SMALL_SECTION);
}
inline ConstantPoolArray::NumberOfEntries* extended_entries() {
return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
}
private:
struct ConstantPoolEntry {
ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
int merged_index)
: rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
RelocInfo rinfo_;
ConstantPoolArray::LayoutSection section_;
int merged_index_;
};
ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
std::vector<ConstantPoolEntry> entries_;
ConstantPoolArray::LayoutSection current_section_;
ConstantPoolArray::NumberOfEntries number_of_entries_[2];
};
struct VmovIndex {
unsigned char index;
};
@ -754,19 +708,16 @@ class Assembler : public AssemblerBase {
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address constant_pool_entry_address(
Address pc, ConstantPoolArray* constant_pool));
INLINE(static Address constant_pool_entry_address(Address pc,
Address constant_pool));
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool));
INLINE(static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(Address pc,
@ -774,7 +725,7 @@ class Assembler : public AssemblerBase {
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED)) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@ -841,6 +792,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
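Align and DataAlign reduce to the same padding computation; a sketch, valid under the stated precondition that m is a power of two:

// Number of zero bytes needed to round pc_offset up to a multiple of m.
static int PaddingFor(int pc_offset, int m) {
  return (m - (pc_offset & (m - 1))) & (m - 1);
}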
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@ -1450,11 +1404,13 @@ class Assembler : public AssemblerBase {
void RecordConstPool(int size);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
// for inline tables, e.g., jump-tables. CheckConstPool() should be
// called before any use of db/dd/dq/dp to ensure that constant pools
// are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
void dq(uint64_t data);
void dp(uintptr_t data) { dd(data); }
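For illustration, a hypothetical generator emitting an inline jump table would flush pending constants first and then emit raw words (a sketch; EmitJumpTable and targets are invented for this example, and only CheckConstPool and dd come from the interface above):

#include <cstdint>
#include <vector>

void EmitJumpTable(Assembler* assm, const std::vector<uint32_t>& targets) {
  // Force out any pending constant pool so it cannot land inside the table.
  assm->CheckConstPool(true /* force_emit */, false /* require_jump */);
  for (uint32_t target : targets) {
    assm->dd(target);  // one 32-bit table entry
  }
}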
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
@ -1526,8 +1482,8 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer; it therefore acts as the limit.
static const int kMaxNumPending32RelocInfo = kMaxDistToIntPool/kInstrSize;
static const int kMaxNumPending64RelocInfo = kMaxDistToFPPool/kInstrSize;
static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
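Worked out with the usual 4-byte ARM instruction width (kInstrSize is defined elsewhere; the value here is an assumption made for the arithmetic):

// kMaxNumPending32Constants = 4 KB / 4 = 1024 entries
// kMaxNumPending64Constants = 1 KB / 4 = 256 entries
const int kInstrSizeSketch = 4;
const int kMaxNumPending32Sketch = (4 * 1024) / kInstrSizeSketch;  // 1024
const int kMaxNumPending64Sketch = (1 * 1024) / kInstrSizeSketch;  // 256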
// Postpone the generation of the constant pool for the specified number of
// instructions.
@ -1536,17 +1492,19 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
bool use_extended_constant_pool() const {
return constant_pool_builder_.current_section() ==
ConstantPoolArray::EXTENDED_SECTION;
int EmitEmbeddedConstantPool() {
DCHECK(FLAG_enable_embedded_constant_pool);
return constant_pool_builder_.Emit(this);
}
bool ConstantPoolAccessIsInOverflow() const {
return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
ConstantPoolEntry::OVERFLOWED;
}
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type);
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@ -1581,10 +1539,10 @@ class Assembler : public AssemblerBase {
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
(start + num_pending_64_bit_reloc_info_ * kDoubleSize <
DCHECK((num_pending_32_bit_constants_ == 0) ||
(start + num_pending_64_bit_constants_ * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
DCHECK((num_pending_64_bit_constants_ == 0) ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
@ -1643,20 +1601,20 @@ class Assembler : public AssemblerBase {
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Relocation info records are also used during code generation as temporary
// ConstantPoolEntry records are used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// to the constant pool. These records are temporarily stored in a separate
// buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
// The buffers of pending relocation info.
RelocInfo pending_32_bit_reloc_info_[kMaxNumPending32RelocInfo];
RelocInfo pending_64_bit_reloc_info_[kMaxNumPending64RelocInfo];
// Number of pending reloc info entries in the 32 bits buffer.
int num_pending_32_bit_reloc_info_;
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
// The buffers of pending constant pool entries.
ConstantPoolEntry pending_32_bit_constants_[kMaxNumPending32Constants];
ConstantPoolEntry pending_64_bit_constants_[kMaxNumPending64Constants];
// Number of pending constant pool entries in the 32 bits buffer.
int num_pending_32_bit_constants_;
// Number of pending constant pool entries in the 64 bits buffer.
int num_pending_64_bit_constants_;
ConstantPoolBuilder constant_pool_builder_;
@ -1685,15 +1643,12 @@ class Assembler : public AssemblerBase {
void bind_to(Label* L, int pos);
void next(Label* L);
enum UseConstantPoolMode {
USE_CONSTANT_POOL,
DONT_USE_CONSTANT_POOL
};
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
ConstantPoolEntry::Access ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value);
ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value);
friend class RelocInfo;
friend class CodePatcher;

@ -343,6 +343,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@ -367,10 +368,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r2);
}
// Preserve the two incoming parameters on the stack.
// Preserve the incoming parameters on the stack.
__ SmiTag(r0);
__ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function.
__ push(r0);
__ push(r1);
if (use_new_target) {
__ push(r3);
}
Label rt_call, allocated, normal_new, count_incremented;
__ cmp(r1, r3);
@ -446,7 +450,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
// r3: object size (not including memento if create_memento)
// r3: object size (including memento if create_memento)
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@ -520,7 +524,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ add(r4, r4, Operand(kHeapObjectTag));
// Check if a non-empty properties array is needed. Continue with
// allocated object if not fall through to runtime call if it is.
// allocated object if not; allocate and initialize a FixedArray if yes.
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
@ -575,15 +579,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: FixedArray (not tagged)
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry;
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&entry);
__ bind(&loop);
__ str(r0, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, r6);
__ b(lt, &loop);
}
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ InitializeFieldsWithFiller(r2, r6, r0);
// Store the initialized FixedArray into the properties field of
// the JSObject
@ -617,7 +614,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&allocated);
if (create_memento) {
__ ldr(r2, MemOperand(sp, kPointerSize * 2));
int offset = (use_new_target ? 3 : 2) * kPointerSize;
__ ldr(r2, MemOperand(sp, offset));
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r5);
__ b(eq, &count_incremented);
@ -631,23 +629,27 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
__ push(r4);
__ push(r4);
// Restore the parameters.
if (use_new_target) {
__ pop(r3);
}
__ pop(r1);
// Reload the number of arguments and the constructor from the stack.
// sp[0]: receiver
// sp[1]: receiver
// sp[2]: constructor function
// sp[3]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ ldr(r3, MemOperand(sp, 3 * kPointerSize));
// Retrieve smi-tagged arguments count from the stack.
__ ldr(r0, MemOperand(sp));
__ SmiUntag(r0);
// Push new.target onto the construct frame. This is stored just below the
// receiver on the stack.
if (use_new_target) {
__ push(r3);
}
__ push(r4);
__ push(r4);
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Set up number of arguments for function call below
__ SmiUntag(r0, r3);
// Copy arguments and receiver to the expression stack.
// r0: number of arguments
// r1: constructor function
@ -655,9 +657,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of arguments (smi-tagged)
// sp[0]: receiver
// sp[1]: receiver
// sp[2]: constructor function
// sp[3]: number of arguments (smi-tagged)
// sp[2]: new.target (if used)
// sp[2/3]: number of arguments (smi-tagged)
Label loop, entry;
__ SmiTag(r3, r0);
__ b(&entry);
__ bind(&loop);
__ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
@ -680,15 +683,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
if (!is_api_function) {
// TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@ -699,8 +704,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// r0: result
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
@ -718,9 +723,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&exit);
// r0: result
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
// sp[1]: new.target (if used)
// sp[1/2]: number of arguments (smi-tagged)
int offset = (use_new_target ? 2 : 1) * kPointerSize;
__ ldr(r1, MemOperand(sp, offset));
// Leave construct frame.
}
@ -733,12 +739,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false);
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@ -789,8 +800,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ sub(r4, r4, Operand(2), SetCC);
__ b(ge, &loop);
__ add(r0, r0, Operand(1));
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@ -819,7 +828,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// r0: result
// sp[0]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ ldr(r1, MemOperand(sp, 0));
// Get arguments count, skipping over new.target.
__ ldr(r1, MemOperand(sp, kPointerSize));
// Leave construct frame.
}
@ -874,7 +884,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
// r5-r6, r8 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
// r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Clear the context before we push it when entering the internal frame.
@ -922,7 +932,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
if (!FLAG_enable_ool_constant_pool) {
if (!FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand(r4));
}
if (kR9Available == 1) {
@ -1166,8 +1176,10 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
if (FLAG_enable_ool_constant_pool) {
__ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (FLAG_enable_embedded_constant_pool) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
}
// Load the OSR entrypoint offset from the deoptimization data.
@ -1175,10 +1187,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ add(r0, r0, Operand::SmiUntag(r1));
__ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Compute the target address = code start + osr_offset
__ add(lr, r0, Operand::SmiUntag(r1));
// And "return" to the OSR entry point of the function.
__ Ret();
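Spelled out as plain address arithmetic, the rewritten OSR entry computation is (a sketch, not V8 API; all four quantities come from the code above):

#include <cstdint>

// entry = code start + OSR offset, where code start is the Code object with
// the heap tag removed and the header skipped.
uintptr_t OsrEntry(uintptr_t code_obj, int header_size, int heap_object_tag,
                   intptr_t osr_offset) {
  uintptr_t code_start = code_obj + header_size - heap_object_tag;
  return code_start + osr_offset;
}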
@ -1392,6 +1402,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
Register slot = LoadDescriptor::SlotRegister();
Register vector = LoadWithVectorDescriptor::VectorRegister();
__ ldr(key, MemOperand(fp, indexOffset));
__ b(&entry);
@ -1401,7 +1413,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ mov(slot, Operand(Smi::FromInt(index)));
__ Move(vector, feedback_vector);
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@ -1649,8 +1668,8 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@ -1722,6 +1741,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ bind(&too_few);
// If the function is strong we need to throw an error.
Label no_strong_error;
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r5, Operand(1 << (SharedFunctionInfo::kStrongModeFunction +
kSmiTagSize)));
__ b(eq, &no_strong_error);
// What we really care about is the required number of arguments.
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kLengthOffset));
__ cmp(r0, Operand::SmiUntag(r4));
__ b(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
}
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
// Calculate copy start address into r0 and copy end address is fp.
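Reduced to plain logic, the strong-mode guard added above is (a sketch; the function name is illustrative only):

// Throw (via the runtime call above) only for strong-mode functions that were
// called with fewer arguments than their declared parameter count requires.
static bool ShouldThrowTooFewArguments(bool is_strong, int actual, int required) {
  return is_strong && actual < required;
}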
@ -1792,6 +1832,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -93,9 +93,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cond);
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@ -113,15 +112,15 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetEnvironmentParameterCount();
int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
r0.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
__ push(descriptor.GetEnvironmentParameterRegister(i));
__ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
@ -238,9 +237,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cond) {
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond, Strength strength) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
@ -251,10 +249,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
// Call runtime on identical JSObjects.
__ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
__ cmp(r4, Operand(HEAP_NUMBER_TYPE));
__ b(eq, &return_equal);
__ tst(r4, Operand(kIsNotStringMask));
__ b(ne, slow);
}
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@ -262,8 +270,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cond != eq) {
__ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
// already been ruled out.
__ tst(r4, Operand(kIsNotStringMask));
__ b(ne, slow);
}
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@ -561,7 +577,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, &slow, cc);
EmitIdenticalObjectComparison(masm, &slow, cc, strength());
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@ -663,7 +679,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if (cc == lt || cc == le) {
ncr = GREATER;
@ -1084,10 +1101,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(r1));
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
if (FLAG_enable_ool_constant_pool) {
__ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
if (FLAG_enable_embedded_constant_pool) {
__ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
}
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(pc, r1, r2);
}
@ -1132,8 +1149,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r3: argc
// r4: argv
int marker = type();
if (FLAG_enable_ool_constant_pool) {
__ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
if (FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand::Zero());
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
@ -1142,8 +1159,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
(FLAG_enable_ool_constant_pool ? r8.bit() : 0) |
ip.bit());
(FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
ip.bit());
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@ -1331,11 +1348,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ ldr(map_load_offset, MemOperand(map_load_offset));
__ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
__ mov(r8, map);
__ mov(scratch, map);
// |map_load_offset| points at the beginning of the cell. Calculate the
// field containing the map.
__ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
__ RecordWriteField(map_load_offset, Cell::kValueOffset, r8, function,
__ RecordWriteField(map_load_offset, Cell::kValueOffset, scratch, function,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}
@ -1473,9 +1490,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));
DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
r5, &miss);
@ -1494,9 +1510,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));
DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@ -1520,7 +1535,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@ -1578,8 +1592,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[4] : receiver displacement
// sp[8] : function
CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -1608,8 +1620,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r6 : allocated object (tagged)
// r9 : mapped parameter count (tagged)
CHECK(!has_new_target());
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
// r1 = parameter count (tagged)
@ -1850,14 +1860,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
__ cmp(r1, Operand(Smi::FromInt(0)));
Label skip_decrement;
__ b(eq, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ sub(r1, r1, Operand(2));
__ bind(&skip_decrement);
}
__ str(r1, MemOperand(sp, 0));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
@ -1939,9 +1941,10 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
// sp[0] : index of rest parameter
// sp[4] : number of parameters
// sp[8] : receiver displacement
// sp[0] : language mode
// sp[4] : index of rest parameter
// sp[8] : number of parameters
// sp[12] : receiver displacement
Label runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@ -1951,13 +1954,13 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ str(r1, MemOperand(sp, 1 * kPointerSize));
__ str(r1, MemOperand(sp, 2 * kPointerSize));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
__ str(r3, MemOperand(sp, 2 * kPointerSize));
__ str(r3, MemOperand(sp, 3 * kPointerSize));
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
__ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@ -2418,7 +2421,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// this position in a symbol (see static asserts in type-feedback-vector.h).
Label check_allocation_site;
Register feedback_map = r5;
Register weak_value = r8;
Register weak_value = r6;
__ ldr(weak_value, FieldMemOperand(r4, WeakCell::kValueOffset));
__ cmp(r1, weak_value);
__ b(eq, &done);
@ -2703,6 +2706,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Increment the call count for monomorphic function calls.
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r3, FieldMemOperand(r2, 0));
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
__ mov(r2, r4);
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
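The call-count update above finds its cell one word past the slot's feedback entry. The same address arithmetic in plain C++ (a sketch; on 32-bit ARM a Smi carries its payload shifted left one bit, which is what PointerOffsetFromSmiKey undoes):

#include <cstdint>

// Byte offset of the call-count cell for a Smi-encoded feedback slot.
uintptr_t CallCountOffset(uintptr_t smi_slot, uintptr_t header_size,
                          uintptr_t pointer_size) {
  uintptr_t entry = (smi_slot >> 1) * pointer_size;  // PointerOffsetFromSmiKey
  return entry + header_size + pointer_size;         // skip header + one word
}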
@ -2762,6 +2772,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r1, &extra_checks_or_miss);
// Increment the call count for monomorphic function calls.
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
__ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(r3, FieldMemOperand(r2, 0));
__ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ str(r3, FieldMemOperand(r2, 0));
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@ -2837,6 +2854,11 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ add(r4, r4, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(r2, with_types_offset));
// Initialize the call counter.
__ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
// Store the function. Use a stub since we need a frame for allocation.
// r2 - vector
// r3 - slot
@ -2937,9 +2959,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Push(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_, index_);
if (embed_mode == PART_OF_IC_HANDLER) {
__ Push(LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// index_ is consumed by runtime conversion function.
__ Push(object_, index_);
@ -2954,9 +2976,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r0);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Pop(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_);
if (embed_mode == PART_OF_IC_HANDLER) {
__ Pop(LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister(), object_);
} else {
__ pop(object_);
}
@ -3567,7 +3589,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@ -4348,15 +4370,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawLoadStub stub(isolate(), state());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawKeyedLoadStub stub(isolate());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@ -4375,12 +4397,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@ -4474,14 +4494,14 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
Register name = VectorLoadICDescriptor::NameRegister(); // r2
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
Register name = LoadWithVectorDescriptor::NameRegister(); // r2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
Register scratch1 = r8;
Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@ -4521,24 +4541,24 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
Register key = VectorLoadICDescriptor::NameRegister(); // r2
Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
Register key = LoadWithVectorDescriptor::NameRegister(); // r2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
Register feedback = r4;
Register receiver_map = r5;
Register scratch1 = r8;
Register scratch1 = r6;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
@ -4568,7 +4588,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
@ -4592,6 +4612,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
}
void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@ -5297,6 +5369,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -946,6 +946,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -126,6 +126,7 @@ int Registers::Number(const char* name) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -42,6 +42,11 @@ const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
const int kPCRegister = 15;
const int kNoRegister = -1;
// Used in embedded constant pool builder - max reach in bits for
// various load instructions (unsigned)
const int kLdrMaxReachBits = 12;
const int kVldrMaxReachBits = 10;
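Expanded, these reaches match the assembler's pool distance limits (kMaxDistToIntPool = 4 KB, kMaxDistToFPPool = 1 KB); a sketch of the correspondence:

// ldr: 12-bit unsigned byte offset              -> 1 << 12 = 4096 bytes (4 KB)
// vldr: 8-bit word offset, 10 bits of byte reach -> 1 << 10 = 1024 bytes (1 KB)
const int kLdrMaxReachBytes = 1 << kLdrMaxReachBits;
const int kVldrMaxReachBytes = 1 << kVldrMaxReachBits;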
// -----------------------------------------------------------------------------
// Conditions.

@ -77,6 +77,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#endif
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -144,10 +144,8 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
RegList regs = receiver.bit() | name.bit();
if (FLAG_vector_ics) {
regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
}
Register slot = LoadDescriptor::SlotRegister();
RegList regs = receiver.bit() | name.bit() | slot.bit();
Generate_DebugBreakCallHelper(masm, regs, 0);
}
@ -157,8 +155,11 @@ void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
RegList regs = receiver.bit() | name.bit() | value.bit();
if (FLAG_vector_stores) {
regs |= VectorStoreICDescriptor::SlotRegister().bit();
}
Generate_DebugBreakCallHelper(masm, regs, 0);
}
@ -170,11 +171,7 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm.cc).
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0);
GenerateStoreICDebugBreak(masm);
}
@ -267,7 +264,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
StandardFrameConstants::kConstantPoolOffset - kPointerSize));
// Pop return address, frame and constant pool pointer (if
// FLAG_enable_ool_constant_pool).
// FLAG_enable_embedded_constant_pool).
__ LeaveFrame(StackFrame::INTERNAL);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@ -289,6 +286,7 @@ const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -353,11 +353,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
SetFrameSlot(offset, value);
}
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

@ -1904,8 +1904,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
//------------------------------------------------------------------------------

@ -21,7 +21,7 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
return pp;
}
@ -29,18 +29,12 @@ Register JavaScriptFrame::constant_pool_pointer_register() {
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
DCHECK(FLAG_enable_ool_constant_pool);
DCHECK(FLAG_enable_embedded_constant_pool);
return pp;
}
Object*& ExitFrame::constant_pool_slot() const {
DCHECK(FLAG_enable_ool_constant_pool);
const int offset = ExitFrameConstants::kConstantPoolOffset;
return Memory::Object_at(fp() + offset);
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

@ -66,11 +66,23 @@ const int kNumDoubleCalleeSaved = 8;
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
// The embedded constant pool pointer (r8/pp) is not included in the safepoint
// since it is not tagged. This register is preserved in the stack frame where
// its value will be updated if GC code movement occurs. Including it in the
// safepoint (where it will not be relocated) would cause a stale value to be
// restored.
const RegList kConstantPointerRegMask =
FLAG_enable_embedded_constant_pool ? (1 << 8) : 0;
const int kNumConstantPoolPointerReg =
FLAG_enable_embedded_constant_pool ? 1 : 0;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
const RegList kSafepointSavedRegisters =
kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);
const int kNumSafepointSavedRegisters =
kNumJSCallerSaved + kNumCalleeSaved - kNumConstantPoolPointerReg;
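A sketch of the invariant the comment above describes: the saved-register count must equal the number of bits left in the mask (std::bitset is used purely for illustration):

#include <bitset>

// True when the declared count matches the bits actually saved.
inline bool SafepointCountsConsistent() {
  return std::bitset<32>(kSafepointSavedRegisters).count() ==
         static_cast<size_t>(kNumSafepointSavedRegisters);
}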
// ----------------------------------------------------
@ -84,11 +96,11 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = FLAG_enable_ool_constant_pool ?
3 * kPointerSize : 2 * kPointerSize;
static const int kFrameSize =
FLAG_enable_embedded_constant_pool ? 3 * kPointerSize : 2 * kPointerSize;
static const int kConstantPoolOffset = FLAG_enable_ool_constant_pool ?
-3 * kPointerSize : 0;
static const int kConstantPoolOffset =
FLAG_enable_embedded_constant_pool ? -3 * kPointerSize : 0;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@ -129,13 +141,12 @@ class ArgumentsAdaptorFrameConstants : public AllStatic {
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kImplicitReceiverOffset = -5 * kPointerSize;
static const int kLengthOffset = -4 * kPointerSize;
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
};

File diff suppressed because it is too large
@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
const Register LoadDescriptor::SlotRegister() { return r0; }
const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
@ -56,109 +60,101 @@ const Register MathPowIntegerDescriptor::exponent() {
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
const Register GrowArrayElementsDescriptor::CapacityRegister() { return r2; }
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
}
void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastCloneShallowArrayDescriptor::Initialize(
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::Initialize(
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::Initialize(
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3, r1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
}
void StoreArrayLiteralElementDescriptor::Initialize(
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
}
void CallFunctionWithFeedbackDescriptor::Initialize(
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r3, r2};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r3, r2, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
@ -166,234 +162,206 @@ void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {cp, r0, r1, r2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r0, r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::Initialize(
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::Initialize(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0, r1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r0, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AllocateHeapNumberDescriptor::Initialize(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
Register registers[] = {cp};
data->Initialize(arraysize(registers), registers, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {cp, r1, r2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, r1, r2, r0};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(),
Representation::Tagged(), Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
}
void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// register state
// cp -- context
// r0 -- number of arguments
// r1 -- constructor function
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorDescriptor::Initialize(
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, r1, r0};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void BinaryOpWithAllocationSiteDescriptor::Initialize(
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r2, // key
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // key
};
data->Initialize(arraysize(registers), registers, representations,
&noInlineDescriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r2, // name
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // name
};
data->Initialize(arraysize(registers), registers, representations,
&noInlineDescriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // receiver
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // receiver
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r1, // JSFunction
r0, // actual number of arguments
r2, // expected number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
r3, // actual number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
Representation::Integer32(), // actual number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // math rounding function
r3, // vector slot id
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers);
}
}
} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM
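The pattern running through the whole file above: the old Initialize() overloads listed cp plus per-argument Representations in every platform file, while the new InitializePlatformSpecific() lists only the architecture-specific registers — the context register is handled implicitly through a single shared accessor (visible in the lithium changes below, where callers pin operand 1 to cp themselves). A rough standalone sketch of that split; the names model the idea and are not the V8 API:

#include <cassert>
#include <cstddef>
#include <vector>

struct Register {
  int code;
  bool operator==(Register o) const { return code == o.code; }
};
constexpr Register cp{7};  // ARM context register (assumption: r7)

Register ContextRegister() { return cp; }  // shared, not per-descriptor

struct DescriptorData {
  std::vector<Register> registers;
  // Platform files now pass only their specific registers; no cp, and no
  // parallel Representation array.
  void InitializePlatformSpecific(size_t n, const Register* regs) {
    registers.assign(regs, regs + n);
  }
};

int main() {
  DescriptorData data;
  Register regs[] = {{2}, {3}};  // r2, r3, as in CreateAllocationSite above
  data.InitializePlatformSpecific(2, regs);
  assert(data.registers.size() == 2);  // cp is no longer in the list
  assert(ContextRegister() == cp);     // context comes from one place
}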

deps/v8/src/arm/lithium-arm.cc vendored View File

@ -1092,10 +1092,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
// Target
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op =
UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
// Other register parameters
for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
i < instr->OperandCount(); i++) {
op =
UseFixed(instr->OperandAt(i),
descriptor.GetRegisterParameter(
i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@ -1105,20 +1113,6 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
}
LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
HTailCallThroughMegamorphicCache* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* receiver_register =
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1);
@ -1869,7 +1863,7 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), r0);
LDateField* result =
new(zone()) LDateField(object, FixedTemp(r1), instr->index());
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@ -2148,7 +2142,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@ -2197,7 +2191,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@ -2271,7 +2265,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@ -2336,8 +2330,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
return MarkAsCall(
new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LStoreKeyedGeneric* result =
new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
return MarkAsCall(result, instr);
}
@ -2369,6 +2371,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = Use(instr->object());
LOperand* elements = Use(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
LMaybeGrowElements* result = new (zone())
LMaybeGrowElements(context, object, elements, key, current_capacity);
DefineFixed(result, r0);
return AssignPointerMap(AssignEnvironment(result));
}
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
@ -2407,8 +2424,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj =
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
return MarkAsCall(result, instr);
}
@ -2485,7 +2509,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetEnvironmentParameterRegister(index);
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@ -2602,7 +2626,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@ -2671,4 +2695,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
return MarkAsCall(DefineFixed(result, cp), instr);
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8

deps/v8/src/arm/lithium-arm.h vendored View File

@ -117,6 +117,7 @@ class LCodeGen;
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@ -153,7 +154,6 @@ class LCodeGen;
V(SubI) \
V(RSubI) \
V(TaggedToI) \
V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@ -474,26 +474,6 @@ class LCallStub final : public LTemplateInstruction<1, 1, 0> {
};
class LTailCallThroughMegamorphicCache final
: public LTemplateInstruction<0, 3, 0> {
public:
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@ -1196,6 +1176,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Strength strength() { return hydrogen()->strength(); }
Token::Value op() const { return hydrogen()->token(); }
};
@ -1567,7 +1549,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
LanguageMode language_mode() { return hydrogen()->language_mode(); }
Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@ -1865,8 +1847,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount,
zone) {
DCHECK(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount ==
operands.length());
inputs_.AddAll(operands, zone);
}
@ -1876,6 +1862,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
// The target and context are passed as implicit parameters that are not
// explicitly listed in the descriptor.
static const int kImplicitRegisterParameterCount = 2;
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
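The arithmetic above is easy to misread: the inputs list is laid out as [target, context, register params...], so descriptor parameter i lives at operand index i + kImplicitRegisterParameterCount. A small self-contained check of that indexing (illustrative values only):

#include <cassert>
#include <string>
#include <vector>

int main() {
  const int kImplicitRegisterParameterCount = 2;  // target + context
  std::vector<std::string> ops = {"target", "context", "p0", "p1"};
  const int register_param_count = 2;  // what the descriptor reports
  assert(static_cast<int>(ops.size()) ==
         register_param_count + kImplicitRegisterParameterCount);
  for (int i = kImplicitRegisterParameterCount;
       i < static_cast<int>(ops.size()); i++) {
    // Operand i corresponds to descriptor parameter i - 2.
    assert(ops[i] == "p" + std::to_string(i - kImplicitRegisterParameterCount));
  }
}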
@ -2188,17 +2178,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
};
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@ -2247,22 +2242,24 @@ class LStoreKeyed final : public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
LOperand* key,
LOperand* value) {
LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = obj;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@ -2318,6 +2315,28 @@ class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
};
class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
public:
LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
LOperand* key, LOperand* current_capacity) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = elements;
inputs_[3] = key;
inputs_[4] = current_capacity;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* elements() { return inputs_[2]; }
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {

deps/v8/src/arm/lithium-codegen-arm.cc vendored View File

@ -113,7 +113,7 @@ bool LCodeGen::GeneratePrologue() {
// r1: Callee's JS function.
// cp: Callee's context.
// pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
// pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
@ -121,7 +121,7 @@ bool LCodeGen::GeneratePrologue() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
!info_->is_native()) {
!info_->is_native() && info_->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@ -197,8 +197,9 @@ bool LCodeGen::GeneratePrologue() {
__ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
int first_parameter = scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
@ -595,52 +596,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
DCHECK(translation_size == 1);
DCHECK(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
DCHECK(translation_size == 2);
DCHECK(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
AddToTranslation(
environment, translation, value, environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@ -960,28 +926,11 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
int result = deoptimization_literals_.length();
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal, zone());
return result;
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
for (int i = 0, length = inlined_closures->length();
i < length;
i++) {
DefineDeoptimizationLiteral(inlined_closures->at(i));
DCHECK_EQ(0, deoptimization_literals_.length());
for (auto function : chunk()->inlined_functions()) {
DefineDeoptimizationLiteral(function);
}
inlined_function_count_ = deoptimization_literals_.length();
}
@ -1016,10 +965,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
// Register pp always contains a pointer to the constant pool.
safepoint.DefinePointerRegister(pp, zone());
}
}
@ -1936,20 +1881,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Smi* index = instr->index();
Label runtime, done;
DCHECK(object.is(result));
DCHECK(object.is(r0));
DCHECK(!scratch.is(scratch0()));
DCHECK(!scratch.is(object));
__ SmiTst(object);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand(stamp));
@ -2174,8 +2114,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), instr->op(), instr->language_mode()).code();
Handle<Code> code =
CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@ -2611,7 +2551,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@ -2885,37 +2826,41 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
int additional_delta = (call_size / Assembler::kInstrSize) + 4;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
PredictableCodeSizeScope predictable(
masm_, (additional_delta + 1) * Assembler::kInstrSize);
// Make sure we don't emit any additional entries in the constant pool before
// the call to ensure that the CallCodeSize() calculated the correct number of
// instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
int map_check_delta =
masm_->InstructionsGeneratedSince(map_check) + additional_delta;
int bool_load_delta =
masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(additional_delta);
// r5 is used to communicate the offset to the location of the map check.
__ mov(r5, Operand(map_check_delta * kPointerSize));
// r6 is used to communicate the offset to the location of the bool load.
__ mov(r6, Operand(bool_load_delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was
// computed for two instructions, so we need to pad here in case of one
// instruction.
while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
__ nop();
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
PredictableCodeSizeScope predictable(
masm_, additional_delta * Assembler::kInstrSize);
// The labels must already be bound since the code has predictable size up
// to the call instruction.
DCHECK(map_check->is_bound());
DCHECK(bool_load->is_bound());
// Make sure we don't emit any additional entries in the constant pool
// before the call to ensure that the CallCodeSize() calculated the
// correct number of instructions for the constant pool load.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
int map_check_delta =
masm_->InstructionsGeneratedSince(map_check) + additional_delta;
int bool_load_delta =
masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
Label before_push_delta;
__ bind(&before_push_delta);
__ BlockConstPoolFor(additional_delta);
// r5 is used to communicate the offset to the location of the map check.
__ mov(r5, Operand(map_check_delta * kPointerSize));
// r6 is used to communicate the offset to the location of the bool load.
__ mov(r6, Operand(bool_load_delta * kPointerSize));
// The mov above can generate one or two instructions. The delta was
// computed for two instructions, so we need to pad here in case of one
// instruction.
while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
__ nop();
}
}
CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
}
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value (r0) into the result register slot and
@ -2928,7 +2873,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@ -2986,10 +2932,9 @@ void LCodeGen::DoReturn(LReturn* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
Register slot_register = LoadDescriptor::SlotRegister();
DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(r0));
AllowDeferredHandleDereference vector_structure_check;
@ -3002,6 +2947,20 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = ToRegister(instr->temp_slot());
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ mov(slot_register, Operand(Smi::FromInt(index)));
}
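The slot register above receives the feedback-vector slot index as a Smi. On 32-bit V8 a Smi is the integer shifted left one bit with tag bit 0, so Smi::FromInt(index) amounts to index << 1 — a sketch of that tagging, assuming the 32-bit scheme:

#include <cassert>
#include <cstdint>

// 32-bit Smi tagging (assumption): value in the upper 31 bits, tag bit == 0.
int32_t SmiFromInt(int32_t value) { return value << 1; }
int32_t SmiToInt(int32_t smi) { return smi >> 1; }

int main() {
  int index = 7;                    // vector->GetIndex(slot)
  int32_t smi = SmiFromInt(index);  // what lands in slot_register
  assert((smi & 1) == 0);           // tagged as a Smi
  assert(SmiToInt(smi) == index);
}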
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
@ -3009,11 +2968,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3108,12 +3065,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
instr->hydrogen()->initialization_state()).code();
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -3420,9 +3376,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -3961,29 +3917,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
}
void LCodeGen::DoTailCallThroughMegamorphicCache(
LTailCallThroughMegamorphicCache* instr) {
Register receiver = ToRegister(instr->receiver());
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
Register scratch = r4;
Register extra = r5;
Register extra2 = r6;
Register extra3 = r9;
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
scratch, extra, extra2, extra3);
// Tail call to miss if we ended up here.
LoadIC::GenerateMiss(masm());
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(r0));
@ -4274,10 +4207,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
}
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@ -4498,6 +4435,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@ -4505,6 +4446,100 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
LInstruction* instr() override { return instr_; }
private:
LMaybeGrowElements* instr_;
};
Register result = r0;
DeferredMaybeGrowElements* deferred =
new (zone()) DeferredMaybeGrowElements(this, instr);
LOperand* key = instr->key();
LOperand* current_capacity = instr->current_capacity();
DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
DCHECK(key->IsConstantOperand() || key->IsRegister());
DCHECK(current_capacity->IsConstantOperand() ||
current_capacity->IsRegister());
if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
if (constant_key >= constant_capacity) {
// Deferred case.
__ jmp(deferred->entry());
}
} else if (key->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
__ cmp(ToRegister(current_capacity), Operand(constant_key));
__ b(le, deferred->entry());
} else if (current_capacity->IsConstantOperand()) {
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
__ cmp(ToRegister(key), Operand(constant_capacity));
__ b(ge, deferred->entry());
} else {
__ cmp(ToRegister(key), ToRegister(current_capacity));
__ b(ge, deferred->entry());
}
if (instr->elements()->IsRegister()) {
__ Move(result, ToRegister(instr->elements()));
} else {
__ ldr(result, ToMemOperand(instr->elements()));
}
__ bind(deferred->exit());
}
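All four operand combinations above encode the same guard; only the constant/register plumbing differs. Stripped to scalar form (a sketch, not V8 code — note the swapped-operand constant-key case uses le, i.e. capacity <= key):

#include <cassert>
#include <cstdint>

// The deferred (grow) path is taken when the element index is not covered
// by the current backing-store capacity.
bool NeedsGrowth(int32_t key, int32_t current_capacity) {
  // cmp(capacity, key); b(le)  ==  capacity <= key  ==  key >= capacity
  return key >= current_capacity;
}

int main() {
  assert(NeedsGrowth(4, 4));   // index == capacity: grow
  assert(!NeedsGrowth(3, 4));  // in range: fast path
}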
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register result = r0;
__ mov(result, Operand::Zero());
// We have to call a stub.
{
PushSafepointRegistersScope scope(this);
if (instr->object()->IsRegister()) {
__ Move(result, ToRegister(instr->object()));
} else {
__ ldr(result, ToMemOperand(instr->object()));
}
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
__ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
} else {
__ Move(r3, ToRegister(key));
__ SmiTag(r3);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
__ StoreToSafepointRegisterSlot(result, result);
}
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result);
DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Register scratch = scratch0();
@ -5957,4 +5992,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

deps/v8/src/arm/lithium-codegen-arm.h vendored View File

@ -27,7 +27,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@ -112,6 +111,7 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
@ -241,7 +241,6 @@ class LCodeGen: public LCodeGenBase {
int* object_index_pointer,
int* dematerialized_index_pointer);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@ -324,10 +323,11 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
template <class T>
void EmitVectorStoreICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;

deps/v8/src/arm/lithium-gap-resolver-arm.cc vendored View File

@ -299,4 +299,5 @@ void LGapResolver::EmitMove(int index) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8

deps/v8/src/arm/macro-assembler-arm.cc vendored View File

@ -691,28 +691,28 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
cp.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
fp.bit() |
lr.bit());
stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
cp.bit() |
(FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
fp.bit() |
lr.bit());
ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) | cp.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
}
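stm/ldm take a bitmask of registers (a RegList), which is why the conditional pp slot is written as `? pp.bit() : 0`. A standalone sketch of that mask construction; the register codes are assumptions for illustration:

#include <cstdint>
#include <cstdio>

using RegList = uint32_t;
constexpr RegList bit(int code) { return 1u << code; }

int main() {
  const int cp = 7, pp = 8, fp = 11, lr = 14;  // V8 ARM codes (assumption)
  bool embedded_constant_pool = true;
  RegList frame = bit(cp) | (embedded_constant_pool ? bit(pp) : 0u) |
                  bit(fp) | bit(lr);
  std::printf("fixed-frame mask: 0x%04x\n", frame);  // 0x4980 with pp included
}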
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0:
DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
// Safepoints expect a block of contiguous register values starting with r0,
// except when FLAG_enable_embedded_constant_pool, which omits pp.
DCHECK(kSafepointSavedRegisters ==
(FLAG_enable_embedded_constant_pool
? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
: (1 << kNumSafepointSavedRegisters) - 1));
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
@ -742,6 +742,10 @@ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
// RegList omits pp.
reg_code -= 1;
}
DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
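Because the safepoint RegList omits pp when the embedded constant pool is enabled, codes above pp shift down by one slot. A minimal model of that mapping (pp taken as r8 here, an assumption):

#include <cassert>

int SafepointRegisterStackIndex(int reg_code, bool embedded_pool, int pp_code) {
  if (embedded_pool && reg_code > pp_code) reg_code -= 1;  // RegList omits pp
  return reg_code;
}

int main() {
  const int pp = 8;
  assert(SafepointRegisterStackIndex(7, true, pp) == 7);   // below pp: unchanged
  assert(SafepointRegisterStackIndex(9, true, pp) == 8);   // above pp: shifted
  assert(SafepointRegisterStackIndex(9, false, pp) == 9);  // pool disabled
}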
@ -985,13 +989,20 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
DCHECK(FLAG_enable_embedded_constant_pool);
ldr(pp, MemOperand(code_target_address,
Code::kConstantPoolOffset - Code::kHeaderSize));
add(pp, pp, code_target_address);
}
void MacroAssembler::LoadConstantPoolPointerRegister() {
if (FLAG_enable_ool_constant_pool) {
int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
pc_offset() - Instruction::kPCReadOffset;
DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
ldr(pp, MemOperand(pc, constant_pool_offset));
}
DCHECK(FLAG_enable_embedded_constant_pool);
int entry_offset = pc_offset() + Instruction::kPCReadOffset;
sub(ip, pc, Operand(entry_offset));
LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
}
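The new sequence first materializes the code entry address: on ARM, reading pc yields the address of the current instruction plus 8 (Instruction::kPCReadOffset), so subtracting pc_offset() + 8 lands on the start of the code object's instructions, from which the pool pointer is loaded. As plain arithmetic, with made-up addresses:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t code_entry = 0x1000;  // start of instructions (made up)
  const uint32_t pc_offset = 0x40;     // bytes emitted before the sub
  const uint32_t kPCReadOffset = 8;    // ARM: pc reads as current instr + 8
  // What the emitted sub computes at run time:
  uint32_t pc_value = code_entry + pc_offset + kPCReadOffset;
  uint32_t ip = pc_value - (pc_offset + kPCReadOffset);
  assert(ip == code_entry);  // ip now addresses the code target
}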
@ -1000,9 +1011,9 @@ void MacroAssembler::StubPrologue() {
Push(Smi::FromInt(StackFrame::STUB));
// Adjust FP to point to saved FP.
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_ool_constant_pool_available(true);
set_constant_pool_available(true);
}
}
@ -1025,9 +1036,9 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
}
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
set_ool_constant_pool_available(true);
set_constant_pool_available(true);
}
}
@ -1036,7 +1047,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
PushFixedFrame();
if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
}
mov(ip, Operand(Smi::FromInt(type)));
@ -1056,9 +1067,9 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer, return address and constant pool pointer
// (if FLAG_enable_ool_constant_pool).
// (if FLAG_enable_embedded_constant_pool).
int frame_ends;
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
frame_ends = pc_offset();
ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
@ -1084,7 +1095,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
mov(ip, Operand(CodeObject()));
@ -1103,7 +1114,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// fp - ExitFrameConstants::kFrameSize -
// DwVfpRegister::kMaxNumRegisters * kDoubleSize,
// since the sp slot, code slot and constant pool slot (if
// FLAG_enable_ool_constant_pool) were pushed after the fp.
// FLAG_enable_embedded_constant_pool) were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@ -1183,7 +1194,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
#endif
// Tear down the exit frame, pop the arguments, and return.
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
}
mov(sp, Operand(fp));
@ -1559,6 +1570,7 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
add(t0, t0, scratch);
// hash = hash ^ (hash >> 16);
eor(t0, t0, Operand(t0, LSR, 16));
bic(t0, t0, Operand(0xc0000000u));
}
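The added bic clears the top two bits so the hash always fits in a positive Smi. In scalar form, the tail of this hash — only these last two steps are visible in the hunk, so the rest of the sequence is not shown here — looks like:

#include <cassert>
#include <cstdint>

uint32_t HashTail(uint32_t hash) {
  hash = hash ^ (hash >> 16);  // the eor above
  hash = hash & ~0xc0000000u;  // the bic above: keep the result Smi-positive
  return hash;
}

int main() {
  assert(HashTail(0xffffffffu) <= 0x3fffffffu);  // never exceeds 2^30 - 1
}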
@ -3162,7 +3174,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
bind(&entry);
cmp(start_offset, end_offset);
b(lt, &loop);
b(lo, &loop);
}
@ -3390,7 +3402,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
if (ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
add(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
@ -3401,7 +3413,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Label small_constant_pool_load, load_result;
ldr(result, MemOperand(ldr_location));
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
// Check if this is an extended constant pool load.
and_(scratch, result, Operand(GetConsantPoolLoadMask()));
teq(scratch, Operand(GetConsantPoolLoadPattern()));
@ -3455,7 +3467,7 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
bind(&load_result);
// Get the address of the constant.
if (FLAG_enable_ool_constant_pool) {
if (FLAG_enable_embedded_constant_pool) {
add(result, pp, Operand(result));
} else {
add(result, ldr_location, Operand(result));

deps/v8/src/arm/macro-assembler-arm.h vendored View File

@ -437,7 +437,7 @@ class MacroAssembler: public Assembler {
}
// Push a fixed frame, consisting of lr, fp, constant pool (if
// FLAG_enable_ool_constant_pool), context and JS function / marker id if
// FLAG_enable_embedded_constant_pool), context and JS function / marker id if
// marker_reg is a valid register.
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
@ -1441,6 +1441,11 @@ class MacroAssembler: public Assembler {
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
void LoadConstantPoolPointerRegister();
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@ -1482,9 +1487,6 @@ class MacroAssembler: public Assembler {
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegister();
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.

deps/v8/src/arm/regexp-macro-assembler-arm.cc vendored View File

@ -1193,6 +1193,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

deps/v8/src/arm/simulator-arm.cc vendored View File

@ -774,8 +774,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
}
Simulator::~Simulator() {
}
Simulator::~Simulator() { free(stack_); }
// When the generated code calls an external reference we need to catch that in
@ -824,7 +823,7 @@ class Redirection {
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
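The switch from the homegrown OFFSET_OF macro to standard offsetof doesn't change the pattern: given a pointer to a member, step back by the member's offset to recover the enclosing object. A self-contained example of the same idiom, on a stand-in struct:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Redirection {  // stand-in with the same shape of fields
  void* external_function;
  uint32_t swi_instruction;
};

int main() {
  Redirection r{nullptr, 0xdeadbeef};
  char* addr_of_swi = reinterpret_cast<char*>(&r.swi_instruction);
  char* addr = addr_of_swi - offsetof(Redirection, swi_instruction);
  assert(reinterpret_cast<Redirection*>(addr) == &r);  // container recovered
}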
@ -834,6 +833,14 @@ class Redirection {
return redirection->external_function();
}
static void DeleteChain(Redirection* redirection) {
while (redirection != nullptr) {
Redirection* next = redirection->next_;
delete redirection;
redirection = next;
}
}
private:
void* external_function_;
uint32_t swi_instruction_;
@ -842,6 +849,19 @@ class Redirection {
};
// static
void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
delete i_cache;
}
}
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@ -4131,7 +4151,8 @@ uintptr_t Simulator::PopAddress() {
return address;
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // USE_SIMULATOR


@ -194,6 +194,8 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.


@ -586,14 +586,13 @@ Address Assembler::target_pointer_address_at(Address pc) {
// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return Memory::Address_at(target_pointer_address_at(pc));
}
Address Assembler::target_address_at(Address pc, Code* code) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
@ -665,8 +664,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
@ -685,7 +683,7 @@ void Assembler::set_target_address_at(Address pc,
Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@ -867,8 +865,8 @@ bool RelocInfo::IsPatchedReturnSequence() {
// See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code);
}
@ -1084,13 +1082,14 @@ Instr Assembler::SF(Register rd) {
}
Instr Assembler::ImmAddSub(int64_t imm) {
Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
return imm << ImmAddSub_offset;
imm <<= ImmAddSub_offset;
} else {
return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
}
return imm;
}
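
Aside: ImmAddSub now takes a plain int and encodes either form of the AArch64 add/sub immediate: a 12-bit value as-is, or a 12-bit value shifted left by 12 with the shift flag set. A sketch of the matching encodability test, assuming it mirrors the two cases handled above rather than V8's exact IsImmAddSub:

    #include <cstdint>

    // Encodable if the value fits in 12 bits, or if its low 12 bits are zero
    // and the remainder fits in 12 bits (the "LSL #12" form).
    bool IsImmAddSubSketch(int64_t imm) {
      auto is_uint12 = [](int64_t v) { return v >= 0 && v < (1 << 12); };
      return is_uint12(imm) || (is_uint12(imm >> 12) && (imm & 0xfff) == 0);
    }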
@ -1239,13 +1238,13 @@ LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
}
Instr Assembler::ImmMoveWide(uint64_t imm) {
Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
Instr Assembler::ShiftMoveWide(int64_t shift) {
Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
}


@ -580,8 +580,9 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = reinterpret_cast<byte*>(buffer_);
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos();
desc->reloc_size =
static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
reloc_info_writer.pos());
desc->origin = this;
}
}
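
Aside: many hunks in these arm64 files follow the pattern seen here: a pointer difference (ptrdiff_t, 64 bits on arm64) is narrowed into an int field with an explicit static_cast, keeping the build clean under 64-to-32-bit truncation warnings. A minimal sketch of the idiom:

    #include <cstdint>

    // CodeDesc::reloc_size and friends are plain ints, so the narrowing is
    // spelled out; the caller guarantees the distance actually fits.
    int ByteDistance(const uint8_t* high, const uint8_t* low) {
      return static_cast<int>(high - low);
    }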
@ -600,13 +601,13 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n^2) behaviour.
int links_checked = 0;
int linkoffset = label->pos();
int64_t linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset;
int64_t linkpcoffset = link->ImmPCOffset();
int64_t prevlinkoffset = linkoffset + linkpcoffset;
end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset;
@ -645,7 +646,8 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// currently referring to this label.
label->Unuse();
} else {
label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
label->link_to(
static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
}
} else if (branch == next_link) {
@ -721,7 +723,7 @@ void Assembler::bind(Label* label) {
while (label->is_linked()) {
int linkoffset = label->pos();
Instruction* link = InstructionAt(linkoffset);
int prevlinkoffset = linkoffset + link->ImmPCOffset();
int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());
CheckLabelLinkChain(label);
@ -811,12 +813,13 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
while (!end_of_chain) {
Instruction * link = InstructionAt(link_offset);
link_pcoffset = link->ImmPCOffset();
link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers.
if (link->IsImmBranch()) {
int max_reachable_pc = InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType());
int max_reachable_pc =
static_cast<int>(InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType()));
typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc);
@ -888,12 +891,12 @@ bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
// 0: ldr xzr, #<size of pool>
bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);
// It is still worth asserting the marker is complete.
// 4: blr xzr
DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
instr->following()->Rn() == xzr.code()));
instr->following()->Rn() == kZeroRegCode));
return result;
}
@ -909,7 +912,7 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
const char* message =
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
int size = kDebugMessageOffset + strlen(message) + 1;
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
@ -1599,9 +1602,11 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
int offset = static_cast<int>(addr.offset());
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
ImmLSPair(offset, CalcLSPairDataSize(op));
Instr addrmodeop;
if (addr.IsImmediateOffset()) {
@ -1645,11 +1650,11 @@ void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
DCHECK(!rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
DCHECK(addr.IsImmediateOffset());
LSDataSize size = CalcLSPairDataSize(
static_cast<LoadStorePairOp>(op & LoadStorePairMask));
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
ImmLSPair(addr.offset(), size));
DCHECK(IsImmLSPair(addr.offset(), size));
int offset = static_cast<int>(addr.offset());
Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}
@ -2137,13 +2142,13 @@ Instr Assembler::ImmFP64(double imm) {
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm);
// bit7: a000.0000
uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
}
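
Aside: as a worked example of the ImmFP64 extraction above (my arithmetic, not part of the patch): the double 1.0 has raw bits 0x3ff0000000000000, so bit 63 gives bit7 = 0, bit 61 gives bit6 = 0x40, and bits 53..48 give bit5_to_0 = 0x30, for an 8-bit immediate of 0x70, the AArch64 FMOV encoding of 1.0. A self-contained sketch of the same extraction:

    #include <cstdint>
    #include <cstring>

    // Pack the sign bit, one exponent bit and the top six remaining bits of a
    // double into the 8-bit FMOV immediate field, as in the hunk above.
    uint32_t ImmFP8(double imm) {
      uint64_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));
      uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
      uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
      uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
      return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);  // ImmFP8(1.0) == 0x70
    }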
@ -2188,8 +2193,8 @@ void Assembler::MoveWide(const Register& rd,
DCHECK(is_uint16(imm));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
@ -2205,7 +2210,7 @@ void Assembler::AddSub(const Register& rd,
DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ImmAddSub(immediate) | dest_reg | RnSP(rn));
ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
@ -2259,7 +2264,7 @@ void Assembler::brk(int code) {
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
@ -2362,7 +2367,8 @@ void Assembler::ConditionalCompare(const Register& rn,
if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
DCHECK(IsImmConditionalCompare(immediate));
ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
ccmpop = ConditionalCompareImmediateFixed | op |
ImmCondCmp(static_cast<unsigned>(immediate));
} else {
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
@ -2502,15 +2508,16 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
if (IsImmLSScaled(offset, size)) {
if (IsImmLSScaled(addr.offset(), size)) {
int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode.
Emit(LoadStoreUnsignedOffsetFixed | memop |
ImmLSUnsigned(offset >> size));
} else if (IsImmLSUnscaled(offset)) {
} else if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
// Use the unscaled addressing mode.
Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
} else {
@ -2536,7 +2543,8 @@ void Assembler::LoadStore(const CPURegister& rt,
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
if (IsImmLSUnscaled(offset)) {
if (IsImmLSUnscaled(addr.offset())) {
int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else {
@ -2568,6 +2576,14 @@ bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
}
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstructionSizeLog2);
bool offset_is_inst_multiple =
(((offset >> inst_size) << inst_size) == offset);
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
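
Aside: the new IsImmLLiteral helper rejects offsets that are not whole instructions apart before checking the signed-field range; the shift-down-then-up expression is just an alignment test. A sketch, assuming 4-byte instructions (kInstructionSizeLog2 == 2) as on arm64:

    #include <cstdint>

    // Shifting right then left clears the low bits, so equality holds exactly
    // when the offset is a multiple of the instruction size.
    bool IsInstructionAligned(int64_t offset) {
      const int kInstructionSizeLog2 = 2;  // assumption: 4-byte instructions
      return ((offset >> kInstructionSizeLog2) << kInstructionSizeLog2) == offset;
    }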
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
@ -2849,7 +2865,8 @@ void Assembler::GrowBuffer() {
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
desc.reloc_size =
static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer;
@ -3065,7 +3082,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
}
// Record the veneer pool size.
int pool_size = SizeOfCodeGeneratedSince(&size_check);
int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
if (unresolved_branches_.empty()) {
@ -3113,7 +3130,8 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int Assembler::buffer_space() const {
return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
return static_cast<int>(reloc_info_writer.pos() -
reinterpret_cast<byte*>(pc_));
}
@ -3124,20 +3142,6 @@ void Assembler::RecordConstPool(int size) {
}
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return isolate->factory()->empty_constant_pool_array();
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
// No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return;
}
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
@ -3171,6 +3175,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64

View File

@ -764,7 +764,7 @@ class ConstPool {
shared_entries_count(0) {}
void RecordEntry(intptr_t data, RelocInfo::Mode mode);
int EntryCount() const {
return shared_entries_count + unique_entries_.size();
return shared_entries_count + static_cast<int>(unique_entries_.size());
}
bool IsEmpty() const {
return shared_entries_.empty() && unique_entries_.empty();
@ -851,6 +851,9 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
// Insert the smallest number of zero bytes possible to align the pc offset
// to a multiple of m. m must be a power of 2 (>= 2).
void DataAlign(int m);
inline void Unreachable();
@ -871,13 +874,10 @@ class Assembler : public AssemblerBase {
inline static Address target_pointer_address_at(Address pc);
// Read/Modify the code target address in the branch/call instruction at pc.
inline static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool);
inline static void set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
static inline Address target_address_at(Address pc, Code* code);
static inline void set_target_address_at(Address pc,
Code* code,
@ -951,7 +951,7 @@ class Assembler : public AssemblerBase {
// Return the number of instructions generated from label to the
// current position.
int InstructionsGeneratedSince(const Label* label) {
uint64_t InstructionsGeneratedSince(const Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstructionSize;
}
@ -1767,6 +1767,8 @@ class Assembler : public AssemblerBase {
// Required by V8.
void dd(uint32_t data) { dc32(data); }
void db(uint8_t data) { dc8(data); }
void dq(uint64_t data) { dc64(data); }
void dp(uintptr_t data) { dc64(data); }
// Code generation helpers --------------------------------------------------
@ -1774,7 +1776,7 @@ class Assembler : public AssemblerBase {
Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
Instruction* InstructionAt(ptrdiff_t offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
}
@ -1841,7 +1843,7 @@ class Assembler : public AssemblerBase {
// Data Processing encoding.
inline static Instr SF(Register rd);
inline static Instr ImmAddSub(int64_t imm);
inline static Instr ImmAddSub(int imm);
inline static Instr ImmS(unsigned imms, unsigned reg_size);
inline static Instr ImmR(unsigned immr, unsigned reg_size);
inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
@ -1876,10 +1878,11 @@ class Assembler : public AssemblerBase {
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
static bool IsImmLLiteral(int64_t offset);
// Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm);
inline static Instr ShiftMoveWide(int64_t shift);
inline static Instr ImmMoveWide(int imm);
inline static Instr ShiftMoveWide(int shift);
// FP Immediates.
static Instr ImmFP32(float imm);
@ -1908,11 +1911,12 @@ class Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
// No embedded constant pool support.
UNREACHABLE();
}
// Returns true if we should emit a veneer as soon as possible for a branch
// which can at most reach to specified pc.

View File

@ -331,6 +331,7 @@ static void Generate_Runtime_NewObject(MacroAssembler* masm,
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
bool use_new_target,
bool create_memento) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@ -360,11 +361,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register argc = x0;
Register constructor = x1;
Register original_constructor = x3;
// x1: constructor function
// Preserve the incoming parameters on the stack.
__ SmiTag(argc);
__ Push(argc, constructor);
// sp[0] : Constructor function.
// sp[1]: number of arguments (smi-tagged)
if (use_new_target) {
__ Push(argc, constructor, original_constructor);
} else {
__ Push(argc, constructor);
}
// sp[0]: new.target (if used)
// sp[0/1]: Constructor function.
// sp[1/2]: number of arguments (smi-tagged)
Label rt_call, count_incremented, allocated, normal_new;
__ Cmp(constructor, original_constructor);
@ -522,7 +529,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Add(new_obj, new_obj, kHeapObjectTag);
// Check if a non-empty properties array is needed. Continue with
// allocated object if not, or fall through to runtime call if it is.
// allocated object if not; allocate and initialize a FixedArray if yes.
Register element_count = x3;
__ Ldrb(element_count,
FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset));
@ -580,7 +587,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&allocated);
if (create_memento) {
__ Peek(x10, 2 * kXRegSize);
int offset = (use_new_target ? 3 : 2) * kXRegSize;
__ Peek(x10, offset);
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
// r2 is an AllocationSite. We are creating a memento from it, so we
// need to increment the memento create count.
@ -592,18 +600,24 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&count_incremented);
}
__ Push(x4, x4);
// Restore the parameters.
if (use_new_target) {
__ Pop(original_constructor);
}
__ Pop(constructor);
// Reload the number of arguments from the stack.
// Set it up in x0 for the function call below.
// jssp[0]: receiver
// jssp[1]: receiver
// jssp[2]: constructor function
// jssp[3]: number of arguments (smi-tagged)
__ Peek(constructor, 2 * kXRegSize); // Load constructor.
__ Peek(argc, 3 * kXRegSize); // Load number of arguments.
// jssp[0]: number of arguments (smi-tagged)
__ Peek(argc, 0); // Load number of arguments.
__ SmiUntag(argc);
if (use_new_target) {
__ Push(original_constructor, x4, x4);
} else {
__ Push(x4, x4);
}
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -614,8 +628,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x2: address of last argument (caller sp)
// jssp[0]: receiver
// jssp[1]: receiver
// jssp[2]: constructor function
// jssp[3]: number of arguments (smi-tagged)
// jssp[2]: new.target (if used)
// jssp[2/3]: number of arguments (smi-tagged)
// Compute the start address of the copy in x3.
__ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
@ -646,15 +660,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
}
// Store offset of return address for deoptimizer.
if (!is_api_function) {
// TODO(arv): Remove the "!use_new_target" before supporting optimization
// of functions that reference new.target
if (!is_api_function && !use_new_target) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore the context from the frame.
// x0: result
// jssp[0]: receiver
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
// jssp[1]: new.target (if used)
// jssp[1/2]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// If the result is an object (in the ECMA sense), we should get rid
@ -665,8 +681,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the result is a smi, it is *not* an object in the ECMA sense.
// x0: result
// jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
// jssp[1]: number of arguments (smi-tagged)
__ JumpIfSmi(x0, &use_receiver);
// If the type of the result (stored in its map) is less than
@ -683,9 +698,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Bind(&exit);
// x0: result
// jssp[0]: receiver (newly allocated object)
// jssp[1]: constructor function
// jssp[2]: number of arguments (smi-tagged)
__ Peek(x1, 2 * kXRegSize);
// jssp[1]: new.target (if used)
// jssp[1/2]: number of arguments (smi-tagged)
int offset = (use_new_target ? 2 : 1) * kXRegSize;
__ Peek(x1, offset);
// Leave construct frame.
}
@ -698,12 +714,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true, false);
Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSConstructStubNewTarget(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, FLAG_pretenuring_call_new);
}
@ -731,7 +752,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// sp[1]: new.target
// sp[2]: receiver (the hole)
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -759,8 +779,6 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ Drop(1);
__ Bind(&done_copying_arguments);
__ Add(x0, x0, Operand(1)); // new.target
// Handle step in.
Label skip_step_in;
ExternalReference debug_step_in_fp =
@ -787,8 +805,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// jssp[0]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Load number of arguments (smi).
__ Peek(x1, 0);
// Load number of arguments (smi), skipping over new.target.
__ Peek(x1, kPointerSize);
// Leave construct frame
}
@ -1388,6 +1406,8 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
Label entry, loop;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
Register slot = LoadDescriptor::SlotRegister();
Register vector = LoadWithVectorDescriptor::VectorRegister();
__ Ldr(key, MemOperand(fp, indexOffset));
__ B(&entry);
@ -1397,7 +1417,14 @@ static void Generate_PushAppliedArguments(MacroAssembler* masm,
__ Ldr(receiver, MemOperand(fp, argumentsOffset));
// Use inline caching to speed up access to arguments.
Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Megamorphic();
FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> feedback_vector =
masm->isolate()->factory()->NewTypeFeedbackVector(&spec);
int index = feedback_vector->GetIndex(FeedbackVectorICSlot(0));
__ Mov(slot, Smi::FromInt(index));
__ Mov(vector, feedback_vector);
Handle<Code> ic =
KeyedLoadICStub(masm->isolate(), LoadICState(kNoExtraICState)).GetCode();
__ Call(ic, RelocInfo::CODE_TARGET);
// Push the nth argument.
@ -1733,13 +1760,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{ // Too few parameters: Actual < expected
__ Bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
Register copy_from = x10;
Register copy_end = x11;
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
// If the function is strong we need to throw an error.
Label no_strong_error;
__ Ldr(scratch1,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
__ TestAndBranchIfAllClear(scratch2.W(),
(1 << SharedFunctionInfo::kStrongModeFunction),
&no_strong_error);
// What we really care about is the required number of arguments.
DCHECK_EQ(kPointerSize, kInt64Size);
__ Ldr(scratch2.W(),
FieldMemOperand(scratch1, SharedFunctionInfo::kLengthOffset));
__ Cmp(argc_actual, Operand(scratch2, LSR, 1));
__ B(ge, &no_strong_error);
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
__ CallRuntime(Runtime::kThrowStrongModeTooFewArguments, 0);
}
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
__ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
@ -1810,6 +1862,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM

View File

@ -102,17 +102,17 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetEnvironmentParameterCount();
int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
queue.Queue(descriptor.GetRegisterParameter(i));
}
queue.PushQueued();
@ -203,13 +203,11 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Register left,
Register right,
Register scratch,
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
FPRegister double_scratch,
Label* slow,
Condition cond) {
Label* slow, Condition cond,
Strength strength) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@ -223,10 +221,20 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Smis. If it's not a heap number, then return equal.
Register right_type = scratch;
if ((cond == lt) || (cond == gt)) {
// Call runtime on identical JSObjects. Otherwise return equal.
__ JumpIfObjectType(right, right_type, right_type, FIRST_SPEC_OBJECT_TYPE,
slow, ge);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics, since
// we need to throw a TypeError. Smis have already been ruled out.
__ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
__ B(eq, &return_equal);
__ Tst(right_type, Operand(kIsNotStringMask));
__ B(ne, slow);
}
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
@ -235,8 +243,16 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// Comparing JS objects with <=, >= is complicated.
__ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, slow);
// Call runtime on identical symbols since we need to throw a TypeError.
__ Cmp(right_type, SYMBOL_TYPE);
__ B(eq, slow);
if (is_strong(strength)) {
// Call the runtime on anything that is converted in the semantics,
// since we need to throw a TypeError. Smis and heap numbers have
// already been ruled out.
__ Tst(right_type, Operand(kIsNotStringMask));
__ B(ne, slow);
}
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@ -513,7 +529,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
strength());
// If either is a smi (we know that at least one is not a smi), then they can
// only be strictly equal if the other is a HeapNumber.
@ -632,7 +649,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
native =
is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
int ncr; // NaN compare result
if ((cond == lt) || (cond == le)) {
ncr = GREATER;
@ -1433,9 +1451,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!FLAG_vector_ics ||
!AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister()));
DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
x11, &miss);
@ -1455,9 +1472,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register result = x0;
Register scratch = x10;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
(!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
result.is(VectorLoadICDescriptor::SlotRegister())));
DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
@ -1669,7 +1685,6 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
CHECK(!has_new_target());
Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
Register key = ArgumentsAccessReadDescriptor::index();
DCHECK(arg_count.is(x0));
@ -1726,8 +1741,6 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// jssp[8]: address of receiver argument
// jssp[16]: function
CHECK(!has_new_target());
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Register caller_fp = x10;
@ -1759,8 +1772,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
//
// Returns pointer to result object in x0.
CHECK(!has_new_target());
// Note: arg_count_smi is an alias of param_count_smi.
Register arg_count_smi = x3;
Register param_count_smi = x3;
@ -2087,15 +2098,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(caller_fp,
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
if (has_new_target()) {
__ Cmp(param_count, Operand(0));
Label skip_decrement;
__ B(eq, &skip_decrement);
// Skip new.target: it is not a part of arguments.
__ Sub(param_count, param_count, Operand(1));
__ SmiTag(param_count_smi, param_count);
__ Bind(&skip_decrement);
}
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
@ -2192,19 +2194,21 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
// Stack layout on entry.
// jssp[0]: index of rest parameter (tagged)
// jssp[8]: number of parameters (tagged)
// jssp[16]: address of receiver argument
// jssp[0]: language mode (tagged)
// jssp[8]: index of rest parameter (tagged)
// jssp[16]: number of parameters (tagged)
// jssp[24]: address of receiver argument
//
// Returns pointer to result object in x0.
// Get the stub arguments from the frame, and make an untagged copy of the
// parameter count.
Register rest_index_smi = x1;
Register param_count_smi = x2;
Register params = x3;
Register language_mode_smi = x1;
Register rest_index_smi = x2;
Register param_count_smi = x3;
Register params = x4;
Register param_count = x13;
__ Pop(rest_index_smi, param_count_smi, params);
__ Pop(language_mode_smi, rest_index_smi, param_count_smi, params);
__ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed.
@ -2217,11 +2221,12 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &runtime);
// x1 rest_index_smi index of rest parameter
// x2 param_count_smi number of parameters passed to function (smi)
// x3 params pointer to parameters
// x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
// x1 language_mode_smi language mode
// x2 rest_index_smi index of rest parameter
// x3 param_count_smi number of parameters passed to function (smi)
// x4 params pointer to parameters
// x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
// Patch the argument length and parameters pointer.
__ Ldr(param_count_smi,
@ -2232,8 +2237,8 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime);
__ Push(params, param_count_smi, rest_index_smi);
__ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
__ Push(params, param_count_smi, rest_index_smi, language_mode_smi);
__ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
}
@ -2286,27 +2291,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Register last_match_info_elements = x21;
Register code_object = x22;
// TODO(jbramley): Is it necessary to preserve these? I don't think ARM does.
CPURegList used_callee_saved_registers(subject,
regexp_data,
last_match_info_elements,
code_object);
__ PushCPURegList(used_callee_saved_registers);
// Stack frame.
// jssp[0] : x19
// jssp[8] : x20
// jssp[16]: x21
// jssp[24]: x22
// jssp[32]: last_match_info (JSArray)
// jssp[40]: previous index
// jssp[48]: subject string
// jssp[56]: JSRegExp object
// jssp[00]: last_match_info (JSArray)
// jssp[08]: previous index
// jssp[16]: subject string
// jssp[24]: JSRegExp object
const int kLastMatchInfoOffset = 4 * kPointerSize;
const int kPreviousIndexOffset = 5 * kPointerSize;
const int kSubjectOffset = 6 * kPointerSize;
const int kJSRegExpOffset = 7 * kPointerSize;
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
@ -2673,7 +2667,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Return last match info.
__ Peek(x0, kLastMatchInfoOffset);
__ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
@ -2696,13 +2689,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
__ PopCPURegList(used_callee_saved_registers);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
__ Bind(&runtime);
__ PopCPURegList(used_callee_saved_registers);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
@ -3100,10 +3091,18 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
Register allocation_site = feedback_vector;
__ Mov(allocation_site, scratch);
// Increment the call count for monomorphic function calls.
__ Add(feedback_vector, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(index, FieldMemOperand(feedback_vector, 0));
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
Register original_constructor = x3;
Register allocation_site = feedback_vector;
Register original_constructor = index;
__ Mov(allocation_site, scratch);
__ Mov(original_constructor, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@ -3169,6 +3168,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(function, &extra_checks_or_miss);
// Increment the call count for monomorphic function calls.
__ Add(feedback_vector, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ Ldr(index, FieldMemOperand(feedback_vector, 0));
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
__ bind(&have_js_function);
if (CallAsMethod()) {
EmitContinueIfStrictOrNative(masm, &cont);
@ -3244,6 +3252,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Adds(x4, x4, Operand(Smi::FromInt(1)));
__ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
// Initialize the call counter.
__ Mov(x5, Smi::FromInt(CallICNexus::kCallCountIncrement));
__ Adds(x4, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
__ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize + kPointerSize));
// Store the function. Use a stub since we need a frame for allocation.
// x2 - vector
// x3 - slot
@ -3338,9 +3352,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// If index is a heap number, try converting it to an integer.
__ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Push(VectorLoadICDescriptor::VectorRegister(),
VectorLoadICDescriptor::SlotRegister(), object_, index_);
if (embed_mode == PART_OF_IC_HANDLER) {
__ Push(LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister(), object_, index_);
} else {
// Save object_ on the stack and pass index_ as argument for runtime call.
__ Push(object_, index_);
@ -3355,9 +3369,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
__ Pop(object_, VectorLoadICDescriptor::SlotRegister(),
VectorLoadICDescriptor::VectorRegister());
if (embed_mode == PART_OF_IC_HANDLER) {
__ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister());
} else {
__ Pop(object_);
}
@ -3485,7 +3499,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ Ret();
__ Bind(&unordered);
CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@ -4481,15 +4495,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawLoadStub stub(isolate(), state());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
VectorRawKeyedLoadStub stub(isolate());
EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@ -4508,12 +4522,10 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
void VectorRawLoadStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
@ -4610,11 +4622,11 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
}
void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
Register name = VectorLoadICDescriptor::NameRegister(); // x2
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
Register name = LoadWithVectorDescriptor::NameRegister(); // x2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
@ -4654,21 +4666,21 @@ void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
void KeyedLoadICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
Register key = VectorLoadICDescriptor::NameRegister(); // x2
Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
Register key = LoadWithVectorDescriptor::NameRegister(); // x2
Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
Register feedback = x4;
Register receiver_map = x5;
Register scratch1 = x6;
@ -4700,7 +4712,7 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
@ -4724,6 +4736,58 @@ void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
}
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ Bind(&miss);
StoreIC::GenerateMiss(masm);
}
void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
// TODO(mvstanton): Implement.
__ Bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@ -5426,7 +5490,7 @@ static const int kCallApiFunctionSpillSpace = 4;
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
return static_cast<int>(ref0.address() - ref1.address());
}
@ -5765,6 +5829,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -138,8 +138,10 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
auto offset_to_incremental_noncompacting =
static_cast<int32_t>(instr1->ImmPCOffset());
auto offset_to_incremental_compacting =
static_cast<int32_t>(instr2->ImmPCOffset());
switch (mode) {
case STORE_BUFFER_ONLY:


@ -634,6 +634,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -84,6 +84,8 @@ const int64_t kXMaxInt = 0x7fffffffffffffffL;
const int64_t kXMinInt = 0x8000000000000000L;
const int32_t kWMaxInt = 0x7fffffff;
const int32_t kWMinInt = 0x80000000;
const unsigned kIp0Code = 16;
const unsigned kIp1Code = 17;
const unsigned kFramePointerRegCode = 29;
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;


@ -120,6 +120,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
#endif
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -207,10 +207,8 @@ void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
RegList regs = receiver.Bit() | name.Bit();
if (FLAG_vector_ics) {
regs |= VectorLoadICTrampolineDescriptor::SlotRegister().Bit();
}
Register slot = LoadDescriptor::SlotRegister();
RegList regs = receiver.Bit() | name.Bit() | slot.Bit();
Generate_DebugBreakCallHelper(masm, regs, 0, x10);
}
@ -220,8 +218,11 @@ void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
RegList regs = receiver.Bit() | name.Bit() | value.Bit();
if (FLAG_vector_stores) {
regs |= VectorStoreICDescriptor::SlotRegister().Bit();
}
Generate_DebugBreakCallHelper(masm, regs, 0, x10);
}
@ -233,11 +234,7 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm64.cc).
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
GenerateStoreICDebugBreak(masm);
}
@ -346,6 +343,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
const bool LiveEdit::kFrameDropperSupported = true;
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -81,6 +81,7 @@ VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -193,6 +193,7 @@ void DelayedMasm::EmitPending() {
ResetPending();
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -354,11 +354,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No out-of-line constant pool support.
// No embedded constant pool support.
UNREACHABLE();
}
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -1369,11 +1369,12 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
if (format[5] == 'I') {
uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
<< (16 * instr->ShiftMoveWide());
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK(format[5] == 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
}
@ -1383,13 +1384,13 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
case 'L': {
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
AppendToOutput("pc%+" PRId64,
instr->ImmLLiteral() << kLoadLiteralScaleLog2);
AppendToOutput("pc%+" PRId32, instr->ImmLLiteral()
<< kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
if (instr->ImmLS() != 0) {
AppendToOutput(", #%" PRId64, instr->ImmLS());
AppendToOutput(", #%" PRId32, instr->ImmLS());
}
return 3;
}
@ -1397,14 +1398,14 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
if (instr->ImmLSPair() != 0) {
// format[3] is the scale value. Convert to a number.
int scale = format[3] - 0x30;
AppendToOutput(", #%" PRId64, instr->ImmLSPair() * scale);
AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
}
return 4;
}
case 'U': { // ILU - Immediate Load/Store Unsigned.
if (instr->ImmLSUnsigned() != 0) {
AppendToOutput(", #%" PRIu64,
instr->ImmLSUnsigned() << instr->SizeLS());
AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned()
<< instr->SizeLS());
}
return 3;
}
@ -1427,7 +1428,7 @@ int Disassembler::SubstituteImmediateField(Instruction* instr,
AppendToOutput("#%d", 64 - instr->FPScale());
return 8;
} else {
AppendToOutput("#0x%" PRIx64 " (%.4f)", instr->ImmFP(),
AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
return 9;
}
@ -1538,7 +1539,7 @@ int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
case 'L': { // HLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
AppendToOutput(", %s #%" PRId64, shift_type[instr->ShiftDP()],
AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
instr->ImmDPShift());
}
return 3;
@ -1729,7 +1730,8 @@ void PrintDisassembler::ProcessOutput(Instruction* instr) {
GetOutput());
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
namespace disasm {


@ -31,12 +31,7 @@ Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
}
Object*& ExitFrame::constant_pool_slot() const {
UNREACHABLE();
return Memory::Object_at(NULL);
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -78,11 +78,10 @@ class ConstructFrameConstants : public AllStatic {
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kLengthOffset = -4 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kImplicitReceiverOffset = -5 * kPointerSize;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
};

File diff suppressed because it is too large.


@ -93,9 +93,9 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
// met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();
int32_t n = BitN();
int32_t imm_s = ImmSetBits();
int32_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
@ -211,7 +211,7 @@ Instruction* Instruction::ImmPCOffsetTarget() {
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int32_t offset) {
ptrdiff_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
@ -242,7 +242,7 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
ptrdiff_t target_offset = DistanceTo(target);
Instr imm;
if (Instruction::IsValidPCRelOffset(target_offset)) {
imm = Assembler::ImmPCRelAddress(target_offset);
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(this,
@ -254,9 +254,11 @@ void Instruction::SetPCRelImmTarget(Instruction* target) {
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(IsValidImmPCOffset(BranchType(),
DistanceTo(target) >> kInstructionSizeLog2));
int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
@ -287,9 +289,9 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2;
DCHECK(is_int32(target_offset));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
int32_t target_offset =
static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
@ -302,8 +304,9 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
Instr imm = Assembler::ImmLLiteral(
static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
@ -316,7 +319,7 @@ void Instruction::SetImmLLiteral(Instruction* source) {
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
}
@ -334,6 +337,7 @@ uint64_t InstructionSequence::InlineData() const {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -137,8 +137,8 @@ class Instruction {
return following(-count);
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
@ -146,8 +146,8 @@ class Instruction {
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
DCHECK(IsPCRelAddressing());
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
int offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width - 1, 0, offset);
}
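signed_bitextract_32 sign-extends the combined 21-bit hi:lo field; schematically (a generic helper, not V8's exact signature, relying on arithmetic right shift of signed values):

    #include <cstdint>

    // Sign-extend the low `width` bits of `value` to a full int32_t.
    int32_t SignExtend32(uint32_t value, int width) {
      int shift = 32 - width;
      return static_cast<int32_t>(value << shift) >> shift;
    }
    // SignExtend32(0x1FFFFF, 21) == -1, so a set top bit yields a
    // negative PC-relative offset.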
@ -369,7 +369,7 @@ class Instruction {
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
@ -409,9 +409,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(int offset) {
return is_int21(offset);
}
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};


@ -591,4 +591,5 @@ void Instrument::VisitUnimplemented(Instruction* instr) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -16,12 +16,10 @@ const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
const Register LoadDescriptor::SlotRegister() { return x0; }
const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
const Register StoreDescriptor::ReceiverRegister() { return x1; }
@ -29,6 +27,12 @@ const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return x4; }
const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
@ -62,389 +66,338 @@ const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
const Register GrowArrayElementsDescriptor::CapacityRegister() { return x2; }
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x2: function info
Register registers[] = {cp, x2};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x1: function
Register registers[] = {cp, x1};
data->Initialize(arraysize(registers), registers, NULL);
}
void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x0: value
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x0: value
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void TypeofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x3};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastCloneShallowArrayDescriptor::Initialize(
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
Register registers[] = {x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void NumberToStringDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowArrayDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x3: array literals array
// x2: array literal index
// x1: constant elements
Register registers[] = {cp, x3, x2, x1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x3, x2, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastCloneShallowObjectDescriptor::Initialize(
void FastCloneShallowObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x3: object literals array
// x2: object literal index
// x1: constant properties
// x0: object literal flags
Register registers[] = {cp, x3, x2, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x3, x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateAllocationSiteDescriptor::Initialize(
void CreateAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x2: feedback vector
// x3: call feedback slot
Register registers[] = {cp, x2, x3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CreateWeakCellDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void CreateWeakCellDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: feedback vector
// x3: call feedback slot
// x1: tagged value to put in the weak cell
Register registers[] = {cp, x2, x3, x1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x2, x3, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StoreArrayLiteralElementDescriptor::Initialize(
void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x3, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x3, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 function the function to call
Register registers[] = {cp, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::Initialize(
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x1, x3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::Initialize(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, x1, x3, x2};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments
// x1 : the function to call
// x2 : feedback vector
// x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {cp, x0, x1, x2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0, x1, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void RegExpConstructResultDescriptor::Initialize(
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x2: length
// x1: index (of last match)
// x0: string
Register registers[] = {cp, x2, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::Initialize(
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x0: value (js_array)
// x1: to_map
Register registers[] = {cp, x0, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x0, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AllocateHeapNumberDescriptor::Initialize(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
Register registers[] = {cp};
data->Initialize(arraysize(registers), registers, nullptr);
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
Register registers[] = {cp, x1, x2};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, x1, x2, x0};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(),
Representation::Tagged(), Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
}
void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
// x1: constructor function
// x0: number of arguments to the constructor function
Register registers[] = {cp, x1};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InternalArrayConstructorDescriptor::Initialize(
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, x1, x0};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x0: value to compare
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x0: value
Register registers[] = {cp, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
}
void BinaryOpWithAllocationSiteDescriptor::Initialize(
void CompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CompareNilDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value to compare
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ToBooleanDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// cp: context
// x2: allocation site
// x1: left operand
// x0: right operand
Register registers[] = {cp, x2, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x2, x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
// x0: right operand
Register registers[] = {cp, x1, x0};
data->Initialize(arraysize(registers), registers, NULL);
Register registers[] = {x1, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void KeyedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x2, // key
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // key
};
data->Initialize(arraysize(registers), registers, representations,
&noInlineDescriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void NamedDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x2, // name
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // name
};
data->Initialize(arraysize(registers), registers, representations,
&noInlineDescriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&noInlineDescriptor);
}
void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void CallHandlerDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x0, // receiver
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // receiver
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x1, // JSFunction
x0, // actual number of arguments
x2, // expected number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
x3, // actual number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
Representation::Integer32(), // actual number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void ApiAccessorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
void ApiAccessorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
}
void MathRoundVariantDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // math rounding function
x3, // vector slot id
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers);
}
}
} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64
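Every descriptor in this file changes the same way: Initialize becomes InitializePlatformSpecific, and the context register cp and the per-register Representation lists drop out, leaving only the platform registers. A reduced sketch of the resulting shape, with a simplified stand-in for CallInterfaceDescriptorData (the real class carries more state):

    #include <algorithm>
    #include <cassert>

    struct Register { int code; };

    class CallInterfaceDescriptorData {
     public:
      // Platform files now record only their register list; the context
      // register and representations are supplied by shared code.
      void InitializePlatformSpecific(int count, const Register* registers) {
        assert(count <= kMaxRegisters);
        count_ = count;
        std::copy(registers, registers + count, registers_);
      }

     private:
      static const int kMaxRegisters = 8;
      int count_ = 0;
      Register registers_[kMaxRegisters];
    };

    // A typical platform function then reduces to:
    void ToNumberInitializePlatformSpecific(CallInterfaceDescriptorData* data) {
      Register registers[] = {{0}};  // x0: value; cp is now implicit.
      data->InitializePlatformSpecific(1, registers);
    }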


@ -1050,10 +1050,18 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone());
// Target
ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op =
UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
// Context
LOperand* op = UseFixed(instr->OperandAt(1), cp);
ops.Add(op, zone());
// Other register parameters
for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
i < instr->OperandCount(); i++) {
op =
UseFixed(instr->OperandAt(i),
descriptor.GetRegisterParameter(
i - LCallWithDescriptor::kImplicitRegisterParameterCount));
ops.Add(op, zone());
}
@ -1391,7 +1399,7 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* object = UseFixed(instr->value(), x0);
LDateField* result = new(zone()) LDateField(object, instr->index());
return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
@ -1504,7 +1512,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@ -1588,20 +1596,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
HTailCallThroughMegamorphicCache* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* receiver_register =
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp);
// The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@ -1700,7 +1694,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LLoadGlobalGeneric* result =
@ -1766,7 +1760,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@ -1788,7 +1782,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
}
LInstruction* result =
@ -2028,7 +2022,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
CallInterfaceDescriptor descriptor =
info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetEnvironmentParameterRegister(index);
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
}
}
@ -2402,8 +2396,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
DCHECK(instr->key()->representation().IsTagged());
DCHECK(instr->value()->representation().IsTagged());
return MarkAsCall(
new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LStoreKeyedGeneric* result = new (zone())
LStoreKeyedGeneric(context, object, key, value, slot, vector);
return MarkAsCall(result, instr);
}
@ -2442,7 +2444,15 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
LOperand* slot = NULL;
LOperand* vector = NULL;
if (instr->HasVectorAndSlot()) {
slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
}
LStoreNamedGeneric* result =
new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
return MarkAsCall(result, instr);
}
@ -2567,6 +2577,21 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
}
LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
info()->MarkAsDeferredCalling();
LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseRegister(instr->object());
LOperand* elements = UseRegister(instr->elements());
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
LMaybeGrowElements* result = new (zone())
LMaybeGrowElements(context, object, elements, key, current_capacity);
DefineFixed(result, x0);
return AssignPointerMap(AssignEnvironment(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* value = UseFixed(instr->value(), x3);
@ -2763,4 +2788,5 @@ LInstruction* LChunkBuilder::DoAllocateBlockContext(
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -125,6 +125,7 @@ class LCodeGen;
V(MathRoundD) \
V(MathRoundI) \
V(MathSqrt) \
V(MaybeGrowElements) \
V(ModByConstI) \
V(ModByPowerOf2I) \
V(ModI) \
@ -164,7 +165,6 @@ class LCodeGen;
V(SubI) \
V(SubS) \
V(TaggedToI) \
V(TailCallThroughMegamorphicCache) \
V(ThisFunction) \
V(ToFastProperties) \
V(TransitionElementsKind) \
@ -318,26 +318,6 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
};
class LTailCallThroughMegamorphicCache final
: public LTemplateInstruction<0, 3, 0> {
public:
LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
LOperand* name) {
inputs_[0] = context;
inputs_[1] = receiver;
inputs_[2] = name;
}
LOperand* context() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* name() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
"tail-call-through-megamorphic-cache")
DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
};
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@ -739,7 +719,7 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
LanguageMode language_mode() { return hydrogen()->language_mode(); }
Strength strength() { return hydrogen()->strength(); }
private:
Token::Value op_;
@ -1181,6 +1161,8 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Strength strength() { return hydrogen()->strength(); }
Token::Value op() const { return hydrogen()->token(); }
};
@ -1550,8 +1532,12 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
LCallWithDescriptor(CallInterfaceDescriptor descriptor,
const ZoneList<LOperand*>& operands, Zone* zone)
: descriptor_(descriptor),
inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
inputs_(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount,
zone) {
DCHECK(descriptor.GetRegisterParameterCount() +
kImplicitRegisterParameterCount ==
operands.length());
inputs_.AddAll(operands, zone);
}
@ -1561,6 +1547,10 @@ class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
// The target and context are passed as implicit parameters that are not
// explicitly listed in the descriptor.
static const int kImplicitRegisterParameterCount = 2;
private:
DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
@ -2563,22 +2553,24 @@ class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
};
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 0> {
class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
public:
LStoreKeyedGeneric(LOperand* context,
LOperand* obj,
LOperand* key,
LOperand* value) {
LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
LOperand* value, LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = obj;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
@ -2615,17 +2607,22 @@ class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
};
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
public:
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
LOperand* slot, LOperand* vector) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
temps_[0] = slot;
temps_[1] = vector;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
LOperand* temp_slot() { return temps_[0]; }
LOperand* temp_vector() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
@ -2637,6 +2634,28 @@ class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 0> {
};
class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
public:
LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
LOperand* key, LOperand* current_capacity) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = elements;
inputs_[3] = key;
inputs_[4] = current_capacity;
}
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* elements() { return inputs_[2]; }
LOperand* key() { return inputs_[3]; }
LOperand* current_capacity() { return inputs_[4]; }
DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
};
class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
public:
LStringAdd(LOperand* context, LOperand* left, LOperand* right) {


@ -224,55 +224,17 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
bool has_closure_id = !info()->closure().is_null() &&
!info()->closure().is_identical_to(environment->closure());
int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case JS_GETTER:
DCHECK(translation_size == 1);
DCHECK(height == 0);
translation->BeginGetterStubFrame(closure_id);
break;
case JS_SETTER:
DCHECK(translation_size == 2);
DCHECK(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
case STUB:
translation->BeginCompiledStubFrame();
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
default:
UNREACHABLE();
}
WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
&object_index,
&dematerialized_index);
AddToTranslation(
environment, translation, value, environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
@ -345,16 +307,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
int result = deoptimization_literals_.length();
for (int i = 0; i < deoptimization_literals_.length(); ++i) {
if (deoptimization_literals_[i].is_identical_to(literal)) return i;
}
deoptimization_literals_.Add(literal, zone());
return result;
}
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
environment->set_has_been_used();
@ -435,6 +387,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
CallFunctionStub stub(isolate(), arity, flags);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -449,6 +402,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
DCHECK(ToRegister(instr->result()).is(x0));
}
@ -504,6 +458,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
DCHECK(ToRegister(instr->result()).is(x0));
}
@ -525,7 +480,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Mov(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
__ Ldr(cp, ToMemOperand(context));
__ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@ -669,7 +624,7 @@ bool LCodeGen::GeneratePrologue() {
// global proxy when called as functions (without an explicit receiver
// object).
if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
!info_->is_native()) {
!info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@ -728,8 +683,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
int first_parameter = scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
Register value = x0;
Register scratch = x3;
@ -743,8 +699,9 @@ bool LCodeGen::GeneratePrologue() {
__ Str(value, target);
// Update the write barrier. This clobbers value and scratch.
if (need_write_barrier) {
__ RecordWriteContextSlot(cp, target.offset(), value, scratch,
GetLinkRegisterState(), kSaveFPRegs);
__ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
value, scratch, GetLinkRegisterState(),
kSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
@ -995,15 +952,10 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK(deoptimization_literals_.length() == 0);
const ZoneList<Handle<JSFunction> >* inlined_closures =
chunk()->inlined_closures();
for (int i = 0, length = inlined_closures->length(); i < length; i++) {
DefineDeoptimizationLiteral(inlined_closures->at(i));
DCHECK_EQ(0, deoptimization_literals_.length());
for (auto function : chunk()->inlined_functions()) {
DefineDeoptimizationLiteral(function);
}
inlined_function_count_ = deoptimization_literals_.length();
}
@ -1281,13 +1233,37 @@ static int64_t ArgumentsOffsetWithoutFrame(int index) {
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
DCHECK(op != NULL);
DCHECK(!op->IsRegister());
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
return MemOperand(fp, StackSlotOffset(op->index()));
int fp_offset = StackSlotOffset(op->index());
// Loads and stores have a bigger reach in positive offset than negative.
// We try to access using jssp (positive offset) first, then fall back to
// fp (negative offset) if that fails.
//
// We can reference a stack slot from jssp only if we know how much we've
// put on the stack. We don't know this in the following cases:
// - stack_mode != kCanUseStackPointer: this is the case when deferred
// code has saved the registers.
// - saves_caller_doubles(): some double registers have been pushed, jssp
// references the end of the double registers and not the end of the stack
// slots.
// In both of the cases above, we _could_ add the tracking information
// required so that we can use jssp here, but in practice it isn't worth it.
if ((stack_mode == kCanUseStackPointer) &&
!info()->saves_caller_doubles()) {
int jssp_offset_to_fp =
StandardFrameConstants::kFixedFrameSizeFromFp +
(pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
int jssp_offset = fp_offset + jssp_offset_to_fp;
if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
return MemOperand(masm()->StackPointer(), jssp_offset);
}
}
return MemOperand(fp, fp_offset);
} else {
// Retrieve parameter without eager stack-frame relative to the
// stack-pointer.
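The jssp fallback above amounts to the following arithmetic, sketched with an illustrative fixed frame size (saved fp and lr); V8 takes the real constants from StandardFrameConstants:

    #include <cstdint>

    // Convert an fp-relative slot offset into a jssp-relative one, given
    // how many pointers sit between jssp and fp.
    int JsspOffsetFromFpOffset(int fp_offset, int pushed_arguments,
                               int stack_slot_count) {
      const int kPointerSize = 8;
      const int kFixedFrameSizeFromFp = 2 * kPointerSize;
      int jssp_offset_to_fp =
          kFixedFrameSizeFromFp +
          (pushed_arguments + stack_slot_count) * kPointerSize;
      return fp_offset + jssp_offset_to_fp;
    }
    // The jssp form is used only if this offset also encodes as a scaled
    // load/store immediate (IsImmLSScaled above); otherwise fp is used.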
@ -1772,8 +1748,8 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
Handle<Code> code = CodeFactory::BinaryOpIC(
isolate(), instr->op(), instr->language_mode()).code();
Handle<Code> code =
CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@ -2021,29 +1997,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
}
void LCodeGen::DoTailCallThroughMegamorphicCache(
LTailCallThroughMegamorphicCache* instr) {
Register receiver = ToRegister(instr->receiver());
Register name = ToRegister(instr->name());
DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
DCHECK(name.is(LoadDescriptor::NameRegister()));
DCHECK(receiver.is(x1));
DCHECK(name.is(x2));
Register scratch = x4;
Register extra = x5;
Register extra2 = x6;
Register extra3 = x7;
// The probe will tail call to a handler if found.
isolate()->stub_cache()->GenerateProbe(
masm(), Code::LOAD_IC, instr->hydrogen()->flags(), false, receiver, name,
scratch, extra, extra2, extra3);
// Tail call to miss if we ended up here.
LoadIC::GenerateMiss(masm());
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->result()).Is(x0));
@ -2085,6 +2038,8 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
generator.AfterCall();
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -2104,11 +2059,13 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
__ Call(x10);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -2134,6 +2091,7 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
default:
UNREACHABLE();
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -2554,7 +2512,8 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->left()).Is(x1));
DCHECK(ToRegister(instr->right()).Is(x0));
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
InlineSmiCheckInfo::EmitNotInlined(masm());
@ -2653,18 +2612,14 @@ void LCodeGen::DoDateField(LDateField* instr) {
Register temp1 = x10;
Register temp2 = x11;
Smi* index = instr->index();
Label runtime, done;
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ Mov(temp1, Operand(stamp));
@ -2680,9 +2635,8 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ Bind(&runtime);
__ Mov(x1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ Bind(&done);
}
__ Bind(&done);
}
@ -3196,6 +3150,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
instr->hydrogen()->formal_parameter_count(),
instr->arity(), instr);
}
RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}
@ -3309,6 +3264,16 @@ void LCodeGen::DoLabel(LLabel* label) {
label->block_id(),
LabelType(label));
// Inherit pushed_arguments_ from the predecessor's argument count.
if (label->block()->HasPredecessor()) {
pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
#ifdef DEBUG
for (auto p : *label->block()->predecessors()) {
DCHECK_EQ(p->argument_count(), pushed_arguments_);
}
#endif
}
__ Bind(label->label());
current_block_ = label->block_id();
DoGap(label);
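The inherit-then-DCHECK pattern above assumes every predecessor left the same number of arguments on the stack; the same logic in isolation (a sketch, assuming at least one predecessor as guarded by HasPredecessor):

    #include <cassert>
    #include <vector>

    // A block's incoming pushed-argument count comes from its first
    // predecessor; in debug builds all predecessors must agree.
    int InheritPushedArguments(const std::vector<int>& predecessor_counts) {
      int pushed = predecessor_counts.front();
      for (int count : predecessor_counts) assert(count == pushed);
      return pushed;
    }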
@ -3361,10 +3326,9 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = VectorLoadICDescriptor::SlotRegister();
DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
Register slot_register = LoadWithVectorDescriptor::SlotRegister();
DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(x0));
AllowDeferredHandleDereference vector_structure_check;
@ -3377,17 +3341,29 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
}
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = ToRegister(instr->temp_slot());
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Mov(vector_register, vector);
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Mov(slot_register, Smi::FromInt(index));
}
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->global_object())
.is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode, SLOPPY,
PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3657,9 +3633,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic =
CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->initialization_state()).code();
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@ -3712,13 +3688,11 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// LoadIC expects name and receiver in registers.
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL,
instr->hydrogen()->initialization_state()).code();
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_CONTEXTUAL, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@ -4754,6 +4728,8 @@ void LCodeGen::DoPushArguments(LPushArguments* instr) {
// The preamble was done by LPreparePushArguments.
args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
RecordPushedArgumentsDelta(instr->ArgumentCount());
}
@ -5137,14 +5113,9 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
__ RecordWriteContextSlot(context,
target.offset(),
value,
scratch,
GetLinkRegisterState(),
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
__ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
scratch, GetLinkRegisterState(), kSaveFPRegs,
EMIT_REMEMBERED_SET, check_needed);
}
__ Bind(&skip_assignment);
}
@ -5322,6 +5293,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
@ -5329,6 +5304,91 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
LInstruction* instr() override { return instr_; }
private:
LMaybeGrowElements* instr_;
};
Register result = x0;
DeferredMaybeGrowElements* deferred =
new (zone()) DeferredMaybeGrowElements(this, instr);
LOperand* key = instr->key();
LOperand* current_capacity = instr->current_capacity();
DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
DCHECK(key->IsConstantOperand() || key->IsRegister());
DCHECK(current_capacity->IsConstantOperand() ||
current_capacity->IsRegister());
if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
if (constant_key >= constant_capacity) {
// Deferred case.
__ B(deferred->entry());
}
} else if (key->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
__ Cmp(ToRegister(current_capacity), Operand(constant_key));
__ B(le, deferred->entry());
} else if (current_capacity->IsConstantOperand()) {
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
__ Cmp(ToRegister(key), Operand(constant_capacity));
__ B(ge, deferred->entry());
} else {
__ Cmp(ToRegister(key), ToRegister(current_capacity));
__ B(ge, deferred->entry());
}
__ Mov(result, ToRegister(instr->elements()));
__ Bind(deferred->exit());
}
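The four-way dispatch above folds constants where possible; the same decision written as plain C++ (schematic; returning true means take the deferred grow path):

    #include <cstdint>
    #include <optional>

    bool NeedsDeferredGrow(std::optional<int32_t> const_key,
                           std::optional<int32_t> const_capacity,
                           int32_t key, int32_t capacity) {
      if (const_key && const_capacity) return *const_key >= *const_capacity;
      if (const_key) return capacity <= *const_key;       // B(le) above
      if (const_capacity) return key >= *const_capacity;  // B(ge) above
      return key >= capacity;                             // B(ge) above
    }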
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register result = x0;
__ Mov(result, 0);
// We have to call a stub.
{
PushSafepointRegistersScope scope(this);
__ Move(result, ToRegister(instr->object()));
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
__ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
} else {
__ Mov(x3, ToRegister(key));
__ SmiTag(x3);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(
instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
__ StoreToSafepointRegisterSlot(result, result);
}
// Deopt on smi, which means the elements array changed to dictionary mode.
DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
}
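The SmiTag applied to the key before calling the stub is a plain shift on arm64, where the 32-bit payload lives in the upper half of the word; schematically:

    #include <cstdint>

    // 64-bit V8 keeps a small integer (Smi) in the upper 32 bits with a
    // zero tag bit at the bottom. Illustrative only.
    int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << 32;
    }

    int32_t SmiUntag(int64_t smi) {
      return static_cast<int32_t>(smi >> 32);
    }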
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Representation representation = instr->representation();
@ -5433,10 +5493,14 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
}
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
Handle<Code> ic =
StoreIC::initialize_stub(isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state());
Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -5548,7 +5612,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
InlineSmiCheckInfo::EmitNotInlined(masm());
@ -6054,5 +6119,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -28,7 +28,6 @@ class LCodeGen: public LCodeGenBase {
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
@ -37,7 +36,8 @@ class LCodeGen: public LCodeGenBase {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
expected_safepoint_kind_(Safepoint::kSimple),
pushed_arguments_(0) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@ -81,7 +81,9 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI>
@ -114,6 +116,7 @@ class LCodeGen: public LCodeGenBase {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
@ -190,6 +193,8 @@ class LCodeGen: public LCodeGenBase {
template <class T>
void EmitVectorLoadICRegisters(T* instr);
template <class T>
void EmitVectorStoreICRegisters(T* instr);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
@ -197,7 +202,6 @@ class LCodeGen: public LCodeGenBase {
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
@ -341,7 +345,6 @@ class LCodeGen: public LCodeGenBase {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
@ -358,6 +361,15 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
// The number of arguments pushed onto the stack, either by this block or by a
// predecessor.
int pushed_arguments_;
void RecordPushedArgumentsDelta(int delta) {
pushed_arguments_ += delta;
DCHECK(pushed_arguments_ >= 0);
}
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {


@ -292,4 +292,5 @@ void LGapResolver::EmitMove(int index) {
moves_[index].Eliminate();
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8


@ -926,8 +926,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
masm_->PushPreamble(size_);
}
int count = queued_.size();
int index = 0;
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
// PushHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
@ -949,8 +949,8 @@ void MacroAssembler::PushPopQueue::PushQueued(
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
int count = queued_.size();
int index = 0;
size_t count = queued_.size();
size_t index = 0;
while (index < count) {
// PopHelper can only handle registers with the same size and type, and it
// can handle only four at a time. Batch them up accordingly.
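The int→size_t change matches the unsigned type returned by the queue's size() and avoids an implicit narrowing conversion; the corrected pattern in isolation (illustrative):

    #include <cstddef>
    #include <vector>

    void DrainQueue(const std::vector<int>& queued) {
      size_t count = queued.size();  // size() returns an unsigned size_t
      size_t index = 0;
      while (index < count) {
        // Batch up work here, as PushQueued/PopQueued do four at a time.
        ++index;
      }
    }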
@ -1263,7 +1263,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@ -3928,6 +3928,7 @@ void MacroAssembler::GetNumberHash(Register key, Register scratch) {
Add(key, key, scratch);
// hash = hash ^ (hash >> 16);
Eor(key, key, Operand(key, LSR, 16));
Bic(key, key, Operand(0xc0000000u));
}
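The added Bic masks off the top two bits so the result stays in positive Smi range; the tail of the mix as plain C++:

    #include <cstdint>

    uint32_t FinishNumberHash(uint32_t hash) {
      hash ^= hash >> 16;    // hash = hash ^ (hash >> 16);
      hash &= ~0xc0000000u;  // Bic(key, key, 0xc0000000): clear top bits.
      return hash;
    }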
@ -4693,7 +4694,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
int offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
Ldr(scratch2, FieldMemOperand(scratch1, offset));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
@ -5115,7 +5116,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
// 'check' in the other bits. The possible offset is limited in that we
// use BitField to pack the data, and the underlying data type is a
// uint32_t.
uint32_t delta = __ InstructionsGeneratedSince(smi_check);
uint32_t delta =
static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
} else {
DCHECK(!smi_check->is_bound());
@ -5136,9 +5138,10 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
// 32-bit values.
DCHECK(is_uint32(payload));
if (payload != 0) {
int reg_code = RegisterBits::decode(payload);
uint32_t payload32 = static_cast<uint32_t>(payload);
int reg_code = RegisterBits::decode(payload32);
reg_ = Register::XRegFromCode(reg_code);
uint64_t smi_check_delta = DeltaBits::decode(payload);
int smi_check_delta = DeltaBits::decode(payload32);
DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}
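The fix narrows the 64-bit payload to uint32_t before decoding, because (per the comment above) RegisterBits and DeltaBits pack their fields into a uint32_t. A self-contained sketch of that encode/decode pattern; the field widths here are assumptions for illustration, not V8's actual layout:

```cpp
#include <cassert>
#include <cstdint>

// Minimal stand-in for V8's BitField template.
template <typename T, int kShift, int kSize>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value >> kShift) & ((1u << kSize) - 1));
  }
};

// Hypothetical widths: 5 bits of register code, the rest for the delta.
using RegisterBits = BitField<int, 0, 5>;
using DeltaBits = BitField<int, 5, 27>;

int main() {
  uint64_t payload = RegisterBits::encode(17) | DeltaBits::encode(42);
  uint32_t payload32 = static_cast<uint32_t>(payload);  // as in the fix
  assert(RegisterBits::decode(payload32) == 17);
  assert(DeltaBits::decode(payload32) == 42);
  return 0;
}
```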
@ -5149,6 +5152,7 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
#undef __
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -886,8 +886,8 @@ class MacroAssembler : public Assembler {
template<typename Field>
void DecodeField(Register dst, Register src) {
static const uint64_t shift = Field::kShift;
static const uint64_t setbits = CountSetBits(Field::kMask, 32);
static const int shift = Field::kShift;
static const int setbits = CountSetBits(Field::kMask, 32);
Ubfx(dst, src, shift, setbits);
}
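DecodeField's constants become int, presumably to match Ubfx's integer operands. What the call extracts, written as a hedged plain-C++ equivalent (the field below is hypothetical):

```cpp
#include <cassert>
#include <cstdint>

// Unsigned bitfield extract: CountSetBits(kMask) bits starting at kShift.
uint32_t UnsignedBitfieldExtract(uint32_t src, int shift, int width) {
  return (src >> shift) & ((1u << width) - 1);
}

int main() {
  // Hypothetical field: mask 0x0000ff00, i.e. shift 8, width 8.
  assert(UnsignedBitfieldExtract(0x12345678u, 8, 8) == 0x56u);
  return 0;
}
```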


@ -1611,6 +1611,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -20,6 +20,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
RegExpMacroAssemblerARM64(Isolate* isolate, Zone* zone, Mode mode,
int registers_to_save);
virtual ~RegExpMacroAssemblerARM64();
virtual void AbortedCodeGeneration() { masm_->AbortedCodeGeneration(); }
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);


@ -490,7 +490,7 @@ class Redirection {
static Redirection* FromHltInstruction(Instruction* redirect_call) {
char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
char* addr_of_redirection =
addr_of_hlt - OFFSET_OF(Redirection, redirect_call_);
addr_of_hlt - offsetof(Redirection, redirect_call_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
@ -500,6 +500,14 @@ class Redirection {
return redirection->external_function<void*>();
}
static void DeleteChain(Redirection* redirection) {
while (redirection != nullptr) {
Redirection* next = redirection->next_;
delete redirection;
redirection = next;
}
}
private:
void* external_function_;
Instruction redirect_call_;
@ -508,6 +516,12 @@ class Redirection {
};
// static
void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
}
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
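The new TearDown/DeleteChain pair is a plain iterative teardown of the simulator's redirection list. A minimal model (names mirror the diff; the surrounding Simulator and HashMap machinery is omitted):

```cpp
#include <cstddef>

// Redirections form a singly linked list through next_; DeleteChain walks it
// iteratively so every node is freed exactly once at isolate teardown.
struct Redirection {
  Redirection* next_;
};

void DeleteChain(Redirection* redirection) {
  while (redirection != nullptr) {
    Redirection* next = redirection->next_;
    delete redirection;
    redirection = next;
  }
}

int main() {
  Redirection* head = nullptr;
  for (int i = 0; i < 3; i++) head = new Redirection{head};
  DeleteChain(head);  // frees all three nodes, no recursion, no leaks
  return 0;
}
```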
@ -903,10 +917,11 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
return static_cast<unsignedT>(value) >> amount;
case ASR:
return value >> amount;
case ROR:
case ROR: {
unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
return (static_cast<unsignedT>(value) >> amount) |
((value & ((1L << amount) - 1L)) <<
(sizeof(unsignedT) * 8 - amount));
((value & mask) << (sizeof(mask) * 8 - amount));
}
default:
UNIMPLEMENTED();
return 0;
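The ROR fix builds the rotate mask in the operand's own unsigned type instead of via a 64-bit 1L << amount intermediate, which mixed widths for 32-bit operands. The fixed path as a stand-alone template (assuming 0 < amount < bit width):

```cpp
#include <cassert>
#include <cstdint>

// Stand-alone sketch of the fixed ROR path shown above.
template <typename unsignedT>
unsignedT RotateRight(unsignedT value, unsigned amount) {
  unsignedT mask = (static_cast<unsignedT>(1) << amount) - 1;
  return (value >> amount) | ((value & mask) << (sizeof(mask) * 8 - amount));
}

int main() {
  assert(RotateRight<uint32_t>(0x80000001u, 1) == 0xc0000000u);
  assert(RotateRight<uint64_t>(1ull, 4) == 0x1000000000000000ull);
  return 0;
}
```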
@ -1399,7 +1414,8 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
int64_t op2 = ShiftOperand(xreg(instr->Rm()), shift_type, shift_amount);
AddSubHelper(instr, op2);
} else {
int32_t op2 = ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount);
int32_t op2 = static_cast<int32_t>(
ShiftOperand(wreg(instr->Rm()), shift_type, shift_amount));
AddSubHelper(instr, op2);
}
}
@ -1410,7 +1426,7 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
AddSubHelper<int64_t>(instr, op2);
} else {
AddSubHelper<int32_t>(instr, op2);
AddSubHelper<int32_t>(instr, static_cast<int32_t>(op2));
}
}
@ -1457,7 +1473,7 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
LogicalHelper<int64_t>(instr, instr->ImmLogical());
} else {
LogicalHelper<int32_t>(instr, instr->ImmLogical());
LogicalHelper<int32_t>(instr, static_cast<int32_t>(instr->ImmLogical()));
}
}
@ -1879,7 +1895,7 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
// Get the shifted immediate.
int64_t shift = instr->ShiftMoveWide() * 16;
int64_t shifted_imm16 = instr->ImmMoveWide() << shift;
int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
// Compute the new value.
switch (mov_op) {
@ -1912,25 +1928,32 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
void Simulator::VisitConditionalSelect(Instruction* instr) {
uint64_t new_val = xreg(instr->Rn());
if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
uint64_t new_val = xreg(instr->Rm());
new_val = xreg(instr->Rm());
switch (instr->Mask(ConditionalSelectMask)) {
case CSEL_w: set_wreg(instr->Rd(), new_val); break;
case CSEL_x: set_xreg(instr->Rd(), new_val); break;
case CSINC_w: set_wreg(instr->Rd(), new_val + 1); break;
case CSINC_x: set_xreg(instr->Rd(), new_val + 1); break;
case CSINV_w: set_wreg(instr->Rd(), ~new_val); break;
case CSINV_x: set_xreg(instr->Rd(), ~new_val); break;
case CSNEG_w: set_wreg(instr->Rd(), -new_val); break;
case CSNEG_x: set_xreg(instr->Rd(), -new_val); break;
case CSEL_w:
case CSEL_x:
break;
case CSINC_w:
case CSINC_x:
new_val++;
break;
case CSINV_w:
case CSINV_x:
new_val = ~new_val;
break;
case CSNEG_w:
case CSNEG_x:
new_val = -new_val;
break;
default: UNIMPLEMENTED();
}
}
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), new_val);
} else {
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), xreg(instr->Rn()));
} else {
set_wreg(instr->Rd(), wreg(instr->Rn()));
}
set_wreg(instr->Rd(), static_cast<uint32_t>(new_val));
}
}
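The rewrite computes one new_val first (Rn when the condition holds, a transformed Rm when it fails) and writes the destination once, truncating for the W forms. A condensed model of that control flow (the opcode set is reduced to an enum for illustration):

```cpp
#include <cassert>
#include <cstdint>

enum CselOp { kCsel, kCsinc, kCsinv, kCsneg };

uint64_t ConditionalSelect(bool condition_failed, uint64_t rn, uint64_t rm,
                           CselOp op) {
  uint64_t new_val = rn;             // condition held: pass Rn through
  if (condition_failed) {
    new_val = rm;                    // condition failed: transform Rm
    switch (op) {
      case kCsel:  break;
      case kCsinc: new_val++; break;
      case kCsinv: new_val = ~new_val; break;
      case kCsneg: new_val = ~new_val + 1; break;  // two's-complement negate
    }
  }
  return new_val;                    // W forms truncate this to 32 bits
}

int main() {
  assert(ConditionalSelect(false, 7, 9, kCsinc) == 7);  // Rn unchanged
  assert(ConditionalSelect(true, 7, 9, kCsinc) == 10);  // Rm + 1
  assert(static_cast<uint32_t>(ConditionalSelect(true, 0, 1, kCsneg)) ==
         0xffffffffu);                                  // W-form truncation
  return 0;
}
```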
@ -1940,13 +1963,27 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
unsigned src = instr->Rn();
switch (instr->Mask(DataProcessing1SourceMask)) {
case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break;
case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break;
case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break;
case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break;
case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break;
case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break;
case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break;
case RBIT_w:
set_wreg(dst, ReverseBits(wreg(src)));
break;
case RBIT_x:
set_xreg(dst, ReverseBits(xreg(src)));
break;
case REV16_w:
set_wreg(dst, ReverseBytes(wreg(src), 1));
break;
case REV16_x:
set_xreg(dst, ReverseBytes(xreg(src), 1));
break;
case REV_w:
set_wreg(dst, ReverseBytes(wreg(src), 2));
break;
case REV32_x:
set_xreg(dst, ReverseBytes(xreg(src), 2));
break;
case REV_x:
set_xreg(dst, ReverseBytes(xreg(src), 3));
break;
case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
break;
case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
@ -1964,44 +2001,6 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
}
uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
uint64_t result = 0;
for (unsigned i = 0; i < num_bits; i++) {
result = (result << 1) | (value & 1);
value >>= 1;
}
return result;
}
uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) {
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
uint64_t mask = 0xff00000000000000UL;
for (int i = 7; i >= 0; i--) {
bytes[i] = (value & mask) >> (i * 8);
mask >>= 8;
}
// Permutation tables for REV instructions.
// permute_table[Reverse16] is used by REV16_x, REV16_w
// permute_table[Reverse32] is used by REV32_x, REV_w
// permute_table[Reverse64] is used by REV_x
DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7} };
uint64_t result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[mode][i]];
}
return result;
}
template <typename T>
void Simulator::DataProcessing2Source(Instruction* instr) {
Shift shift_op = NO_SHIFT;
@ -2121,7 +2120,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
if (instr->SixtyFourBits()) {
set_xreg(instr->Rd(), result);
} else {
set_wreg(instr->Rd(), result);
set_wreg(instr->Rd(), static_cast<int32_t>(result));
}
}
@ -2138,8 +2137,9 @@ void Simulator::BitfieldHelper(Instruction* instr) {
mask = diff < reg_size - 1 ? (static_cast<T>(1) << (diff + 1)) - 1
: static_cast<T>(-1);
} else {
mask = ((1L << (S + 1)) - 1);
mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
uint64_t umask = ((1L << (S + 1)) - 1);
umask = (umask >> R) | (umask << (reg_size - R));
mask = static_cast<T>(umask);
diff += reg_size;
}
@ -2563,7 +2563,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// Bail out early for zero inputs.
if (mantissa == 0) {
return sign << sign_offset;
return static_cast<T>(sign << sign_offset);
}
// If all bits in the exponent are set, the value is infinite or NaN.
@ -2580,9 +2580,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// FPTieEven rounding mode handles overflows using infinities.
exponent = infinite_exponent;
mantissa = 0;
return (sign << sign_offset) |
(exponent << exponent_offset) |
(mantissa << mantissa_offset);
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
(mantissa << mantissa_offset));
}
// Calculate the shift required to move the top mantissa bit to the proper
@ -2605,7 +2605,7 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// non-zero result after rounding.
if (shift > (highest_significant_bit + 1)) {
// The result will always be +/-0.0.
return sign << sign_offset;
return static_cast<T>(sign << sign_offset);
}
// Properly encode the exponent for a subnormal output.
@ -2624,9 +2624,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
uint64_t adjusted = mantissa - (halfbit_mantissa & ~onebit_mantissa);
T halfbit_adjusted = (adjusted >> (shift-1)) & 1;
T result = (sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa >> shift) << mantissa_offset);
T result =
static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
((mantissa >> shift) << mantissa_offset));
// A very large mantissa can overflow during rounding. If this happens, the
// exponent should be incremented and the mantissa set to 1.0 (encoded as
@ -2641,9 +2641,9 @@ static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
// We have to shift the mantissa to the left (or not at all). The input
// mantissa is exactly representable in the output mantissa, so apply no
// rounding correction.
return (sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa << -shift) << mantissa_offset);
return static_cast<T>((sign << sign_offset) |
(exponent << exponent_offset) |
((mantissa << -shift) << mantissa_offset));
}
}
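All the added static_casts in FPRound truncate the packed result to the destination type T (half, single or double bit patterns). For a normal single-precision value the packing reduces to the usual IEEE-754 layout; a worked example under that assumption (FPRound's rounding, subnormal and overflow handling is omitted):

```cpp
#include <cassert>
#include <cstdint>

// Pack sign/exponent/mantissa at the single-precision offsets (31, 23, 0).
uint32_t PackFloat(uint32_t sign, uint32_t biased_exponent,
                   uint32_t mantissa23) {
  return (sign << 31) | (biased_exponent << 23) | mantissa23;
}

int main() {
  // 1.5f: sign 0, exponent 0 + bias 127, fraction 0.5 -> mantissa top bit.
  assert(PackFloat(0, 127, 1u << 22) == 0x3fc00000u);
  return 0;
}
```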
@ -2838,7 +2838,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
uint32_t sign = raw >> 63;
uint32_t exponent = (1 << 8) - 1;
uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw);
uint32_t payload =
static_cast<uint32_t>(unsigned_bitextract_64(50, 52 - 23, raw));
payload |= (1 << 22); // Force a quiet NaN.
return rawbits_to_float((sign << 31) | (exponent << 23) | payload);
@ -2859,7 +2860,8 @@ float Simulator::FPToFloat(double value, FPRounding round_mode) {
// Extract the IEEE-754 double components.
uint32_t sign = raw >> 63;
// Extract the exponent and remove the IEEE-754 encoding bias.
int32_t exponent = unsigned_bitextract_64(62, 52, raw) - 1023;
int32_t exponent =
static_cast<int32_t>(unsigned_bitextract_64(62, 52, raw)) - 1023;
// Extract the mantissa and add the implicit '1' bit.
uint64_t mantissa = unsigned_bitextract_64(51, 0, raw);
if (std::fpclassify(value) == FP_NORMAL) {
@ -3210,11 +3212,11 @@ void Simulator::VisitSystem(Instruction* instr) {
case MSR: {
switch (instr->ImmSystemRegister()) {
case NZCV:
nzcv().SetRawValue(xreg(instr->Rt()));
nzcv().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(NZCV);
break;
case FPCR:
fpcr().SetRawValue(xreg(instr->Rt()));
fpcr().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(FPCR);
break;
default: UNIMPLEMENTED();
@ -3835,6 +3837,7 @@ void Simulator::DoPrintf(Instruction* instr) {
#endif // USE_SIMULATOR
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -72,12 +72,6 @@ class SimulatorStack : public v8::internal::AllStatic {
#else // !defined(USE_SIMULATOR)
enum ReverseByteMode {
Reverse16 = 0,
Reverse32 = 1,
Reverse64 = 2
};
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
@ -169,6 +163,8 @@ class Simulator : public DecoderVisitor {
static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
static Simulator* current(v8::internal::Isolate* isolate);
class CallArgument;
@ -706,9 +702,6 @@ class Simulator : public DecoderVisitor {
template <typename T>
void BitfieldHelper(Instruction* instr);
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
template <typename T>
T FPDefaultNaN() const;
@ -884,10 +877,10 @@ class Simulator : public DecoderVisitor {
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->CallRegExp( \
entry, \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
static_cast<int>( \
Simulator::current(Isolate::Current()) \
->CallRegExp(entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
// The simulator has its own stack. Thus it has a different stack limit from


@ -74,7 +74,7 @@ int CountSetBits(uint64_t value, int width) {
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return value;
return static_cast<int>(value);
}
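CountSetBits is a SWAR population count; the fix is only the final static_cast<int>, since the accumulator is a uint64_t. A stand-alone version (the hunk shows just the 16- and 32-bit steps, so the earlier rungs below assume the standard SWAR ladder, and the width parameter is dropped):

```cpp
#include <cassert>
#include <cstdint>

int CountSetBits(uint64_t value) {
  // Sum adjacent 1-, 2-, 4-, 8-, 16- and 32-bit fields in parallel.
  value = ((value >> 1) & 0x5555555555555555ull) + (value & 0x5555555555555555ull);
  value = ((value >> 2) & 0x3333333333333333ull) + (value & 0x3333333333333333ull);
  value = ((value >> 4) & 0x0f0f0f0f0f0f0f0full) + (value & 0x0f0f0f0f0f0f0f0full);
  value = ((value >> 8) & 0x00ff00ff00ff00ffull) + (value & 0x00ff00ff00ff00ffull);
  value = ((value >> 16) & 0x0000ffff0000ffffull) + (value & 0x0000ffff0000ffffull);
  value = ((value >> 32) & 0x00000000ffffffffull) + (value & 0x00000000ffffffffull);
  return static_cast<int>(value);  // the fix: value itself is 64-bit
}

int main() {
  assert(CountSetBits(0) == 0);
  assert(CountSetBits(0xf0f0ull) == 8);
  assert(CountSetBits(~0ull) == 64);
  return 0;
}
```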
@ -89,6 +89,7 @@ int MaskToBit(uint64_t mask) {
}
} } // namespace v8::internal
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_ARM64


@ -61,6 +61,49 @@ uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
template <typename T>
T ReverseBits(T value) {
DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
(sizeof(value) == 8));
T result = 0;
for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
result = (result << 1) | (value & 1);
value >>= 1;
}
return result;
}
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
DCHECK((1U << block_bytes_log2) <= sizeof(value));
// Split the 64-bit value into an 8-bit array, where b[0] is the least
// significant byte, and b[7] is the most significant.
uint8_t bytes[8];
uint64_t mask = 0xff00000000000000;
for (int i = 7; i >= 0; i--) {
bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
mask >>= 8;
}
// Permutation tables for REV instructions.
// permute_table[0] is used by REV16_x, REV16_w
// permute_table[1] is used by REV32_x, REV_w
// permute_table[2] is used by REV_x
DCHECK((0 < block_bytes_log2) && (block_bytes_log2 < 4));
static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
{4, 5, 6, 7, 0, 1, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7}};
T result = 0;
for (int i = 0; i < 8; i++) {
result <<= 8;
result |= bytes[permute_table[block_bytes_log2 - 1][i]];
}
return result;
}
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = double_to_rawbits(num);

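The ReverseBits/ReverseBytes pair moved out of the simulator into utils-arm64.h as templates (matching the deletions further up). A worked example of the table-driven ReverseBytes, copied locally since the header is V8-internal: block_bytes_log2 selects REV16-, REV32- or REV-style behaviour, i.e. byte swaps within 2-, 4- or 8-byte blocks.

```cpp
#include <cassert>
#include <cstdint>

uint64_t ReverseBytes64(uint64_t value, int block_bytes_log2) {
  // b[0] is the least significant byte, b[7] the most significant.
  uint8_t bytes[8];
  for (int i = 0; i < 8; i++) bytes[i] = static_cast<uint8_t>(value >> (i * 8));
  // Row 0: REV16 pattern, row 1: REV32/REV (w), row 2: REV (x).
  static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
                                              {4, 5, 6, 7, 0, 1, 2, 3},
                                              {0, 1, 2, 3, 4, 5, 6, 7}};
  uint64_t result = 0;
  for (int i = 0; i < 8; i++) {
    result = (result << 8) | bytes[permute_table[block_bytes_log2 - 1][i]];
  }
  return result;
}

int main() {
  // REV16-style (block_bytes_log2 == 1): swap bytes within each halfword.
  assert(ReverseBytes64(0x0102030405060708ull, 1) == 0x0201040306050807ull);
  // Full 8-byte reversal (block_bytes_log2 == 3).
  assert(ReverseBytes64(0x0102030405060708ull, 3) == 0x0807060504030201ull);
  return 0;
}
```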

@ -2,17 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
var $iteratorCreateResultObject;
var $arrayValues;
(function(global, shared, exports) {
(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
var GlobalArray = global.Array;
var GlobalObject = global.Object;
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array)
@ -122,19 +120,19 @@ function ArrayKeys() {
}
%FunctionSetPrototype(ArrayIterator, new GlobalObject());
%FunctionSetPrototype(ArrayIterator, {__proto__: $iteratorPrototype});
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
$installFunctions(ArrayIterator.prototype, DONT_ENUM, [
utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
'next', ArrayIteratorNext
]);
$setFunctionName(ArrayIteratorIterator, symbolIterator);
utils.SetFunctionName(ArrayIteratorIterator, symbolIterator);
%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
ArrayIteratorIterator, DONT_ENUM);
%AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
"Array Iterator", READ_ONLY | DONT_ENUM);
$installFunctions(GlobalArray.prototype, DONT_ENUM, [
utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
// No 'values' since it breaks webcompat: http://crbug.com/409858
'entries', ArrayEntries,
'keys', ArrayKeys
@ -153,7 +151,13 @@ endmacro
TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
$iteratorCreateResultObject = CreateIteratorResultObject;
// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
to.ArrayIteratorCreateResultObject = CreateIteratorResultObject;
});
$arrayValues = ArrayValues;
})

283 deps/v8/src/array.js vendored

@ -3,7 +3,6 @@
// found in the LICENSE file.
var $arrayConcat;
var $arrayJoin;
var $arrayPush;
var $arrayPop;
var $arrayShift;
@ -11,13 +10,34 @@ var $arraySlice;
var $arraySplice;
var $arrayUnshift;
(function(global, shared, exports) {
(function(global, utils) {
"use strict";
%CheckIsBootstrapping();
// -------------------------------------------------------------------
// Imports
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var Delete;
var MathMin;
var ObjectHasOwnProperty;
var ObjectIsFrozen;
var ObjectIsSealed;
var ObjectToString;
utils.Import(function(from) {
Delete = from.Delete;
MathMin = from.MathMin;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
ObjectIsFrozen = from.ObjectIsFrozen;
ObjectIsSealed = from.ObjectIsSealed;
ObjectToString = from.ObjectToString;
});
// -------------------------------------------------------------------
@ -223,7 +243,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
for (var i = start_i; i < limit; ++i) {
var current = array[i];
if (!IS_UNDEFINED(current) || i in array) {
%AddElement(deleted_elements, i - start_i, current, NONE);
%AddElement(deleted_elements, i - start_i, current);
}
}
} else {
@ -234,7 +254,7 @@ function SparseSlice(array, start_i, del_count, len, deleted_elements) {
if (key >= start_i) {
var current = array[key];
if (!IS_UNDEFINED(current) || key in array) {
%AddElement(deleted_elements, key - start_i, current, NONE);
%AddElement(deleted_elements, key - start_i, current);
}
}
}
@ -251,7 +271,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
$min(len - del_count + num_additional_args, 0xffffffff));
MathMin(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
@ -283,7 +303,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
if (!IS_UNDEFINED(current) || key in array) {
var new_key = key - del_count + num_additional_args;
new_array[new_key] = current;
if (new_key > 0xffffffff) {
if (new_key > 0xfffffffe) {
big_indices = big_indices || new InternalArray();
big_indices.push(new_key);
}
@ -316,7 +336,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
var current = array[index];
// The spec requires [[DefineOwnProperty]] here, %AddElement is close
// enough (in that it ignores the prototype).
%AddElement(deleted_elements, i, current, NONE);
%AddElement(deleted_elements, i, current);
}
}
}
@ -372,26 +392,27 @@ function ArrayToString() {
func = array.join;
}
if (!IS_SPEC_FUNCTION(func)) {
return %_CallFunction(array, $objectToString);
return %_CallFunction(array, ObjectToString);
}
return %_CallFunction(array, func);
}
function InnerArrayToLocaleString(array, length) {
var len = TO_UINT32(length);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
}
function ArrayToLocaleString() {
var array = $toObject(this);
var arrayLen = array.length;
var len = TO_UINT32(arrayLen);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
return InnerArrayToLocaleString(array, arrayLen);
}
function ArrayJoin(separator) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
var array = TO_OBJECT_INLINE(this);
var length = TO_UINT32(array.length);
function InnerArrayJoin(separator, array, length) {
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@ -413,6 +434,16 @@ function ArrayJoin(separator) {
}
function ArrayJoin(separator) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
var array = TO_OBJECT_INLINE(this);
var length = TO_UINT32(array.length);
return InnerArrayJoin(separator, array, length);
}
function ObservedArrayPop(n) {
n--;
var value = this[n];
@ -447,7 +478,7 @@ function ArrayPop() {
n--;
var value = array[n];
$delete(array, $toName(n), true);
Delete(array, $toName(n), true);
array.length = n;
return value;
}
@ -557,18 +588,7 @@ function SparseReverse(array, len) {
}
function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT_INLINE(this);
var len = TO_UINT32(array.length);
if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
%NormalizeElements(array);
SparseReverse(array, len);
return array;
}
function InnerArrayReverse(array, len) {
var j = len - 1;
for (var i = 0; i < j; i++, j--) {
var current_i = array[i];
@ -593,6 +613,22 @@ function ArrayReverse() {
}
function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT_INLINE(this);
var len = TO_UINT32(array.length);
if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
%NormalizeElements(array);
SparseReverse(array, len);
return array;
}
return InnerArrayReverse(array, len);
}
function ObservedArrayShift(len) {
var first = this[0];
@ -620,7 +656,7 @@ function ArrayShift() {
return;
}
if ($objectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (ObjectIsSealed(array)) throw MakeTypeError(kArrayFunctionsOnSealed);
if (%IsObserved(array))
return ObservedArrayShift.call(array, len);
@ -671,7 +707,7 @@ function ArrayUnshift(arg1) { // length == 1
var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
!$objectIsSealed(array)) {
!ObjectIsSealed(array)) {
SparseMove(array, 0, 0, len, num_arguments);
} else {
SimpleMove(array, 0, 0, len, num_arguments);
@ -817,9 +853,9 @@ function ArraySplice(start, delete_count) {
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
if (del_count != num_elements_to_add && $objectIsSealed(array)) {
if (del_count != num_elements_to_add && ObjectIsSealed(array)) {
throw MakeTypeError(kArrayFunctionsOnSealed);
} else if (del_count > 0 && $objectIsFrozen(array)) {
} else if (del_count > 0 && ObjectIsFrozen(array)) {
throw MakeTypeError(kArrayFunctionsOnFrozen);
}
@ -854,9 +890,7 @@ function ArraySplice(start, delete_count) {
}
function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
function InnerArraySort(length, comparefn) {
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
@ -1101,7 +1135,6 @@ function ArraySort(comparefn) {
return first_undefined;
};
var length = TO_UINT32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
@ -1140,17 +1173,19 @@ function ArraySort(comparefn) {
}
function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
var array = $toObject(this);
var length = TO_UINT32(array.length);
return %_CallFunction(array, length, comparefn, InnerArraySort);
}
// The following functions cannot be made efficient on sparse arrays while
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
function ArrayFilter(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = $toUint32(array.length);
function InnerArrayFilter(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@ -1159,7 +1194,6 @@ function ArrayFilter(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var result = new GlobalArray();
var accumulator = new InternalArray();
var accumulator_length = 0;
var is_array = IS_ARRAY(array);
@ -1175,19 +1209,23 @@ function ArrayFilter(f, receiver) {
}
}
}
%MoveArrayContents(accumulator, result);
return result;
return accumulator;
}
function ArrayForEach(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
function ArrayFilter(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
var length = $toUint32(array.length);
var accumulator = InnerArrayFilter(f, receiver, array, length);
var result = new GlobalArray();
%MoveArrayContents(accumulator, result);
return result;
}
function InnerArrayForEach(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@ -1209,17 +1247,18 @@ function ArrayForEach(f, receiver) {
}
}
// Executes the function once for each element present in the
// array until it finds one where callback returns true.
function ArraySome(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
function ArrayForEach(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.forEach");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
InnerArrayForEach(f, receiver, array, length);
}
function InnerArraySome(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@ -1243,14 +1282,20 @@ function ArraySome(f, receiver) {
}
function ArrayEvery(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
// Executes the function once for each element present in the
// array until it finds one where callback returns true.
function ArraySome(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.some");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
return InnerArraySome(f, receiver, array, length);
}
function InnerArrayEvery(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@ -1273,15 +1318,18 @@ function ArrayEvery(f, receiver) {
return true;
}
function ArrayMap(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
function ArrayEvery(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.every");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
return InnerArrayEvery(f, receiver, array, length);
}
function InnerArrayMap(f, receiver, array, length) {
if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
var needs_wrapper = false;
if (IS_NULL(receiver)) {
@ -1290,7 +1338,6 @@ function ArrayMap(f, receiver) {
needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
}
var result = new GlobalArray();
var accumulator = new InternalArray(length);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@ -1303,15 +1350,29 @@ function ArrayMap(f, receiver) {
accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
}
}
return accumulator;
}
function ArrayMap(f, receiver) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = TO_UINT32(array.length);
var accumulator = InnerArrayMap(f, receiver, array, length);
var result = new GlobalArray();
%MoveArrayContents(accumulator, result);
return result;
}
function ArrayIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
var length = TO_UINT32(this.length);
// For .indexOf, we don't need to pass in the number of arguments
// at the callsite since ToInteger(undefined) == 0; however, for
// .lastIndexOf, we need to pass it, since the behavior for passing
// undefined is 0 but for not including the argument is length-1.
function InnerArrayIndexOf(element, index, length) {
if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
index = 0;
@ -1365,12 +1426,17 @@ function ArrayIndexOf(element, index) {
}
function ArrayLastIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
function ArrayIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
var length = TO_UINT32(this.length);
return %_CallFunction(this, element, index, length, InnerArrayIndexOf);
}
function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
if (length == 0) return -1;
if (%_ArgumentsLength() < 2) {
if (argumentsLength < 2) {
index = length - 1;
} else {
index = TO_INTEGER(index);
@ -1418,21 +1484,23 @@ function ArrayLastIndexOf(element, index) {
}
function ArrayReduce(callback, current) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
function ArrayLastIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = $toUint32(array.length);
var length = TO_UINT32(this.length);
return %_CallFunction(this, element, index, length,
%_ArgumentsLength(), InnerArrayLastIndexOf);
}
function InnerArrayReduce(callback, current, array, length, argumentsLength) {
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError(kCalledNonCallable, callback);
}
var is_array = IS_ARRAY(array);
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
find_initial: if (argumentsLength < 2) {
for (; i < length; i++) {
if (HAS_INDEX(array, i, is_array)) {
current = array[i++];
@ -1455,21 +1523,27 @@ function ArrayReduce(callback, current) {
}
function ArrayReduceRight(callback, current) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
function ArrayReduce(callback, current) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduce");
// Pull out the length so that side effects are visible before the
// callback function is checked.
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = $toObject(this);
var length = $toUint32(array.length);
return InnerArrayReduce(callback, current, array, length,
%_ArgumentsLength());
}
function InnerArrayReduceRight(callback, current, array, length,
argumentsLength) {
if (!IS_SPEC_FUNCTION(callback)) {
throw MakeTypeError(kCalledNonCallable, callback);
}
var is_array = IS_ARRAY(array);
var i = length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
find_initial: if (argumentsLength < 2) {
for (; i >= 0; i--) {
if (HAS_INDEX(array, i, is_array)) {
current = array[i--];
@ -1491,6 +1565,18 @@ function ArrayReduceRight(callback, current) {
return current;
}
function ArrayReduceRight(callback, current) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reduceRight");
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = $toObject(this);
var length = $toUint32(array.length);
return InnerArrayReduceRight(callback, current, array, length,
%_ArgumentsLength());
}
// ES5, 15.4.3.2
function ArrayIsArray(obj) {
return IS_ARRAY(obj);
@ -1519,7 +1605,7 @@ var unscopables = {
DONT_ENUM | READ_ONLY);
// Set up non-enumerable functions on the Array object.
$installFunctions(GlobalArray, DONT_ENUM, [
utils.InstallFunctions(GlobalArray, DONT_ENUM, [
"isArray", ArrayIsArray
]);
@ -1540,7 +1626,7 @@ var getFunction = function(name, jsBuiltin, len) {
// set their names.
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
$installFunctions(GlobalArray.prototype, DONT_ENUM, [
utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"toString", getFunction("toString", ArrayToString),
"toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
"join", getFunction("join", ArrayJoin),
@ -1569,7 +1655,7 @@ $installFunctions(GlobalArray.prototype, DONT_ENUM, [
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code.
// Adding only the functions that are actually used.
$setUpLockedPrototype(InternalArray, GlobalArray(), [
utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
"concat", getFunction("concat", ArrayConcatJS),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
@ -1579,15 +1665,36 @@ $setUpLockedPrototype(InternalArray, GlobalArray(), [
"splice", getFunction("splice", ArraySplice)
]);
$setUpLockedPrototype(InternalPackedArray, GlobalArray(), [
utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"shift", getFunction("shift", ArrayShift)
]);
// -------------------------------------------------------------------
// Exports
utils.Export(function(to) {
to.ArrayIndexOf = ArrayIndexOf;
to.ArrayJoin = ArrayJoin;
to.ArrayToString = ArrayToString;
to.InnerArrayEvery = InnerArrayEvery;
to.InnerArrayFilter = InnerArrayFilter;
to.InnerArrayForEach = InnerArrayForEach;
to.InnerArrayIndexOf = InnerArrayIndexOf;
to.InnerArrayJoin = InnerArrayJoin;
to.InnerArrayLastIndexOf = InnerArrayLastIndexOf;
to.InnerArrayMap = InnerArrayMap;
to.InnerArrayReduce = InnerArrayReduce;
to.InnerArrayReduceRight = InnerArrayReduceRight;
to.InnerArrayReverse = InnerArrayReverse;
to.InnerArraySome = InnerArraySome;
to.InnerArraySort = InnerArraySort;
to.InnerArrayToLocaleString = InnerArrayToLocaleString;
});
$arrayConcat = ArrayConcatJS;
$arrayJoin = ArrayJoin;
$arrayPush = ArrayPush;
$arrayPop = ArrayPop;
$arrayShift = ArrayShift;
@ -1595,4 +1702,4 @@ $arraySlice = ArraySlice;
$arraySplice = ArraySplice;
$arrayUnshift = ArrayUnshift;
})
});
