diff --git a/Cargo.lock b/Cargo.lock index 5326152b43..b4cbaea08f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,6 +23,60 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "aead" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +dependencies = [ + "generic-array", +] + +[[package]] +name = "aes" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" +dependencies = [ + "aes-soft", + "aesni", + "cipher", +] + +[[package]] +name = "aes-gcm" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + +[[package]] +name = "aes-soft" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" +dependencies = [ + "cipher", + "opaque-debug", +] + +[[package]] +name = "aesni" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" +dependencies = [ + "cipher", + "opaque-debug", +] + [[package]] name = "ahash" version = "0.8.8" @@ -423,12 +477,6 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" -[[package]] -name = "base64ct" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" - [[package]] name = 
"bit-set" version = "0.8.0" @@ -542,16 +590,16 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-link", ] [[package]] @@ -560,6 +608,15 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array", +] + [[package]] name = "clap" version = "4.5.0" @@ -610,7 +667,6 @@ dependencies = [ "lazy_static", "mutants", "rand 0.8.5", - "rand_chacha 0.3.1", "regex", "rstest", "rstest_reuse", @@ -620,30 +676,7 @@ dependencies = [ "serde_json", "serde_stacker", "slog", - "stacks-common 0.0.1", -] - -[[package]] -name = "clarity" -version = "0.0.1" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "hashbrown 0.15.2", - "integer-sqrt", - "lazy_static", - "rand 0.8.5", - "rand_chacha 0.3.1", - "regex", - "rstest", - "rstest_reuse", - "rusqlite", - "serde", - "serde_derive", - "serde_json", - "serde_stacker", - "slog", - "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "time 0.2.27", + "stacks-common", ] [[package]] @@ -685,18 +718,29 @@ dependencies = [ "tempfile", ] -[[package]] -name = "const-oid" -version = 
"0.9.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" - [[package]] name = "const_fn" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f8a2ca5ac02d09563609681103aada9e1777d54fc57a5acd7a41404f9c93b6e" +[[package]] +name = "cookie" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" +dependencies = [ + "aes-gcm", + "base64 0.13.1", + "hkdf", + "hmac 0.10.1", + "percent-encoding", + "rand 0.8.5", + "sha2 0.9.9", + "time 0.2.27", + "version_check", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -722,6 +766,12 @@ dependencies = [ "libc", ] +[[package]] +name = "cpuid-bool" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" + [[package]] name = "crc32fast" version = "1.4.0" @@ -763,6 +813,25 @@ dependencies = [ "subtle", ] +[[package]] +name = "crypto-mac" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" +dependencies = [ + "generic-array", + "subtle", +] + +[[package]] +name = "ctr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" +dependencies = [ + "cipher", +] + [[package]] name = "curve25519-dalek" version = "4.1.3" @@ -777,7 +846,6 @@ dependencies = [ "rustc_version 0.4.0", "serde", "subtle", - "zeroize", ] [[package]] @@ -797,16 +865,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" -[[package]] -name = "der" -version = "0.7.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" -dependencies = [ - "const-oid", - "zeroize", -] - [[package]] name = "deranged" version = "0.4.0" @@ -880,8 +938,6 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8", - "serde", "signature", ] @@ -893,11 +949,8 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core 0.6.4", - "serde", "sha2 0.10.8", "subtle", - "zeroize", ] [[package]] @@ -1206,8 +1259,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "ghash" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +dependencies = [ + "opaque-debug", + "polyval", ] [[package]] @@ -1341,13 +1406,33 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +[[package]] +name = "hkdf" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51ab2f639c231793c5f6114bdb9bbe50a7dbbfcd7c7c6bd8475dec2d991e964f" +dependencies = [ + "digest 0.9.0", + "hmac 0.10.1", +] + [[package]] name = "hmac" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" dependencies = [ - "crypto-mac", + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac" +version = "0.10.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" +dependencies = [ + "crypto-mac 0.10.0", "digest 0.9.0", ] @@ -1359,7 +1444,7 @@ checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", "generic-array", - "hmac", + "hmac 0.8.1", ] [[package]] @@ -1426,7 +1511,9 @@ checksum = "6e9b187a72d63adbfba487f48095306ac823049cb504ee195541e91c7775f5ad" dependencies = [ "anyhow", "async-channel 1.9.0", + "async-std", "base64 0.13.1", + "cookie", "futures-lite 1.13.0", "infer", "pin-project-lite", @@ -1754,11 +1841,11 @@ dependencies = [ name = "libsigner" version = "0.0.1" dependencies = [ - "clarity 0.0.1", + "clarity", "hashbrown 0.15.2", "lazy_static", "libc", - "libstackerdb 0.0.1", + "libstackerdb", "mutants", "rand 0.8.5", "rand_core 0.6.4", @@ -1766,28 +1853,8 @@ dependencies = [ "serde_json", "sha2 0.10.8", "slog", - "stacks-common 0.0.1", - "stackslib 0.0.1", - "thiserror", - "tiny_http", -] - -[[package]] -name = "libsigner" -version = "0.0.1" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "clarity 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "hashbrown 0.15.2", - "lazy_static", - "libc", - "libstackerdb 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "serde", - "serde_json", - "sha2 0.10.8", - "slog", - "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "stackslib 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "stacks-common", + "stackslib", "thiserror", "tiny_http", ] @@ -1807,23 +1874,11 @@ dependencies = [ name = "libstackerdb" version = 
"0.0.1" dependencies = [ - "clarity 0.0.1", - "secp256k1", - "serde", - "sha2 0.10.8", - "stacks-common 0.0.1", -] - -[[package]] -name = "libstackerdb" -version = "0.0.1" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "clarity 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "clarity", "secp256k1", "serde", "sha2 0.10.8", - "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "stacks-common", ] [[package]] @@ -2196,16 +2251,6 @@ dependencies = [ "futures-io", ] -[[package]] -name = "pkcs8" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" -dependencies = [ - "der", - "spki", -] - [[package]] name = "pkg-config" version = "0.3.30" @@ -2242,6 +2287,17 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "polyval" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +dependencies = [ + "cpuid-bool", + "opaque-debug", + "universal-hash", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2252,20 +2308,10 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" name = "pox-locking" version = "2.4.0" dependencies = [ - "clarity 0.0.1", + "clarity", "mutants", "slog", - "stacks-common 0.0.1", -] - -[[package]] -name = "pox-locking" -version = "2.4.0" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "clarity 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "slog", - 
"stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "stacks-common", ] [[package]] @@ -3045,9 +3091,6 @@ name = "signature" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" -dependencies = [ - "rand_core 0.6.4", -] [[package]] name = "similar" @@ -3139,16 +3182,6 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -[[package]] -name = "spki" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" -dependencies = [ - "base64ct", - "der", -] - [[package]] name = "stacker" version = "0.1.15" @@ -3169,13 +3202,13 @@ dependencies = [ "chrono", "curve25519-dalek", "ed25519-dalek", + "getrandom 0.2.12", "hashbrown 0.15.2", "lazy_static", "libsecp256k1", "nix", "proptest", "rand 0.8.5", - "rand_core 0.6.4", "ripemd", "rusqlite", "secp256k1", @@ -3191,34 +3224,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "stacks-common" -version = "0.0.1" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "chrono", - "curve25519-dalek", - "ed25519-dalek", - "hashbrown 0.15.2", - "lazy_static", - "libsecp256k1", - "nix", - "rand 0.8.5", - "ripemd", - "rusqlite", - "secp256k1", - "serde", - "serde_derive", - "serde_json", - "sha2 0.10.8", - "sha3", - "slog", - "slog-term", - "time 0.2.27", - "toml", - "winapi 0.3.9", -] - [[package]] name = "stacks-node" version = "0.1.0" @@ -3228,13 +3233,12 @@ dependencies = [ "backtrace", "base64 0.12.3", "chrono", - "clarity 0.0.1", + "clarity", "hashbrown 0.15.2", "http-types", "lazy_static", "libc", - "libsigner 0.0.1", 
- "libsigner 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "libsigner", "madhouse", "mockito", "mutants", @@ -3250,12 +3254,9 @@ dependencies = [ "serde_json", "serial_test", "slog", - "stacks-common 0.0.1", - "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "stacks-signer 0.0.1", - "stacks-signer 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "stackslib 0.0.1", - "stackslib 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", + "stacks-common", + "stacks-signer", + "stackslib", "stdext", "stx-genesis", "tempfile", @@ -3275,11 +3276,11 @@ version = "0.0.1" dependencies = [ "backoff", "clap", - "clarity 0.0.1", + "clarity", "hashbrown 0.15.2", "lazy_static", - "libsigner 0.0.1", - "libstackerdb 0.0.1", + "libsigner", + "libstackerdb", "num-traits", "prometheus", "rand 0.8.5", @@ -3292,8 +3293,8 @@ dependencies = [ "slog", "slog-json", "slog-term", - "stacks-common 0.0.1", - "stackslib 0.0.1", + "stacks-common", + "stackslib", "stdext", "thiserror", "tiny_http", @@ -3303,52 +3304,22 @@ dependencies = [ "url", ] -[[package]] -name = "stacks-signer" -version = "0.0.1" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "backoff", - "clap", - "clarity 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "hashbrown 0.15.2", - "lazy_static", - "libsigner 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "libstackerdb 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "rand 0.8.5", - "rand_core 0.6.4", - "reqwest", - 
"rusqlite", - "secp256k1", - "serde", - "serde_json", - "slog", - "slog-term", - "stacks-common 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "stackslib 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "thiserror", - "toml", - "tracing", - "tracing-subscriber", - "url", -] - [[package]] name = "stackslib" version = "0.0.1" dependencies = [ "assert-json-diff 1.1.0", "chrono", - "clarity 0.0.1", + "clarity", "ed25519-dalek", "hashbrown 0.15.2", "lazy_static", - "libstackerdb 0.0.1", + "libstackerdb", "mio 0.6.23", "mutants", "nix", "percent-encoding", - "pox-locking 2.4.0", + "pox-locking", "prometheus", "rand 0.8.5", "rand_chacha 0.3.1", @@ -3366,7 +3337,7 @@ dependencies = [ "sha2 0.10.8", "siphasher", "slog", - "stacks-common 0.0.1", + "stacks-common", "stdext", "stx-genesis", "tempfile", @@ -3377,42 +3348,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "stackslib" -version = "0.0.1" -source = "git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2#8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2" -dependencies = [ - "chrono", - "clarity 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "ed25519-dalek", - "hashbrown 0.15.2", - "lazy_static", - "libstackerdb 0.0.1 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "mio 0.6.23", - "nix", - "percent-encoding", - "pox-locking 2.4.0 (git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_core 0.6.4", - "regex", - "ripemd", - "rusqlite", - "secp256k1", - "serde", - "serde_derive", - "serde_json", - "sha2 0.10.8", - "siphasher", - "slog", - "stacks-common 0.0.1 
(git+https://github.com/stacks-network/stacks-core.git?rev=8a79aaa7df0f13dfc5ab0d0d0bcb8201c90bcba2)", - "tikv-jemallocator", - "time 0.2.27", - "toml", - "url", - "winapi 0.3.9", -] - [[package]] name = "standback" version = "0.2.17" @@ -3932,6 +3867,16 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "universal-hash" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "untrusted" version = "0.9.0" @@ -4178,6 +4123,12 @@ dependencies = [ "windows-targets 0.52.0", ] +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + [[package]] name = "windows-sys" version = "0.48.0" @@ -4349,9 +4300,3 @@ dependencies = [ "quote", "syn 2.0.58", ] - -[[package]] -name = "zeroize" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" diff --git a/Cargo.toml b/Cargo.toml index d604038f94..838e9d5000 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,24 +14,29 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] -ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } +ed25519-dalek = { version = "2.1.1", default-features = false } hashbrown = { version = "0.15.2", features = ["serde"] } +lazy_static = "1.4.0" rand_core = "0.6.4" rand = "0.8" rand_chacha = "0.3.1" -tikv-jemallocator = "0.5.4" +serde = "1" +serde_derive = "1" +serde_json = { version = "1.0", features = ["arbitrary_precision", "unbounded_depth"] } +slog = { version = "2.5.2", features = [ "max_level_trace" ] } rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } 
+tikv-jemallocator = "0.5.4" thiserror = "1.0.65" toml = "0.5.6" # Use a bit more than default optimization for -# dev builds to speed up test execution +# dev builds to speed up test execution [profile.dev] opt-level = 1 # Use release-level optimization for dependencies # This slows down "first" builds on development environments, -# but won't impact subsequent builds. +# but won't impact subsequent builds. [profile.dev.package."*"] opt-level = 3 diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml index 37b3f0ed6f..8b714fd86b 100644 --- a/clarity/Cargo.toml +++ b/clarity/Cargo.toml @@ -18,38 +18,46 @@ name = "clarity" path = "./src/libclarity.rs" [dependencies] -rand = { workspace = true } -rand_chacha = { workspace = true } -serde = "1" -serde_derive = "1" -serde_stacker = "0.1" -regex = "1" -lazy_static = "1.4.0" -integer-sqrt = "0.1.3" -slog = { version = "2.5.2", features = [ "max_level_trace" ] } -stacks_common = { package = "stacks-common", path = "../stacks-common", default-features = false } -rstest = "0.17.0" -rstest_reuse = "0.5.0" hashbrown = { workspace = true } -rusqlite = { workspace = true, optional = true } +lazy_static = { workspace = true } +regex = { version = "1", default-features = false } +serde = { workspace = true } +serde_derive = { workspace = true } +serde_json = { workspace = true } +slog = { workspace = true } +stacks_common = { package = "stacks-common", path = "../stacks-common", default-features = false } -[dependencies.serde_json] -version = "1.0" -features = ["arbitrary_precision", "unbounded_depth"] +# Optional dependencies +rand = { workspace = true, optional = true } +serde_stacker = { version = "0.1", default-features = false, optional = true } +integer-sqrt = { version = "0.1.3", default-features = false, optional = true } +rusqlite = { workspace = true, optional = true } +rstest = { version = "0.17.0", default-features = false, optional = true } +rstest_reuse = { version = "0.5.0", default-features = false, optional = true 
} [dev-dependencies] assert-json-diff = "1.0.0" mutants = "0.0.3" -# a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling -# but it isn't necessary for tests: only benchmarks. therefore, commenting out for now. -# criterion = "0.3" [features] -default = ["rusqlite"] -developer-mode = ["stacks_common/developer-mode"] +# The default feature set provides the full Clarity virtual machine with its SQLite-based +# database backend. +# To use `clarity` as a lightweight serialization/deserialization library, +# depend on it with `default-features = false`. +default = ["vm", "rusqlite"] +# Enables the complete Clarity Virtual Machine. This includes the parser, analyzer, +# cost-checking system, and execution engine. It transitively enables all necessary +# dependencies for running smart contracts. This feature is required for any on-chain +# contract execution or local contract testing. +vm = ["dep:rand", "dep:serde_stacker", "dep:integer-sqrt"] +developer-mode = ["vm", "stacks_common/developer-mode"] +devtools = ["vm"] +testing = ["vm", "dep:rstest", "dep:rstest_reuse", "rusqlite"] +rusqlite = ["vm", "stacks_common/rusqlite", "dep:rusqlite"] slog_json = ["stacks_common/slog_json"] -rusqlite = ["stacks_common/rusqlite", "dep:rusqlite"] -testing = [] -devtools = [] -rollback_value_check = [] -disable-costs = [] +rollback_value_check = ["vm"] +disable-costs = ["vm"] + +# Wasm-specific features for easier configuration +wasm-web = ["stacks_common/wasm-web"] +wasm-deterministic = ["stacks_common/wasm-deterministic"] diff --git a/clarity/src/libclarity.rs b/clarity/src/libclarity.rs index 7ce2a4f903..4b3bbf269c 100644 --- a/clarity/src/libclarity.rs +++ b/clarity/src/libclarity.rs @@ -20,10 +20,6 @@ #![allow(non_upper_case_globals)] #![cfg_attr(test, allow(unused_variables, unused_assignments))] -#[allow(unused_imports)] -#[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] -extern crate slog; - #[macro_use] extern 
crate serde_derive; @@ -49,6 +45,7 @@ pub use stacks_common::{ /// The Clarity virtual machine pub mod vm; +#[cfg(feature = "vm")] pub mod boot_util { use stacks_common::types::chainstate::StacksAddress; diff --git a/clarity/src/vm/analysis/arithmetic_checker/tests.rs b/clarity/src/vm/analysis/arithmetic_checker/tests.rs index 0e7d520cb3..6453402f9b 100644 --- a/clarity/src/vm/analysis/arithmetic_checker/tests.rs +++ b/clarity/src/vm/analysis/arithmetic_checker/tests.rs @@ -215,13 +215,13 @@ fn test_functions_clarity1() { Err(FunctionNotPermitted(NativeFunctions::SetVar))), ("(define-private (foo (a principal)) (ft-get-balance tokaroos a))", Err(FunctionNotPermitted(NativeFunctions::GetTokenBalance))), - ("(define-private (foo (a principal)) + ("(define-private (foo (a principal)) (ft-transfer? stackaroo u50 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR 'SPAXYA5XS51713FDTQ8H94EJ4V579CXMTRNBZKSF))", Err(FunctionNotPermitted(NativeFunctions::TransferToken))), - ("(define-private (foo (a principal)) + ("(define-private (foo (a principal)) (ft-mint? stackaroo u100 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR))", Err(FunctionNotPermitted(NativeFunctions::MintToken))), - ("(define-private (foo (a principal)) + ("(define-private (foo (a principal)) (nft-mint? stackaroo \"Roo\" 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR))", Err(FunctionNotPermitted(NativeFunctions::MintAsset))), ("(nft-transfer? stackaroo \"Roo\" 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR 'SPAXYA5XS51713FDTQ8H94EJ4V579CXMTRNBZKSF)", @@ -293,7 +293,7 @@ fn test_functions_clarity1() { Ok(())), ("(buff-to-uint-be 0x0001)", Ok(())), - ("(is-standard 'STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6)", + ("(is-standard 'STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6)", Ok(())), ("(principal-destruct? 'STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6)", Ok(())), @@ -358,7 +358,7 @@ fn test_functions_clarity2() { Err(FunctionNotPermitted(NativeFunctions::IsStandard))), ("(principal-destruct? 
'STB44HYPYAT2BB2QE513NSP81HTMYWBJP02HPGK6)", Err(FunctionNotPermitted(NativeFunctions::PrincipalDestruct))), - ("(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320)", + ("(principal-construct? 0x22 0xfa6bf38ed557fe417333710d6033e9419391a320)", Err(FunctionNotPermitted(NativeFunctions::PrincipalConstruct))), ("(string-to-int? \"-1\")", Err(FunctionNotPermitted(NativeFunctions::StringToInt))), diff --git a/clarity/src/vm/analysis/engine.rs b/clarity/src/vm/analysis/engine.rs new file mode 100644 index 0000000000..4355c8ac9e --- /dev/null +++ b/clarity/src/vm/analysis/engine.rs @@ -0,0 +1,172 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use stacks_common::types::StacksEpochId; + +pub use crate::vm::analysis::analysis_db::AnalysisDatabase; +use crate::vm::analysis::arithmetic_checker::ArithmeticOnlyChecker; +use crate::vm::analysis::contract_interface_builder::build_contract_interface; +pub use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; +use crate::vm::analysis::read_only_checker::ReadOnlyChecker; +use crate::vm::analysis::trait_checker::TraitChecker; +use crate::vm::analysis::type_checker::v2_05::TypeChecker as TypeChecker2_05; +use crate::vm::analysis::type_checker::v2_1::TypeChecker as TypeChecker2_1; +pub use crate::vm::analysis::types::{AnalysisPass, ContractAnalysis}; +#[cfg(feature = "rusqlite")] +use crate::vm::ast::{build_ast_with_rules, ASTRules}; +use crate::vm::costs::LimitedCostTracker; +#[cfg(feature = "rusqlite")] +use crate::vm::database::MemoryBackingStore; +use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; +use crate::vm::representations::SymbolicExpression; +use crate::vm::types::QualifiedContractIdentifier; +#[cfg(feature = "rusqlite")] +use crate::vm::types::TypeSignature; +use crate::vm::ClarityVersion; + +/// Used by CLI tools like the docs generator. Not used in production +#[cfg(feature = "rusqlite")] +pub fn mem_type_check( + snippet: &str, + version: ClarityVersion, + epoch: StacksEpochId, +) -> CheckResult<(Option, ContractAnalysis)> { + let contract_identifier: QualifiedContractIdentifier = QualifiedContractIdentifier::transient(); + let contract: Vec = build_ast_with_rules( + &contract_identifier, + snippet, + &mut (), + version, + epoch, + ASTRules::PrecheckSize, + ) + .map_err(|_| CheckErrors::Expects("Failed to build AST".into()))? 
+ .expressions; + + let mut marf: MemoryBackingStore = MemoryBackingStore::new(); + let mut analysis_db = marf.as_analysis_db(); + let cost_tracker = LimitedCostTracker::new_free(); + match run_analysis( + &QualifiedContractIdentifier::transient(), + &contract, + &mut analysis_db, + false, + cost_tracker, + epoch, + version, + true, + ) { + Ok(x) => { + // return the first type result of the type checker + let first_type = x + .type_map + .as_ref() + .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))? + .get_type_expected( + x.expressions + .last() + .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))?, + ) + .cloned(); + Ok((first_type, x)) + } + Err((e, _)) => Err(e), + } +} + +// Legacy function +// The analysis is not just checking type. +#[cfg(test)] +pub fn type_check( + contract_identifier: &QualifiedContractIdentifier, + expressions: &mut [SymbolicExpression], + analysis_db: &mut AnalysisDatabase, + insert_contract: bool, + epoch: &StacksEpochId, + version: &ClarityVersion, +) -> CheckResult { + run_analysis( + contract_identifier, + expressions, + analysis_db, + insert_contract, + // for the type check tests, the cost tracker's epoch doesn't + // matter: the costs in those tests are all free anyways. 
+ LimitedCostTracker::new_free(), + *epoch, + *version, + true, + ) + .map_err(|(e, _cost_tracker)| e) +} + +#[allow(clippy::too_many_arguments)] +pub fn run_analysis( + contract_identifier: &QualifiedContractIdentifier, + expressions: &[SymbolicExpression], + analysis_db: &mut AnalysisDatabase, + save_contract: bool, + cost_tracker: LimitedCostTracker, + epoch: StacksEpochId, + version: ClarityVersion, + build_type_map: bool, +) -> Result { + let mut contract_analysis = ContractAnalysis::new( + contract_identifier.clone(), + expressions.to_vec(), + cost_tracker, + epoch, + version, + ); + let result = analysis_db.execute(|db| { + ReadOnlyChecker::run_pass(&epoch, &mut contract_analysis, db)?; + match epoch { + StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { + TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db, build_type_map) + } + StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 => { + TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db, build_type_map) + } + StacksEpochId::Epoch10 => { + return Err(CheckErrors::Expects( + "Epoch 1.0 is not a valid epoch for analysis".into(), + ) + .into()) + } + }?; + TraitChecker::run_pass(&epoch, &mut contract_analysis, db)?; + ArithmeticOnlyChecker::check_contract_cost_eligible(&mut contract_analysis); + + if STORE_CONTRACT_SRC_INTERFACE { + let interface = build_contract_interface(&contract_analysis)?; + contract_analysis.contract_interface = Some(interface); + } + if save_contract { + db.insert_contract(contract_identifier, &contract_analysis)?; + } + Ok(()) + }); + match result { + Ok(_) => Ok(contract_analysis), + Err(e) => Err((e, contract_analysis.take_contract_cost_tracker())), + } +} diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 19183f5f67..450f927e3c 100644 --- a/clarity/src/vm/analysis/mod.rs +++ 
b/clarity/src/vm/analysis/mod.rs @@ -14,171 +14,27 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +pub mod errors; + +#[cfg(feature = "vm")] pub mod analysis_db; +#[cfg(feature = "vm")] pub mod arithmetic_checker; +#[cfg(feature = "vm")] pub mod contract_interface_builder; -pub mod errors; +#[cfg(feature = "vm")] pub mod read_only_checker; +#[cfg(feature = "vm")] pub mod trait_checker; +#[cfg(feature = "vm")] pub mod type_checker; +#[cfg(feature = "vm")] pub mod types; -use stacks_common::types::StacksEpochId; - -pub use self::analysis_db::AnalysisDatabase; -use self::arithmetic_checker::ArithmeticOnlyChecker; -use self::contract_interface_builder::build_contract_interface; -pub use self::errors::{CheckError, CheckErrors, CheckResult}; -use self::read_only_checker::ReadOnlyChecker; -use self::trait_checker::TraitChecker; -use self::type_checker::v2_05::TypeChecker as TypeChecker2_05; -use self::type_checker::v2_1::TypeChecker as TypeChecker2_1; -pub use self::types::{AnalysisPass, ContractAnalysis}; -#[cfg(feature = "rusqlite")] -use crate::vm::ast::{build_ast_with_rules, ASTRules}; -use crate::vm::costs::LimitedCostTracker; -#[cfg(feature = "rusqlite")] -use crate::vm::database::MemoryBackingStore; -use crate::vm::database::STORE_CONTRACT_SRC_INTERFACE; -use crate::vm::representations::SymbolicExpression; -use crate::vm::types::QualifiedContractIdentifier; -#[cfg(feature = "rusqlite")] -use crate::vm::types::TypeSignature; -use crate::vm::ClarityVersion; - -/// Used by CLI tools like the docs generator. 
Not used in production -#[cfg(feature = "rusqlite")] -pub fn mem_type_check( - snippet: &str, - version: ClarityVersion, - epoch: StacksEpochId, -) -> CheckResult<(Option, ContractAnalysis)> { - let contract_identifier = QualifiedContractIdentifier::transient(); - let contract = build_ast_with_rules( - &contract_identifier, - snippet, - &mut (), - version, - epoch, - ASTRules::PrecheckSize, - ) - .map_err(|_| CheckErrors::Expects("Failed to build AST".into()))? - .expressions; - - let mut marf = MemoryBackingStore::new(); - let mut analysis_db = marf.as_analysis_db(); - let cost_tracker = LimitedCostTracker::new_free(); - match run_analysis( - &QualifiedContractIdentifier::transient(), - &contract, - &mut analysis_db, - false, - cost_tracker, - epoch, - version, - true, - ) { - Ok(x) => { - // return the first type result of the type checker - let first_type = x - .type_map - .as_ref() - .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))? - .get_type_expected( - x.expressions - .last() - .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))?, - ) - .cloned(); - Ok((first_type, x)) - } - Err((e, _)) => Err(e), - } -} - -// Legacy function -// The analysis is not just checking type. -#[cfg(test)] -pub fn type_check( - contract_identifier: &QualifiedContractIdentifier, - expressions: &mut [SymbolicExpression], - analysis_db: &mut AnalysisDatabase, - insert_contract: bool, - epoch: &StacksEpochId, - version: &ClarityVersion, -) -> CheckResult { - run_analysis( - contract_identifier, - expressions, - analysis_db, - insert_contract, - // for the type check tests, the cost tracker's epoch doesn't - // matter: the costs in those tests are all free anyways. 
- LimitedCostTracker::new_free(), - *epoch, - *version, - true, - ) - .map_err(|(e, _cost_tracker)| e) -} - -#[allow(clippy::too_many_arguments)] -pub fn run_analysis( - contract_identifier: &QualifiedContractIdentifier, - expressions: &[SymbolicExpression], - analysis_db: &mut AnalysisDatabase, - save_contract: bool, - cost_tracker: LimitedCostTracker, - epoch: StacksEpochId, - version: ClarityVersion, - build_type_map: bool, -) -> Result { - let mut contract_analysis = ContractAnalysis::new( - contract_identifier.clone(), - expressions.to_vec(), - cost_tracker, - epoch, - version, - ); - let result = analysis_db.execute(|db| { - ReadOnlyChecker::run_pass(&epoch, &mut contract_analysis, db)?; - match epoch { - StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => { - TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db, build_type_map) - } - StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 - | StacksEpochId::Epoch31 => { - TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db, build_type_map) - } - StacksEpochId::Epoch10 => { - return Err(CheckErrors::Expects( - "Epoch 1.0 is not a valid epoch for analysis".into(), - ) - .into()) - } - }?; - TraitChecker::run_pass(&epoch, &mut contract_analysis, db)?; - ArithmeticOnlyChecker::check_contract_cost_eligible(&mut contract_analysis); - - if STORE_CONTRACT_SRC_INTERFACE { - let interface = build_contract_interface(&contract_analysis)?; - contract_analysis.contract_interface = Some(interface); - } - if save_contract { - db.insert_contract(contract_identifier, &contract_analysis)?; - } - Ok(()) - }); - match result { - Ok(_) => Ok(contract_analysis), - Err(e) => Err((e, contract_analysis.take_contract_cost_tracker())), - } -} +#[cfg(feature = "vm")] +pub mod engine; +#[cfg(feature = "vm")] +pub use engine::*; #[cfg(test)] mod tests; diff --git 
a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 838be9e6bb..6e6f57df8f 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -111,8 +111,8 @@ const SIMPLE_TOKENS: &str = "(define-map tokens { account: principal } { balance const SIMPLE_NAMES: &str = "(define-constant burn-address 'SP000000000000000000002Q6VF78) (define-private (price-function (name uint)) (if (< name u100000) u1000 u100)) - - (define-map name-map + + (define-map name-map { name: uint } { owner: principal }) (define-map preorder-map { name-hash: (buff 20) } @@ -121,7 +121,7 @@ const SIMPLE_NAMES: &str = "(define-constant burn-address 'SP0000000000000000000 (define-private (check-balance) (contract-call? .tokens my-get-token-balance tx-sender)) - (define-public (preorder + (define-public (preorder (name-hash (buff 20)) (name-price uint)) (let ((xfer-result (contract-call? .tokens token-transfer @@ -145,13 +145,13 @@ const SIMPLE_NAMES: &str = "(define-constant burn-address 'SP0000000000000000000 ;; preorder entry must exist! (unwrap! (map-get? preorder-map (tuple (name-hash (hash160 (xor name salt))))) (err 2))) - (name-entry + (name-entry (map-get? 
name-map (tuple (name name))))) (if (and ;; name shouldn't *already* exist (is-none name-entry) ;; preorder must have paid enough - (<= (price-function name) + (<= (price-function name) (get paid preorder-entry)) ;; preorder must have been the current principal (is-eq tx-sender @@ -280,7 +280,7 @@ fn test_names_tokens_contracts_interface() { { "name": "tn1", "type": "bool" }, { "name": "tn2", "type": "int128" }, { "name": "tn3", "type": { "buffer": { "length": 1 } }} - ] } } + ] } } }, { "name": "f11", "access": "private", @@ -413,7 +413,7 @@ fn test_names_tokens_contracts_interface() { "name": "n2", "type": "bool" } - ] + ] } }] } @@ -1478,10 +1478,10 @@ fn test_trait_to_subtrait_and_back() { )) (define-private (foo-0 (impl-contract )) (foo-1 impl-contract)) - + (define-private (foo-1 (impl-contract )) (foo-2 impl-contract)) - + (define-private (foo-2 (impl-contract )) true)"; diff --git a/clarity/src/vm/ast/ast_builder.rs b/clarity/src/vm/ast/ast_builder.rs new file mode 100644 index 0000000000..dc56482382 --- /dev/null +++ b/clarity/src/vm/ast/ast_builder.rs @@ -0,0 +1,616 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use stacks_common::types::StacksEpochId; + +use crate::vm::ast::definition_sorter::DefinitionSorter; +use crate::vm::ast::errors::ParseResult; +use crate::vm::ast::expression_identifier::ExpressionIdentifier; +use crate::vm::ast::parser::v1::{ + parse as parse_v1, parse_no_stack_limit as parse_v1_no_stack_limit, +}; +use crate::vm::ast::parser::v2::parse as parse_v2; +use crate::vm::ast::stack_depth_checker::{StackDepthChecker, VaryStackDepthChecker}; +use crate::vm::ast::sugar_expander::SugarExpander; +use crate::vm::ast::traits_resolver::TraitsResolver; +use crate::vm::ast::types::BuildASTPass; +pub use crate::vm::ast::types::ContractAST; +use crate::vm::costs::cost_functions::ClarityCostFunction; +use crate::vm::costs::{runtime_cost, CostTracker}; +use crate::vm::diagnostic::{Diagnostic, Level}; +use crate::vm::representations::PreSymbolicExpression; +use crate::vm::types::QualifiedContractIdentifier; +use crate::vm::ClarityVersion; + +/// Legacy function +#[cfg(any(test, feature = "testing"))] +pub fn parse( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + version: ClarityVersion, + epoch: StacksEpochId, +) -> Result, crate::vm::errors::Error> { + let ast = build_ast(contract_identifier, source_code, &mut (), version, epoch)?; + Ok(ast.expressions) +} + +// AST parser rulesets to apply. +define_u8_enum!(ASTRules { + Typical = 0, + PrecheckSize = 1 +}); + +/// Parse a program based on which epoch is active +fn parse_in_epoch( + source_code: &str, + epoch_id: StacksEpochId, + ast_rules: ASTRules, +) -> ParseResult> { + if epoch_id >= StacksEpochId::Epoch21 { + parse_v2(source_code) + } else if ast_rules == ASTRules::Typical { + parse_v1_no_stack_limit(source_code) + } else { + parse_v1(source_code) + } +} + +/// This is the part of the AST parser that runs without respect to cost analysis, specifically +/// pertaining to verifying that the AST is reasonably-sized. 
+/// Used mainly to filter transactions that might be too costly, as an optimization heuristic. +pub fn ast_check_size( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + clarity_version: ClarityVersion, + epoch_id: StacksEpochId, +) -> ParseResult { + let pre_expressions = parse_in_epoch(source_code, epoch_id, ASTRules::PrecheckSize)?; + let mut contract_ast = ContractAST::new(contract_identifier.clone(), pre_expressions); + StackDepthChecker::run_pass(&mut contract_ast, clarity_version)?; + VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version)?; + Ok(contract_ast) +} + +/// Build an AST according to a ruleset +pub fn build_ast_with_rules( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + cost_track: &mut T, + clarity_version: ClarityVersion, + epoch: StacksEpochId, + ruleset: ASTRules, +) -> ParseResult { + match ruleset { + // After epoch 2.1, prechecking the size is required + ASTRules::Typical if epoch < StacksEpochId::Epoch21 => build_ast_typical( + contract_identifier, + source_code, + cost_track, + clarity_version, + epoch, + ), + _ => build_ast_precheck_size( + contract_identifier, + source_code, + cost_track, + clarity_version, + epoch, + ), + } +} + +/// Build an AST with the typical rules +fn build_ast_typical( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + cost_track: &mut T, + clarity_version: ClarityVersion, + epoch: StacksEpochId, +) -> ParseResult { + let (contract, _, _) = inner_build_ast( + contract_identifier, + source_code, + cost_track, + clarity_version, + epoch, + ASTRules::Typical, + true, + )?; + Ok(contract) +} + +/// Used by developer tools only. Continues on through errors by inserting +/// placeholders into the AST. Collects as many diagnostics as possible. +/// Always returns a ContractAST, a vector of diagnostics, and a boolean +/// that indicates if the build was successful. 
+#[allow(clippy::unwrap_used)] +pub fn build_ast_with_diagnostics( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + cost_track: &mut T, + clarity_version: ClarityVersion, + epoch: StacksEpochId, +) -> (ContractAST, Vec, bool) { + inner_build_ast( + contract_identifier, + source_code, + cost_track, + clarity_version, + epoch, + ASTRules::PrecheckSize, + false, + ) + .unwrap() +} + +fn inner_build_ast( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + cost_track: &mut T, + clarity_version: ClarityVersion, + epoch: StacksEpochId, + ast_rules: ASTRules, + error_early: bool, +) -> ParseResult<(ContractAST, Vec, bool)> { + let cost_err = match runtime_cost( + ClarityCostFunction::AstParse, + cost_track, + source_code.len() as u64, + ) { + Err(e) if error_early => return Err(e.into()), + Err(e) => Some(e), + _ => None, + }; + + let (pre_expressions, mut diagnostics, mut success) = if epoch >= StacksEpochId::Epoch21 { + if error_early { + let exprs = crate::vm::ast::parser::v2::parse(source_code)?; + (exprs, Vec::new(), true) + } else { + crate::vm::ast::parser::v2::parse_collect_diagnostics(source_code) + } + } else { + let parse_result = match ast_rules { + ASTRules::Typical => parse_v1_no_stack_limit(source_code), + ASTRules::PrecheckSize => parse_v1(source_code), + }; + match parse_result { + Ok(pre_expressions) => (pre_expressions, vec![], true), + Err(error) if error_early => return Err(error), + Err(error) => (vec![], vec![error.diagnostic], false), + } + }; + + if let Some(e) = cost_err { + diagnostics.insert( + 0, + Diagnostic { + level: Level::Error, + message: format!("runtime_cost error: {:?}", e), + spans: vec![], + suggestion: None, + }, + ); + } + + let mut contract_ast = ContractAST::new(contract_identifier.clone(), pre_expressions); + match StackDepthChecker::run_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success 
= false; + } + _ => (), + } + + if ast_rules != ASTRules::Typical { + // run extra stack-depth pass for tuples + match VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; + } + _ => (), + } + } + + match ExpressionIdentifier::run_pre_expression_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; + } + _ => (), + } + match DefinitionSorter::run_pass(&mut contract_ast, cost_track, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; + } + _ => (), + } + match TraitsResolver::run_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; + } + _ => (), + } + match SugarExpander::run_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; + } + _ => (), + } + match ExpressionIdentifier::run_expression_pass(&mut contract_ast, clarity_version) { + Err(e) if error_early => return Err(e), + Err(e) => { + diagnostics.push(e.diagnostic); + success = false; + } + _ => (), + } + Ok((contract_ast, diagnostics, success)) +} + +/// Built an AST, but pre-check the size of the AST before doing more work +fn build_ast_precheck_size( + contract_identifier: &QualifiedContractIdentifier, + source_code: &str, + cost_track: &mut T, + clarity_version: ClarityVersion, + epoch: StacksEpochId, +) -> ParseResult { + let (contract, _, _) = inner_build_ast( + contract_identifier, + source_code, + cost_track, + clarity_version, + epoch, + ASTRules::PrecheckSize, + true, + )?; + Ok(contract) +} + +/// Test compatibility +#[cfg(any(test, feature = "testing"))] +pub fn build_ast( + contract_identifier: 
&QualifiedContractIdentifier, + source_code: &str, + cost_track: &mut T, + clarity_version: ClarityVersion, + epoch_id: StacksEpochId, +) -> ParseResult { + build_ast_typical( + contract_identifier, + source_code, + cost_track, + clarity_version, + epoch_id, + ) +} + +#[cfg(test)] +mod test { + use hashbrown::HashMap; + use stacks_common::types::StacksEpochId; + + use crate::vm::ast::errors::ParseErrors; + use crate::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; + use crate::vm::ast::{build_ast, build_ast_with_rules, ASTRules}; + use crate::vm::costs::{LimitedCostTracker, *}; + use crate::vm::representations::depth_traverse; + use crate::vm::types::QualifiedContractIdentifier; + use crate::vm::{ClarityCostFunction, ClarityName, ClarityVersion, MAX_CALL_STACK_DEPTH}; + + #[derive(PartialEq, Debug)] + struct UnitTestTracker { + invoked_functions: Vec<(ClarityCostFunction, Vec)>, + invocation_count: u64, + cost_addition_count: u64, + } + impl UnitTestTracker { + pub fn new() -> Self { + UnitTestTracker { + invoked_functions: vec![], + invocation_count: 0, + cost_addition_count: 0, + } + } + } + impl CostTracker for UnitTestTracker { + fn compute_cost( + &mut self, + cost_f: ClarityCostFunction, + input: &[u64], + ) -> std::result::Result { + self.invoked_functions.push((cost_f, input.to_vec())); + self.invocation_count += 1; + Ok(ExecutionCost::ZERO) + } + fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { + self.cost_addition_count += 1; + Ok(()) + } + fn add_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { + Ok(()) + } + fn drop_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { + Ok(()) + } + fn reset_memory(&mut self) {} + fn short_circuit_contract_call( + &mut self, + _contract: &QualifiedContractIdentifier, + _function: &ClarityName, + _input: &[u64], + ) -> Result { + Ok(false) + } + } + + #[test] + fn test_cost_tracking_deep_contracts_2_05() { + let clarity_version = 
ClarityVersion::Clarity1; + let stack_limit = + (AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1) as usize; + let exceeds_stack_depth_tuple = format!( + "{}u1 {}", + "{ a : ".repeat(stack_limit + 1), + "} ".repeat(stack_limit + 1) + ); + + // for deep lists, a test like this works: + // it can assert a limit, that you can also verify + // by disabling `VaryStackDepthChecker` and arbitrarily bumping up the parser lexer limits + // and see that it produces the same result + let exceeds_stack_depth_list = format!( + "{}u1 {}", + "(list ".repeat(stack_limit + 1), + ")".repeat(stack_limit + 1) + ); + + // with old rules, this is just ExpressionStackDepthTooDeep + let mut cost_track = UnitTestTracker::new(); + let err = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_list, + &mut cost_track, + clarity_version, + StacksEpochId::Epoch2_05, + ASTRules::Typical, + ) + .expect_err("Contract should error in parsing"); + + let expected_err = ParseErrors::ExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + assert_eq!(expected_list_cost_state, cost_track); + + // with new rules, this is now VaryExpressionStackDepthTooDeep + let mut cost_track = UnitTestTracker::new(); + let err = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_list, + &mut cost_track, + clarity_version, + StacksEpochId::Epoch2_05, + ASTRules::PrecheckSize, + ) + .expect_err("Contract should error in parsing"); + + let expected_err = ParseErrors::VaryExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + 
assert_eq!(expected_list_cost_state, cost_track); + + // you cannot do the same for tuples! + // in ASTRules::Typical, this passes + let mut cost_track = UnitTestTracker::new(); + let _ = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_tuple, + &mut cost_track, + clarity_version, + StacksEpochId::Epoch2_05, + ASTRules::Typical, + ) + .expect("Contract should parse with ASTRules::Typical"); + + // this actually won't even error without + // the VaryStackDepthChecker changes. + let mut cost_track = UnitTestTracker::new(); + let err = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_tuple, + &mut cost_track, + clarity_version, + StacksEpochId::Epoch2_05, + ASTRules::PrecheckSize, + ) + .expect_err("Contract should error in parsing with ASTRules::PrecheckSize"); + + let expected_err = ParseErrors::VaryExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + assert_eq!(expected_list_cost_state, cost_track); + } + + #[test] + fn test_cost_tracking_deep_contracts_2_1() { + for clarity_version in &[ClarityVersion::Clarity1, ClarityVersion::Clarity2] { + let stack_limit = + (AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1) as usize; + let exceeds_stack_depth_tuple = format!( + "{}u1 {}", + "{ a : ".repeat(stack_limit + 1), + "} ".repeat(stack_limit + 1) + ); + + // for deep lists, a test like this works: + // it can assert a limit, that you can also verify + // by disabling `VaryStackDepthChecker` and arbitrarily bumping up the parser lexer limits + // and see that it produces the same result + let exceeds_stack_depth_list = format!( + "{}u1 {}", + "(list ".repeat(stack_limit + 1), + ")".repeat(stack_limit + 1) + ); + + // with old rules, this is just ExpressionStackDepthTooDeep + let mut 
cost_track = UnitTestTracker::new(); + let err = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_list, + &mut cost_track, + *clarity_version, + StacksEpochId::Epoch21, + ASTRules::Typical, + ) + .expect_err("Contract should error in parsing"); + + let expected_err = ParseErrors::ExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + assert_eq!(expected_list_cost_state, cost_track); + + // in 2.1, this is still ExpressionStackDepthTooDeep + let mut cost_track = UnitTestTracker::new(); + let err = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_list, + &mut cost_track, + *clarity_version, + StacksEpochId::Epoch21, + ASTRules::PrecheckSize, + ) + .expect_err("Contract should error in parsing"); + + let expected_err = ParseErrors::ExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + assert_eq!(expected_list_cost_state, cost_track); + + // in 2.1, ASTRules::Typical is ignored -- this still fails to parse + let mut cost_track = UnitTestTracker::new(); + let _ = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_tuple, + &mut cost_track, + *clarity_version, + StacksEpochId::Epoch21, + ASTRules::Typical, + ) + .expect_err("Contract should error in parsing"); + + let expected_err = ParseErrors::ExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + 
assert_eq!(expected_list_cost_state, cost_track); + + // in 2.1, ASTRules::PrecheckSize is still ignored -- this still fails to parse + let mut cost_track = UnitTestTracker::new(); + let err = build_ast_with_rules( + &QualifiedContractIdentifier::transient(), + &exceeds_stack_depth_tuple, + &mut cost_track, + *clarity_version, + StacksEpochId::Epoch21, + ASTRules::PrecheckSize, + ) + .expect_err("Contract should error in parsing"); + + let expected_err = ParseErrors::ExpressionStackDepthTooDeep; + let expected_list_cost_state = UnitTestTracker { + invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], + invocation_count: 1, + cost_addition_count: 1, + }; + + assert_eq!(&expected_err, &err.err); + assert_eq!(expected_list_cost_state, cost_track); + } + } + + #[test] + fn test_expression_identification_tuples() { + for version in &[ClarityVersion::Clarity1, ClarityVersion::Clarity2] { + for epoch in &[StacksEpochId::Epoch2_05, StacksEpochId::Epoch21] { + let progn = "{ a: (+ 1 2 3), + b: 1, + c: 3 }"; + + let mut cost_track = LimitedCostTracker::new_free(); + let ast = build_ast( + &QualifiedContractIdentifier::transient(), + progn, + &mut cost_track, + *version, + *epoch, + ) + .unwrap() + .expressions; + + let mut visited = HashMap::new(); + + for expr in ast.iter() { + depth_traverse::<_, _, ()>(expr, |x| { + assert!(!visited.contains_key(&x.id)); + visited.insert(x.id, true); + Ok(()) + }) + .unwrap(); + } + } + } + } +} diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 263fc86526..c02c709bd0 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -14,610 +14,22 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+#[cfg(feature = "vm")] pub mod definition_sorter; +pub mod errors; +#[cfg(feature = "vm")] pub mod expression_identifier; pub mod parser; -pub mod traits_resolver; - -pub mod errors; +#[cfg(feature = "vm")] pub mod stack_depth_checker; +#[cfg(feature = "vm")] pub mod sugar_expander; +#[cfg(feature = "vm")] +pub mod traits_resolver; +#[cfg(feature = "vm")] pub mod types; -use stacks_common::types::StacksEpochId; - -use self::definition_sorter::DefinitionSorter; -use self::errors::ParseResult; -use self::expression_identifier::ExpressionIdentifier; -use self::parser::v1::{parse as parse_v1, parse_no_stack_limit as parse_v1_no_stack_limit}; -use self::parser::v2::parse as parse_v2; -use self::stack_depth_checker::{StackDepthChecker, VaryStackDepthChecker}; -use self::sugar_expander::SugarExpander; -use self::traits_resolver::TraitsResolver; -use self::types::BuildASTPass; -pub use self::types::ContractAST; -use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::costs::{runtime_cost, CostTracker}; -use crate::vm::diagnostic::{Diagnostic, Level}; -use crate::vm::representations::PreSymbolicExpression; -use crate::vm::types::QualifiedContractIdentifier; -use crate::vm::ClarityVersion; - -/// Legacy function -#[cfg(any(test, feature = "testing"))] -pub fn parse( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - version: ClarityVersion, - epoch: StacksEpochId, -) -> Result, crate::vm::errors::Error> { - let ast = build_ast(contract_identifier, source_code, &mut (), version, epoch)?; - Ok(ast.expressions) -} - -// AST parser rulesets to apply. 
-define_u8_enum!(ASTRules { - Typical = 0, - PrecheckSize = 1 -}); - -/// Parse a program based on which epoch is active -fn parse_in_epoch( - source_code: &str, - epoch_id: StacksEpochId, - ast_rules: ASTRules, -) -> ParseResult> { - if epoch_id >= StacksEpochId::Epoch21 { - parse_v2(source_code) - } else if ast_rules == ASTRules::Typical { - parse_v1_no_stack_limit(source_code) - } else { - parse_v1(source_code) - } -} - -/// This is the part of the AST parser that runs without respect to cost analysis, specifically -/// pertaining to verifying that the AST is reasonably-sized. -/// Used mainly to filter transactions that might be too costly, as an optimization heuristic. -pub fn ast_check_size( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - clarity_version: ClarityVersion, - epoch_id: StacksEpochId, -) -> ParseResult { - let pre_expressions = parse_in_epoch(source_code, epoch_id, ASTRules::PrecheckSize)?; - let mut contract_ast = ContractAST::new(contract_identifier.clone(), pre_expressions); - StackDepthChecker::run_pass(&mut contract_ast, clarity_version)?; - VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version)?; - Ok(contract_ast) -} - -/// Build an AST according to a ruleset -pub fn build_ast_with_rules( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch: StacksEpochId, - ruleset: ASTRules, -) -> ParseResult { - match ruleset { - // After epoch 2.1, prechecking the size is required - ASTRules::Typical if epoch < StacksEpochId::Epoch21 => build_ast_typical( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ), - _ => build_ast_precheck_size( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ), - } -} - -/// Build an AST with the typical rules -fn build_ast_typical( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - 
clarity_version: ClarityVersion, - epoch: StacksEpochId, -) -> ParseResult { - let (contract, _, _) = inner_build_ast( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ASTRules::Typical, - true, - )?; - Ok(contract) -} - -/// Used by developer tools only. Continues on through errors by inserting -/// placeholders into the AST. Collects as many diagnostics as possible. -/// Always returns a ContractAST, a vector of diagnostics, and a boolean -/// that indicates if the build was successful. -#[allow(clippy::unwrap_used)] -pub fn build_ast_with_diagnostics( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch: StacksEpochId, -) -> (ContractAST, Vec, bool) { - inner_build_ast( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ASTRules::PrecheckSize, - false, - ) - .unwrap() -} - -fn inner_build_ast( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch: StacksEpochId, - ast_rules: ASTRules, - error_early: bool, -) -> ParseResult<(ContractAST, Vec, bool)> { - let cost_err = match runtime_cost( - ClarityCostFunction::AstParse, - cost_track, - source_code.len() as u64, - ) { - Err(e) if error_early => return Err(e.into()), - Err(e) => Some(e), - _ => None, - }; - - let (pre_expressions, mut diagnostics, mut success) = if epoch >= StacksEpochId::Epoch21 { - if error_early { - let exprs = parser::v2::parse(source_code)?; - (exprs, Vec::new(), true) - } else { - parser::v2::parse_collect_diagnostics(source_code) - } - } else { - let parse_result = match ast_rules { - ASTRules::Typical => parse_v1_no_stack_limit(source_code), - ASTRules::PrecheckSize => parse_v1(source_code), - }; - match parse_result { - Ok(pre_expressions) => (pre_expressions, vec![], true), - Err(error) if error_early => return Err(error), - Err(error) => (vec![], 
vec![error.diagnostic], false), - } - }; - - if let Some(e) = cost_err { - diagnostics.insert( - 0, - Diagnostic { - level: Level::Error, - message: format!("runtime_cost error: {:?}", e), - spans: vec![], - suggestion: None, - }, - ); - } - - let mut contract_ast = ContractAST::new(contract_identifier.clone(), pre_expressions); - match StackDepthChecker::run_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - - if ast_rules != ASTRules::Typical { - // run extra stack-depth pass for tuples - match VaryStackDepthChecker::run_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - } - - match ExpressionIdentifier::run_pre_expression_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - match DefinitionSorter::run_pass(&mut contract_ast, cost_track, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - match TraitsResolver::run_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - match SugarExpander::run_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - match ExpressionIdentifier::run_expression_pass(&mut contract_ast, clarity_version) { - Err(e) if error_early => return Err(e), - Err(e) => { - diagnostics.push(e.diagnostic); - success = false; - } - _ => (), - } - Ok((contract_ast, diagnostics, success)) -} - -/// Built an AST, but pre-check the size of the AST before doing more work 
-fn build_ast_precheck_size( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch: StacksEpochId, -) -> ParseResult { - let (contract, _, _) = inner_build_ast( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch, - ASTRules::PrecheckSize, - true, - )?; - Ok(contract) -} - -/// Test compatibility -#[cfg(any(test, feature = "testing"))] -pub fn build_ast( - contract_identifier: &QualifiedContractIdentifier, - source_code: &str, - cost_track: &mut T, - clarity_version: ClarityVersion, - epoch_id: StacksEpochId, -) -> ParseResult { - build_ast_typical( - contract_identifier, - source_code, - cost_track, - clarity_version, - epoch_id, - ) -} - -#[cfg(test)] -mod test { - use hashbrown::HashMap; - use stacks_common::types::StacksEpochId; - - use crate::vm::ast::errors::ParseErrors; - use crate::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; - use crate::vm::ast::{build_ast, build_ast_with_rules, ASTRules}; - use crate::vm::costs::{LimitedCostTracker, *}; - use crate::vm::representations::depth_traverse; - use crate::vm::types::QualifiedContractIdentifier; - use crate::vm::{ClarityCostFunction, ClarityName, ClarityVersion, MAX_CALL_STACK_DEPTH}; - - #[derive(PartialEq, Debug)] - struct UnitTestTracker { - invoked_functions: Vec<(ClarityCostFunction, Vec)>, - invocation_count: u64, - cost_addition_count: u64, - } - impl UnitTestTracker { - pub fn new() -> Self { - UnitTestTracker { - invoked_functions: vec![], - invocation_count: 0, - cost_addition_count: 0, - } - } - } - impl CostTracker for UnitTestTracker { - fn compute_cost( - &mut self, - cost_f: ClarityCostFunction, - input: &[u64], - ) -> std::result::Result { - self.invoked_functions.push((cost_f, input.to_vec())); - self.invocation_count += 1; - Ok(ExecutionCost::ZERO) - } - fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { - self.cost_addition_count += 1; 
- Ok(()) - } - fn add_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { - Ok(()) - } - fn drop_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { - Ok(()) - } - fn reset_memory(&mut self) {} - fn short_circuit_contract_call( - &mut self, - _contract: &QualifiedContractIdentifier, - _function: &ClarityName, - _input: &[u64], - ) -> Result { - Ok(false) - } - } - - #[test] - fn test_cost_tracking_deep_contracts_2_05() { - let clarity_version = ClarityVersion::Clarity1; - let stack_limit = - (AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1) as usize; - let exceeds_stack_depth_tuple = format!( - "{}u1 {}", - "{ a : ".repeat(stack_limit + 1), - "} ".repeat(stack_limit + 1) - ); - - // for deep lists, a test like this works: - // it can assert a limit, that you can also verify - // by disabling `VaryStackDepthChecker` and arbitrarily bumping up the parser lexer limits - // and see that it produces the same result - let exceeds_stack_depth_list = format!( - "{}u1 {}", - "(list ".repeat(stack_limit + 1), - ")".repeat(stack_limit + 1) - ); - - // with old rules, this is just ExpressionStackDepthTooDeep - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_list, - &mut cost_track, - clarity_version, - StacksEpochId::Epoch2_05, - ASTRules::Typical, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // with new rules, this is now VaryExpressionStackDepthTooDeep - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - 
&exceeds_stack_depth_list, - &mut cost_track, - clarity_version, - StacksEpochId::Epoch2_05, - ASTRules::PrecheckSize, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::VaryExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // you cannot do the same for tuples! - // in ASTRules::Typical, this passes - let mut cost_track = UnitTestTracker::new(); - let _ = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_tuple, - &mut cost_track, - clarity_version, - StacksEpochId::Epoch2_05, - ASTRules::Typical, - ) - .expect("Contract should parse with ASTRules::Typical"); - - // this actually won't even error without - // the VaryStackDepthChecker changes. - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_tuple, - &mut cost_track, - clarity_version, - StacksEpochId::Epoch2_05, - ASTRules::PrecheckSize, - ) - .expect_err("Contract should error in parsing with ASTRules::PrecheckSize"); - - let expected_err = ParseErrors::VaryExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - } - - #[test] - fn test_cost_tracking_deep_contracts_2_1() { - for clarity_version in &[ClarityVersion::Clarity1, ClarityVersion::Clarity2] { - let stack_limit = - (AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1) as usize; - let exceeds_stack_depth_tuple = format!( - "{}u1 {}", - "{ a : ".repeat(stack_limit + 1), - "} 
".repeat(stack_limit + 1) - ); - - // for deep lists, a test like this works: - // it can assert a limit, that you can also verify - // by disabling `VaryStackDepthChecker` and arbitrarily bumping up the parser lexer limits - // and see that it produces the same result - let exceeds_stack_depth_list = format!( - "{}u1 {}", - "(list ".repeat(stack_limit + 1), - ")".repeat(stack_limit + 1) - ); - - // with old rules, this is just ExpressionStackDepthTooDeep - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_list, - &mut cost_track, - *clarity_version, - StacksEpochId::Epoch21, - ASTRules::Typical, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // in 2.1, this is still ExpressionStackDepthTooDeep - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_list, - &mut cost_track, - *clarity_version, - StacksEpochId::Epoch21, - ASTRules::PrecheckSize, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![500])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // in 2.1, ASTRules::Typical is ignored -- this still fails to parse - let mut cost_track = UnitTestTracker::new(); - let _ = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - 
&exceeds_stack_depth_tuple, - &mut cost_track, - *clarity_version, - StacksEpochId::Epoch21, - ASTRules::Typical, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - - // in 2.1, ASTRules::PrecheckSize is still ignored -- this still fails to parse - let mut cost_track = UnitTestTracker::new(); - let err = build_ast_with_rules( - &QualifiedContractIdentifier::transient(), - &exceeds_stack_depth_tuple, - &mut cost_track, - *clarity_version, - StacksEpochId::Epoch21, - ASTRules::PrecheckSize, - ) - .expect_err("Contract should error in parsing"); - - let expected_err = ParseErrors::ExpressionStackDepthTooDeep; - let expected_list_cost_state = UnitTestTracker { - invoked_functions: vec![(ClarityCostFunction::AstParse, vec![571])], - invocation_count: 1, - cost_addition_count: 1, - }; - - assert_eq!(&expected_err, &err.err); - assert_eq!(expected_list_cost_state, cost_track); - } - } - - #[test] - fn test_expression_identification_tuples() { - for version in &[ClarityVersion::Clarity1, ClarityVersion::Clarity2] { - for epoch in &[StacksEpochId::Epoch2_05, StacksEpochId::Epoch21] { - let progn = "{ a: (+ 1 2 3), - b: 1, - c: 3 }"; - - let mut cost_track = LimitedCostTracker::new_free(); - let ast = build_ast( - &QualifiedContractIdentifier::transient(), - progn, - &mut cost_track, - *version, - *epoch, - ) - .unwrap() - .expressions; - - let mut visited = HashMap::new(); - for expr in ast.iter() { - depth_traverse::<_, _, ()>(expr, |x| { - assert!(!visited.contains_key(&x.id)); - visited.insert(x.id, true); - Ok(()) - }) - .unwrap(); - } - } - } - } -} +#[cfg(feature = "vm")] +pub mod ast_builder; +#[cfg(feature = "vm")] +pub use 
ast_builder::*; diff --git a/clarity/src/vm/ast/parser/mod.rs b/clarity/src/vm/ast/parser/mod.rs index ced9f3aafc..0e54c950cc 100644 --- a/clarity/src/vm/ast/parser/mod.rs +++ b/clarity/src/vm/ast/parser/mod.rs @@ -14,5 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#[cfg(feature = "vm")] pub mod v1; pub mod v2; diff --git a/clarity/src/vm/ast/parser/v2/lexer/mod.rs b/clarity/src/vm/ast/parser/v2/lexer/mod.rs index bbd6136916..6775bee523 100644 --- a/clarity/src/vm/ast/parser/v2/lexer/mod.rs +++ b/clarity/src/vm/ast/parser/v2/lexer/mod.rs @@ -1425,7 +1425,7 @@ mod tests { +-*/ < <= > >=.: ;; comment "hello" u"world" 0x0123456789abcdeffedcba9876543210 - + foo-bar_ "#, diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index dd5a900364..046a151535 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -1,3613 +1,6 @@ pub mod lexer; -use stacks_common::util::hash::hex_bytes; - -use self::lexer::token::{PlacedToken, Token}; -use self::lexer::Lexer; -use crate::vm::ast::errors::{ParseError, ParseErrors, ParseResult, PlacedError}; -use crate::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; -use crate::vm::diagnostic::{DiagnosableError, Diagnostic, Level}; -use crate::vm::representations::{ClarityName, ContractName, PreSymbolicExpression, Span}; -use crate::vm::types::{ - CharType, PrincipalData, QualifiedContractIdentifier, SequenceData, TraitIdentifier, UTF8Data, - Value, -}; -use crate::vm::MAX_CALL_STACK_DEPTH; - -pub struct Parser<'a> { - lexer: Lexer<'a>, - tokens: Vec, - next_token: usize, - diagnostics: Vec, - success: bool, - // `fail_fast` mode indicates that the parser should not report warnings - // and should exit on the first error. This is useful for parsing in the - // context of a stacks-node, while normal mode is useful for developers. 
- fail_fast: bool, - nesting_depth: u64, -} - -pub const MAX_STRING_LEN: usize = 128; -pub const MAX_CONTRACT_NAME_LEN: usize = 40; -pub const MAX_NESTING_DEPTH: u64 = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; - -enum OpenTupleStatus { - /// The next thing to parse is a key - ParseKey, - /// The next thing to parse is a value - ParseValue, -} - -enum SetupTupleResult { - OpenTuple(OpenTuple), - Closed(PreSymbolicExpression), -} - -struct OpenTuple { - nodes: Vec, - span: Span, - /// Is the next node is expected to be a key or value? All of the preparatory work is done _before_ the parse loop tries to digest the next - /// node (i.e., whitespace ingestion and checking for commas) - expects: OpenTupleStatus, - /// This is the last peeked token before trying to parse a key or value node, used for - /// diagnostic reporting - diagnostic_token: PlacedToken, -} - -enum ParserStackElement { - OpenList { - nodes: Vec, - span: Span, - whitespace: bool, - }, - OpenTuple(OpenTuple), -} - -impl<'a> Parser<'a> { - pub fn new(input: &'a str, fail_fast: bool) -> Result { - let lexer = match Lexer::new(input, fail_fast) { - Ok(lexer) => lexer, - Err(e) => return Err(ParseErrors::Lexer(e)), - }; - let mut p = Self { - lexer, - tokens: vec![], - next_token: 0, - diagnostics: vec![], - success: true, - fail_fast, - nesting_depth: 0, - }; - - loop { - let token = match p.lexer.read_token() { - Ok(token) => token, - Err(e) => { - assert!( - fail_fast, - "Parser::read_token should not return an error when not in fail_fast mode" - ); - p.success = false; - return Err(ParseErrors::Lexer(e)); - } - }; - if token.token == Token::Eof { - p.tokens.push(token); - break; - } - p.tokens.push(token); - } - p.diagnostics = p - .lexer - .diagnostics - .iter() - .map(|lex_error| PlacedError { - e: ParseErrors::Lexer(lex_error.e.clone()), - span: lex_error.span.clone(), - }) - .collect(); - p.success = p.lexer.success; - Ok(p) - } - - fn add_diagnostic(&mut self, e: 
ParseErrors, span: Span) -> ParseResult<()> { - if self.fail_fast { - return Err(ParseError::new(e)); - } else { - if e.level() == Level::Error { - self.success = false; - } - self.diagnostics.push(PlacedError { e, span }); - } - Ok(()) - } - - fn next_token(&mut self) -> Option { - if self.next_token >= self.tokens.len() { - return None; - } - let token = self.tokens[self.next_token].clone(); - self.next_token += 1; - Some(token) - } - - fn peek_next_token(&mut self) -> PlacedToken { - if self.next_token >= self.tokens.len() { - PlacedToken { - span: Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1, - }, - token: Token::Eof, - } - } else { - self.tokens[self.next_token].clone() - } - } - - /// Get a reference to the last processed token. If there is no last token, - /// raises an UnexpectedParserFailure. - fn peek_last_token(&self) -> ParseResult<&PlacedToken> { - if self.next_token == 0 { - return Err(ParseError::new(ParseErrors::UnexpectedParserFailure)); - } - self.tokens - .get(self.next_token - 1) - .ok_or_else(|| ParseError::new(ParseErrors::UnexpectedParserFailure)) - } - - fn skip_to_end(&mut self) { - self.next_token = self.tokens.len(); - } - - fn ignore_whitespace(&mut self) -> bool { - let mut found = false; - loop { - if self.next_token >= self.tokens.len() { - return found; - } - let token = &self.tokens[self.next_token]; - match &token.token { - Token::Whitespace => { - self.next_token += 1; - found = true; - } - _ => return found, - } - } - } - - fn ignore_whitespace_and_comments(&mut self) -> Vec { - let mut comments = Vec::new(); - loop { - if self.next_token >= self.tokens.len() { - return comments; - } - let token = &self.tokens[self.next_token]; - match &token.token { - Token::Whitespace => { - self.next_token += 1; - } - Token::Comment(comment) => { - let mut comment = PreSymbolicExpression::comment(comment.to_string()); - comment.copy_span(&token.span); - comments.push(comment); - self.next_token += 1; - } - _ => 
return comments, - } - } - } - - // TODO: add tests from mutation testing results #4829 - #[cfg_attr(test, mutants::skip)] - /// Process a new child node for an AST expression that is open and waiting for children nodes. For example, - /// a list or tuple expression that is waiting for child expressions. - /// - /// Returns Some(node) if the open node is finished and should be popped from the stack. - /// Returns None if the open node is not finished and should remain on the parser stack. - fn handle_open_node( - &mut self, - open_node: &mut ParserStackElement, - node_opt: Option, - ) -> ParseResult> { - match open_node { - ParserStackElement::OpenList { - ref mut nodes, - ref mut span, - ref mut whitespace, - } => { - if let Some(node) = node_opt { - if !*whitespace && node.match_comment().is_none() { - self.add_diagnostic(ParseErrors::ExpectedWhitespace, node.span().clone())?; - } - nodes.push(node); - *whitespace = self.ignore_whitespace(); - Ok(None) - } else { - let token = self.peek_last_token()?.clone(); - match token.token { - Token::Rparen => { - span.end_line = token.span.end_line; - span.end_column = token.span.end_column; - let out_nodes: Vec<_> = std::mem::take(nodes); - let mut e = PreSymbolicExpression::list(out_nodes); - e.copy_span(span); - Ok(Some(e)) - } - Token::Eof => { - // Report an error, but return the list and attempt to continue parsing - self.add_diagnostic( - ParseErrors::ExpectedClosing(Token::Rparen), - token.span.clone(), - )?; - self.add_diagnostic( - ParseErrors::NoteToMatchThis(Token::Lparen), - span.clone(), - )?; - span.end_line = token.span.end_line; - span.end_column = token.span.end_column; - let out_nodes: Vec<_> = std::mem::take(nodes); - let mut e = PreSymbolicExpression::list(out_nodes); - e.copy_span(span); - Ok(Some(e)) - } - _ => { - // Report an error, then skip this token - self.add_diagnostic( - ParseErrors::UnexpectedToken(token.token.clone()), - token.span, - )?; - *whitespace = self.ignore_whitespace(); - 
Ok(None) - } - } - } - } - ParserStackElement::OpenTuple(ref mut open_tuple) => { - self.handle_open_tuple(open_tuple, node_opt) - } - } - } - - // TODO: add tests from mutation testing results #4848 - #[cfg_attr(test, mutants::skip)] - fn handle_open_tuple( - &mut self, - open_tuple: &mut OpenTuple, - node_opt: Option, - ) -> ParseResult> { - match &open_tuple.expects { - OpenTupleStatus::ParseKey => { - // expecting to parse a key - let node = match node_opt { - Some(node) => node, - None => { - // mimic parse_node_or_eof() behavior - // if last token was an EOF, error out the tuple - // if the last token was something else, just yield back to the parse loop - let last_token = self.peek_last_token()?.clone(); - match last_token.token { - Token::Eof => { - self.add_diagnostic( - ParseErrors::ExpectedClosing(Token::Rbrace), - open_tuple.diagnostic_token.span.clone(), - )?; - self.add_diagnostic( - ParseErrors::NoteToMatchThis(Token::Lbrace), - open_tuple.span.clone(), - )?; - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - let span_before_eof = &self.tokens[self.tokens.len() - 2].span; - open_tuple.span.end_line = span_before_eof.end_line; - open_tuple.span.end_column = span_before_eof.end_column; - e.copy_span(&open_tuple.span); - return Ok(Some(e)); - } - _ => { - // Report an error, then skip this token - self.add_diagnostic( - ParseErrors::UnexpectedToken(last_token.token), - last_token.span, - )?; - return Ok(None); // Ok(None) yields to the parse loop - } - } - } - }; - open_tuple.nodes.push(node); - // added key to the nodes list, now do all preprocessing to prepare to parse - // the value node - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - // Look for ':' - let token = self.peek_next_token(); - match token.token { - Token::Colon => { - self.next_token(); - } - Token::Eof => { - // This indicates we have reached the end of the 
input. - // Create a placeholder value so that parsing can continue, - // then return. - self.add_diagnostic(ParseErrors::TupleColonExpectedv2, token.span.clone())?; - let mut placeholder = PreSymbolicExpression::placeholder("".to_string()); - placeholder.copy_span(&token.span); - open_tuple.nodes.push(placeholder); // Placeholder value - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - let span_before_eof = &self.tokens[self.tokens.len() - 2].span; - open_tuple.span.end_line = span_before_eof.end_line; - open_tuple.span.end_column = span_before_eof.end_column; - e.copy_span(&open_tuple.span); - return Ok(Some(e)); - } - _ => { - // Record an error, then continue to parse - self.add_diagnostic(ParseErrors::TupleColonExpectedv2, token.span.clone())?; - } - } - open_tuple.diagnostic_token = token; - - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - open_tuple.expects = OpenTupleStatus::ParseValue; - Ok(None) - } - OpenTupleStatus::ParseValue => { - // expecting to parse a value - let node = match node_opt { - Some(node) => node, - None => { - // mimic parse_node_or_eof() behavior - // if last token was an EOF, error out the tuple - // if the last token was something else, just yield back to the parse loop - let last_token = self.peek_last_token()?.clone(); - match last_token.token { - Token::Eof => { - // This indicates we have reached the end of the input. - // Create a placeholder value so that parsing can continue, - // then return. 
- let eof_span = last_token.span; - - self.add_diagnostic( - ParseErrors::TupleValueExpected, - open_tuple.diagnostic_token.span.clone(), - )?; - let mut placeholder = - PreSymbolicExpression::placeholder("".to_string()); - placeholder.copy_span(&eof_span); - open_tuple.nodes.push(placeholder); // Placeholder value - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - open_tuple.span.end_line = - open_tuple.diagnostic_token.span.end_line; - open_tuple.span.end_column = - open_tuple.diagnostic_token.span.end_column; - e.copy_span(&open_tuple.span); - return Ok(Some(e)); - } - _ => { - // Report an error, then skip this token - self.add_diagnostic( - ParseErrors::UnexpectedToken(last_token.token), - last_token.span, - )?; - return Ok(None); // Ok(None) yields to the parse loop - } - } - } - }; - open_tuple.nodes.push(node); - - // now do all preprocessing to prepare to parse a key node - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - let token = self.peek_next_token(); - match token.token { - Token::Comma => { - self.next_token(); - } - Token::Rbrace => { - open_tuple.span.end_line = token.span.end_line; - open_tuple.span.end_column = token.span.end_column; - self.next_token(); - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - e.copy_span(&open_tuple.span); - return Ok(Some(e)); - } - Token::Eof => (), - _ => self.add_diagnostic(ParseErrors::TupleCommaExpectedv2, token.span)?, - } - - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - // A comma is allowed after the last pair in the tuple -- check for this case. 
- let token = self.peek_next_token(); - if token.token == Token::Rbrace { - open_tuple.span.end_line = token.span.end_line; - open_tuple.span.end_column = token.span.end_column; - self.next_token(); - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - e.copy_span(&open_tuple.span); - return Ok(Some(e)); - } - open_tuple.diagnostic_token = token; - - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - open_tuple.expects = OpenTupleStatus::ParseKey; - Ok(None) - } - } - } - - /// Do all the preprocessing required to setup tuple parsing. If the tuple immediately - /// closes, return the final expression here, otherwise, return OpenTuple. - fn open_tuple(&mut self, lbrace: PlacedToken) -> ParseResult { - let mut open_tuple = OpenTuple { - nodes: vec![], - span: lbrace.span, - expects: OpenTupleStatus::ParseKey, - diagnostic_token: self.peek_next_token(), - }; - - // do all the preprocessing required before the first key node - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - let token = self.peek_next_token(); - match token.token { - Token::Comma => { - self.add_diagnostic(ParseErrors::UnexpectedToken(token.token), token.span)?; - self.next_token(); - } - Token::Rbrace => { - open_tuple.span.end_line = token.span.end_line; - open_tuple.span.end_column = token.span.end_column; - self.next_token(); - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - e.copy_span(&open_tuple.span); - return Ok(SetupTupleResult::Closed(e)); - } - _ => (), - }; - - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - // A comma is allowed after the last pair in the tuple -- check for this case. 
- let token = self.peek_next_token(); - if token.token == Token::Rbrace { - open_tuple.span.end_line = token.span.end_line; - open_tuple.span.end_column = token.span.end_column; - self.next_token(); - let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); - e.copy_span(&open_tuple.span); - return Ok(SetupTupleResult::Closed(e)); - } - open_tuple.diagnostic_token = token; - - let mut comments = self.ignore_whitespace_and_comments(); - open_tuple.nodes.append(&mut comments); - - Ok(SetupTupleResult::OpenTuple(open_tuple)) - } - - fn read_principal( - &mut self, - addr: String, - mut span: Span, - ) -> ParseResult { - let principal = match PrincipalData::parse_standard_principal(&addr) { - Ok(principal) => principal, - _ => { - self.add_diagnostic(ParseErrors::InvalidPrincipalLiteral, span.clone())?; - let mut placeholder = PreSymbolicExpression::placeholder(format!("'{}", addr)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - - // Peek ahead for a '.', indicating a contract identifier - if self.peek_next_token().token == Token::Dot { - #[allow(clippy::unwrap_used)] - let dot = self.next_token().unwrap(); // skip over the dot - let (name, contract_span) = match self.next_token() { - Some(PlacedToken { - span: contract_span, - token: Token::Ident(ident), - }) => { - span.end_line = contract_span.end_line; - span.end_column = contract_span.end_column; - (ident, contract_span) - } - Some(PlacedToken { - span: token_span, - token, - }) => { - span.end_line = token_span.end_line; - span.end_column = token_span.end_column; - self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, token_span)?; - let mut placeholder = PreSymbolicExpression::placeholder(format!( - "'{}.{}", - principal, - token.reproduce() - )); - placeholder.copy_span(&span); - return Ok(placeholder); - } - None => { - self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, dot.span)?; - let mut placeholder = - 
PreSymbolicExpression::placeholder(format!("'{}.", principal)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - - if name.len() > MAX_CONTRACT_NAME_LEN { - self.add_diagnostic( - ParseErrors::ContractNameTooLong(name.clone()), - contract_span, - )?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!("'{}.{}", principal, name)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - let contract_name = match ContractName::try_from(name.clone()) { - Ok(id) => id, - Err(_) => { - self.add_diagnostic( - ParseErrors::IllegalContractName(name.clone()), - contract_span, - )?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!("'{}.{}", principal, name)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - let contract_id = QualifiedContractIdentifier::new(principal, contract_name); - - // Peek ahead for a '.', indicating a trait identifier - if self.peek_next_token().token == Token::Dot { - #[allow(clippy::unwrap_used)] - let dot = self.next_token().unwrap(); // skip over the dot - let (name, trait_span) = match self.next_token() { - Some(PlacedToken { - span: trait_span, - token: Token::Ident(ident), - }) => { - span.end_line = trait_span.end_line; - span.end_column = trait_span.end_column; - (ident, trait_span) - } - Some(PlacedToken { - span: token_span, - token, - }) => { - self.add_diagnostic( - ParseErrors::ExpectedTraitIdentifier, - token_span.clone(), - )?; - let mut placeholder = PreSymbolicExpression::placeholder(format!( - "'{}.{}", - contract_id, - token.reproduce(), - )); - span.end_line = token_span.end_line; - span.end_column = token_span.end_column; - placeholder.copy_span(&span); - return Ok(placeholder); - } - None => { - self.add_diagnostic( - ParseErrors::ExpectedTraitIdentifier, - dot.span.clone(), - )?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!("'{}.", contract_id)); - span.end_line = dot.span.end_line; - span.end_column = 
dot.span.end_column; - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - if name.len() > MAX_STRING_LEN { - self.add_diagnostic(ParseErrors::NameTooLong(name.clone()), trait_span)?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!("'{}.{}", contract_id, name,)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - let trait_name = match ClarityName::try_from(name.clone()) { - Ok(id) => id, - Err(_) => { - self.add_diagnostic( - ParseErrors::IllegalTraitName(name.clone()), - trait_span, - )?; - let mut placeholder = PreSymbolicExpression::placeholder(format!( - "'{}.{}", - contract_id, name, - )); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - let trait_id = TraitIdentifier { - name: trait_name, - contract_identifier: contract_id, - }; - let mut expr = PreSymbolicExpression::field_identifier(trait_id); - expr.copy_span(&span); - Ok(expr) - } else { - let contract_principal = PrincipalData::Contract(contract_id); - let mut expr = - PreSymbolicExpression::atom_value(Value::Principal(contract_principal)); - expr.copy_span(&span); - Ok(expr) - } - } else { - let mut expr = PreSymbolicExpression::atom_value(Value::Principal( - PrincipalData::Standard(principal), - )); - expr.copy_span(&span); - Ok(expr) - } - } - - fn read_sugared_principal(&mut self, mut span: Span) -> ParseResult { - let (name, contract_span) = match self.next_token() { - Some(PlacedToken { - span: contract_span, - token: Token::Ident(ident), - }) => { - span.end_line = contract_span.end_line; - span.end_column = contract_span.end_column; - (ident, contract_span) - } - Some(PlacedToken { - span: token_span, - token, - }) => { - self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, token_span.clone())?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!(".{}", token.reproduce())); - span.end_line = token_span.end_line; - span.end_column = token_span.end_column; - placeholder.copy_span(&span); - return 
Ok(placeholder); - } - None => { - self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, span.clone())?; - let mut placeholder = PreSymbolicExpression::placeholder(".".to_string()); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - - if name.len() > MAX_CONTRACT_NAME_LEN { - self.add_diagnostic(ParseErrors::ContractNameTooLong(name.clone()), span.clone())?; - let mut placeholder = PreSymbolicExpression::placeholder(format!(".{}", name)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - - let contract_name = match ContractName::try_from(name.clone()) { - Ok(id) => id, - Err(_) => { - self.add_diagnostic( - ParseErrors::IllegalContractName(name.clone()), - contract_span, - )?; - let mut placeholder = PreSymbolicExpression::placeholder(format!(".{}", name)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - - // Peek ahead for a '.', indicating a trait identifier - if self.peek_next_token().token == Token::Dot { - #[allow(clippy::unwrap_used)] - let dot = self.next_token().unwrap(); // skip over the dot - let (name, trait_span) = match self.next_token() { - Some(PlacedToken { - span: trait_span, - token: Token::Ident(ident), - }) => { - span.end_line = trait_span.end_line; - span.end_column = trait_span.end_column; - (ident, trait_span) - } - Some(PlacedToken { - span: token_span, - token, - }) => { - self.add_diagnostic(ParseErrors::ExpectedTraitIdentifier, token_span.clone())?; - let mut placeholder = PreSymbolicExpression::placeholder(format!( - ".{}.{}", - contract_name, - token.reproduce(), - )); - span.end_line = token_span.end_line; - span.end_column = token_span.end_column; - placeholder.copy_span(&span); - return Ok(placeholder); - } - None => { - self.add_diagnostic(ParseErrors::ExpectedTraitIdentifier, dot.span.clone())?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!(".{}.", contract_name)); - span.end_line = dot.span.end_line; - span.end_column = dot.span.end_column; - 
placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - if name.len() > MAX_STRING_LEN { - self.add_diagnostic(ParseErrors::NameTooLong(name.clone()), trait_span)?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!(".{}.{}", contract_name, name)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - let trait_name = match ClarityName::try_from(name.clone()) { - Ok(id) => id, - Err(_) => { - self.add_diagnostic(ParseErrors::IllegalTraitName(name.clone()), trait_span)?; - let mut placeholder = - PreSymbolicExpression::placeholder(format!(".{}.{}", contract_name, name)); - placeholder.copy_span(&span); - return Ok(placeholder); - } - }; - let mut expr = - PreSymbolicExpression::sugared_field_identifier(contract_name, trait_name); - expr.copy_span(&span); - Ok(expr) - } else { - let mut expr = PreSymbolicExpression::sugared_contract_identifier(contract_name); - expr.copy_span(&span); - Ok(expr) - } - } - - /// Returns some valid expression. When None is returned, check the current - /// token from the caller. - pub fn parse_node(&mut self) -> ParseResult> { - // `parse_stack` stores information about any nodes which may contain interior AST nodes. - // because even though this function only returns a single node, that single node may contain others. - let mut parse_stack = vec![]; - let mut first_run = true; - // do-while loop until there are no more nodes waiting for children nodes - while first_run || !parse_stack.is_empty() { - first_run = false; - - self.ignore_whitespace(); - let token_opt = self.next_token(); - - let mut node = match token_opt { - None => None, - Some(token) => { - match &token.token { - Token::Lparen => { - self.nesting_depth += 1; - if self.nesting_depth > MAX_NESTING_DEPTH { - self.add_diagnostic( - ParseErrors::ExpressionStackDepthTooDeep, - token.span.clone(), - )?; - // Do not try to continue, exit cleanly now to avoid a stack overflow. 
- self.skip_to_end(); - return Ok(None); - } - // open the list on the parse_stack, and then continue to the next token - parse_stack.push(ParserStackElement::OpenList { - nodes: vec![], - span: token.span.clone(), - whitespace: true, - }); - continue; - } - Token::Lbrace => { - // This sugared syntax for tuple becomes a list of pairs, so depth is increased by 2. - if self.nesting_depth + 2 > MAX_NESTING_DEPTH { - self.add_diagnostic( - ParseErrors::ExpressionStackDepthTooDeep, - token.span.clone(), - )?; - // Do not try to continue, exit cleanly now to avoid a stack overflow. - self.skip_to_end(); - return Ok(None); - } - - match self.open_tuple(token)? { - SetupTupleResult::OpenTuple(open_tuple) => { - self.nesting_depth += 2; - parse_stack.push(ParserStackElement::OpenTuple(open_tuple)); - // open the tuple on the parse_stack, and then continue to the next token - continue; - } - SetupTupleResult::Closed(closed_tuple) => Some(closed_tuple), - } - } - Token::Int(val_string) => { - let mut expr = match val_string.parse::() { - Ok(val) => PreSymbolicExpression::atom_value(Value::Int(val)), - Err(_) => { - self.add_diagnostic( - ParseErrors::FailedParsingIntValue(val_string.clone()), - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } - }; - expr.copy_span(&token.span); - Some(expr) - } - Token::Uint(val_string) => { - let mut expr = match val_string.parse::() { - Ok(val) => PreSymbolicExpression::atom_value(Value::UInt(val)), - Err(_) => { - self.add_diagnostic( - ParseErrors::FailedParsingUIntValue(val_string.clone()), - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } - }; - expr.copy_span(&token.span); - Some(expr) - } - Token::AsciiString(val) => { - let mut expr = - match Value::string_ascii_from_bytes(val.clone().into_bytes()) { - Ok(s) => PreSymbolicExpression::atom_value(s), - Err(_) => { - self.add_diagnostic( - ParseErrors::IllegalASCIIString(val.clone()), - 
token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } - }; - expr.copy_span(&token.span); - Some(expr) - } - Token::Utf8String(s) => { - let data: Vec> = s - .chars() - .map(|ch| { - let mut bytes = vec![0; ch.len_utf8()]; - ch.encode_utf8(&mut bytes); - bytes - }) - .collect(); - let val = - Value::Sequence(SequenceData::String(CharType::UTF8(UTF8Data { - data, - }))); - let mut expr = PreSymbolicExpression::atom_value(val); - expr.copy_span(&token.span); - Some(expr) - } - Token::Ident(name) => { - let mut expr = if name.len() > MAX_STRING_LEN { - self.add_diagnostic( - ParseErrors::NameTooLong(name.clone()), - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } else { - match ClarityName::try_from(name.clone()) { - Ok(name) => PreSymbolicExpression::atom(name), - Err(_) => { - self.add_diagnostic( - ParseErrors::IllegalClarityName(name.clone()), - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } - } - }; - expr.copy_span(&token.span); - Some(expr) - } - Token::TraitIdent(name) => { - let mut expr = if name.len() > MAX_STRING_LEN { - self.add_diagnostic( - ParseErrors::NameTooLong(name.clone()), - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } else { - match ClarityName::try_from(name.clone()) { - Ok(name) => PreSymbolicExpression::trait_reference(name), - Err(_) => { - self.add_diagnostic( - ParseErrors::IllegalTraitName(name.clone()), - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } - } - }; - expr.copy_span(&token.span); - Some(expr) - } - Token::Bytes(data) => { - let mut expr = match hex_bytes(data) { - Ok(bytes) => match Value::buff_from(bytes) { - Ok(value) => PreSymbolicExpression::atom_value(value), - _ => { - self.add_diagnostic( - ParseErrors::InvalidBuffer, - token.span.clone(), - )?; - 
PreSymbolicExpression::placeholder(token.token.reproduce()) - } - }, - Err(_) => { - self.add_diagnostic( - ParseErrors::InvalidBuffer, - token.span.clone(), - )?; - PreSymbolicExpression::placeholder(token.token.reproduce()) - } - }; - expr.copy_span(&token.span); - Some(expr) - } - Token::Principal(addr) => { - let expr = self.read_principal(addr.clone(), token.span.clone())?; - Some(expr) - } - Token::Dot => { - let expr = self.read_sugared_principal(token.span.clone())?; - Some(expr) - } - Token::Plus - | Token::Minus - | Token::Multiply - | Token::Divide - | Token::Less - | Token::LessEqual - | Token::Greater - | Token::GreaterEqual => { - let name = ClarityName::try_from(token.token.to_string()) - .map_err(|_| ParseErrors::InterpreterFailure)?; - let mut e = PreSymbolicExpression::atom(name); - e.copy_span(&token.span); - Some(e) - } - Token::Placeholder(s) => { - let mut e = PreSymbolicExpression::placeholder(s.to_string()); - e.copy_span(&token.span); - Some(e) - } - Token::Comment(comment) => { - let mut e = PreSymbolicExpression::comment(comment.to_string()); - e.copy_span(&token.span); - Some(e) - } - Token::Eof => None, - _ => None, // Other tokens should be dealt with by the caller - } - } - }; - - // Here we check if we have any open nodes (tuples or lists) that `node` - // should be a component of. If so, add `node` to the open one and then iterate - // If there are no open nodes, then return `node` immediately. - - let mut new_node_received = true; - while new_node_received { - new_node_received = false; - - match parse_stack.as_mut_slice().last_mut() { - Some(ref mut open_list) => { - let nesting_adjustment = match open_list { - ParserStackElement::OpenList { .. } => 1, - ParserStackElement::OpenTuple(_) => 2, - }; - if let Some(finished_list) = - self.handle_open_node(open_list, node.take())? 
- { - new_node_received = true; - node.replace(finished_list); - parse_stack.pop(); - self.nesting_depth -= nesting_adjustment; - } - } - None => { - return Ok(node); - } - } - } - } - - // This should be unreachable -- the loop only exits if there are no open tuples or lists, - // but the last line of the loop also checks if there are no open tuples or lists and if not, - // returns the node. - Ok(None) - } - - pub fn parse_node_or_eof(&mut self) -> ParseResult> { - loop { - match self.parse_node()? { - Some(node) => break Ok(Some(node)), - None => { - let token = self.tokens[self.next_token - 1].clone(); - match token.token { - Token::Eof => break Ok(None), - _ => { - // Report an error, then skip this token - self.add_diagnostic( - ParseErrors::UnexpectedToken(token.token), - token.span, - )?; - } - } - } - } - } - } - - pub fn parse(&mut self) -> ParseResult> { - let mut nodes = vec![]; - - while let Some(node) = self.parse_node_or_eof()? { - nodes.push(node) - } - Ok(nodes) - } -} - -pub fn parse(input: &str) -> ParseResult> { - let mut parser = match Parser::new(input, true) { - Ok(parser) => parser, - Err(e) => return Err(ParseError::new(e)), - }; - let stmts = parser.parse()?; - if parser.success { - Ok(stmts) - } else { - let err = parser.diagnostics.remove(0); - Err(ParseError::new(err.e)) - } -} - -#[allow(clippy::unwrap_used)] -pub fn parse_collect_diagnostics( - input: &str, -) -> (Vec, Vec, bool) { - // When not in fail_fast mode, Parser::new always returns Ok. - let mut parser = Parser::new(input, false).unwrap(); - - // When not in fail_fast mode, Parser::parse always returns Ok. 
- let stmts = parser.parse().unwrap(); - let diagnostics = parser - .diagnostics - .iter() - .map(|e| Diagnostic { - level: e.e.level(), - message: e.e.message(), - spans: vec![e.span.clone()], - suggestion: None, - }) - .collect(); - (stmts, diagnostics, parser.success) -} - -#[cfg(test)] -#[cfg(feature = "developer-mode")] -mod tests { - use self::lexer::error::LexerError; - use super::*; - use crate::vm::diagnostic::Level; - use crate::vm::representations::PreSymbolicExpressionType; - use crate::vm::types::{ASCIIData, CharType, PrincipalData, SequenceData}; - - #[test] - fn test_parse_int() { - let (stmts, diagnostics, success) = parse_collect_diagnostics(" 123"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(Value::Int(123)) = stmts[0].match_atom_value() { - } else { - panic!("failed to parse int value"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 7 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(" -123"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(Value::Int(-123)) = stmts[0].match_atom_value() { - } else { - panic!("failed to parse negative int value"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 8 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("42g "); - assert!(!success); - assert_eq!(stmts.len(), 1); - if let Some("42g") = stmts[0].match_placeholder() { - } else { - panic!("failed to parse int value with error"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 3 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "invalid character, 'g', in int literal".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - 
start_line: 1, - start_column: 3, - end_line: 1, - end_column: 3 - } - ); - - // Exceed the range of a 128-bit integer. - let (stmts, diagnostics, success) = - parse_collect_diagnostics("340282366920938463463374607431768211456 "); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].match_placeholder().unwrap(), - "340282366920938463463374607431768211456" - ); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 39 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "Failed to parse int literal '340282366920938463463374607431768211456'".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 39 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("0000000000123"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(Value::Int(123)) = stmts[0].match_atom_value() { - } else { - panic!("failed to parse int value"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 13 - } - ); - } - - #[test] - fn test_parse_uint() { - let (stmts, diagnostics, success) = parse_collect_diagnostics(" u98"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(Value::UInt(98)) = stmts[0].match_atom_value() { - } else { - panic!("failed to parse uint value"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 7 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("\n u2*3"); - assert!(!success); - assert_eq!(stmts.len(), 1); - if let Some("u2*3") = stmts[0].match_placeholder() { - } else { - panic!("failed to parse uint value with error"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 2, - 
start_column: 2, - end_line: 2, - end_column: 5 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "invalid character, '*', in uint literal".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 2, - start_column: 4, - end_line: 2, - end_column: 5 - } - ); - - // Exceed the range of a 128-bit unsigned integer. - let (stmts, diagnostics, success) = - parse_collect_diagnostics("u340282366920938463463374607431768211457 "); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].match_placeholder().unwrap(), - "u340282366920938463463374607431768211457" - ); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 40 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "Failed to parse uint literal 'u340282366920938463463374607431768211457'".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 40 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("u00000000123"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(Value::UInt(123)) = stmts[0].match_atom_value() { - } else { - panic!("failed to parse int value"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 12 - } - ); - } - - #[test] - fn test_parse_ascii_string() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("\"new\\nline\""); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(v) = stmts[0].match_atom_value() { - assert_eq!(v.clone().expect_ascii().unwrap(), "new\nline"); - } else { - panic!("failed to parse ascii string"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 
1, - start_column: 1, - end_line: 1, - end_column: 11 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("\"👎 nope\""); - assert!(!success); - assert_eq!(stmts.len(), 1); - if let Some(s) = stmts[0].match_placeholder() { - assert_eq!(s, "\"👎 nope\""); - } else { - panic!("failed to parse ascii value with error"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 8 - } - ); - assert_eq!(diagnostics.len(), 2); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "illegal non-ASCII character, '👎'".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - assert_eq!(diagnostics[1].level, Level::Error); - assert_eq!( - diagnostics[1].message, - "invalid character, '👎', in string literal".to_string() - ); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - } - - #[test] - fn test_parse_utf8_string() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("u\"new\\nline\\u{1f601}\""); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(v) = stmts[0].match_atom_value() { - let s = match v { - Value::Sequence(SequenceData::String(CharType::UTF8(data))) => format!("{}", data), - _ => panic!("failed to parse UTF8 string "), - }; - assert_eq!(s, "u\"new\\nline\\u{f09f9881}\""); - } else { - panic!("failed to parse utf8 string value"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 21 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("u\"\\m nope\""); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(stmts[0].match_placeholder().unwrap(), "u\"\\m nope\""); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 
1, - end_column: 10 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "unknown escape character, 'm'".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 3, - end_line: 1, - end_column: 4 - } - ); - } - - #[test] - fn test_parse_identifier() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("foo-bar"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(v) = stmts[0].match_atom() { - assert_eq!(v.as_str(), "foo-bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 7 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong"); - assert!(!success); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "illegal name (too long), 'veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong'".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 132 - } - ); - } - - #[test] - fn test_parse_list() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("(foo 1 u3 \"hel\tlo\")"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 19 - } - ); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "foo"), - _ => panic!("expected atom 'foo'"), - } - assert_eq!( - 
exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - match exprs[1].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Int(1)) => (), - _ => panic!("expected Value(1)"), - } - assert_eq!( - exprs[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 6 - } - ); - match exprs[2].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::UInt(3)) => (), - _ => panic!("expected Value(u3)"), - } - assert_eq!( - exprs[2].span, - Span { - start_line: 1, - start_column: 8, - end_line: 1, - end_column: 9 - } - ); - match &exprs[3].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Sequence(SequenceData::String( - CharType::ASCII(ASCIIData { data: s }), - ))) => assert_eq!(s, "hel\tlo".as_bytes()), - _ => panic!("expected Value(\"hel\tlo\")"), - } - assert_eq!( - exprs[3].span, - Span { - start_line: 1, - start_column: 11, - end_line: 1, - end_column: 18 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(1 2 3"); - assert!(!success); - assert_eq!(stmts.len(), 1); - let exprs = stmts[0].match_list().unwrap(); - match exprs[0].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Int(1)) => (), - _ => panic!("expected Value(1)"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - match exprs[1].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Int(2)) => (), - _ => panic!("expected Value(2)"), - } - assert_eq!( - exprs[1].span, - Span { - start_line: 1, - start_column: 4, - end_line: 1, - end_column: 4 - } - ); - match exprs[2].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Int(3)) => (), - _ => panic!("expected Value(3)"), - } - assert_eq!( - exprs[2].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 6 - } - ); - assert_eq!(diagnostics.len(), 2); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!(diagnostics[0].message, "expected 
closing ')'".to_string()); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 7, - end_line: 1, - end_column: 7 - } - ); - - assert_eq!(diagnostics[1].level, Level::Note); - assert_eq!(diagnostics[1].message, "to match this '('".to_string()); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(1 2 3\n )"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 2, - end_column: 2 - } - ); - let exprs = stmts[0].match_list().unwrap(); - assert_eq!(exprs.len(), 3); - } - - #[test] - fn test_parse_list_comment() { - let (stmts, diagnostics, success) = parse_collect_diagnostics( - "(foo ;; first comment\n bar\n ;; second comment\n baz;; no space\n)", - ); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 5, - end_column: 1 - } - ); - let exprs = stmts[0].match_list().unwrap(); - assert_eq!(exprs.len(), 6); - assert_eq!(exprs[0].match_atom().unwrap().as_str(), "foo"); - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - assert_eq!(exprs[1].match_comment().unwrap(), "first comment"); - assert_eq!( - exprs[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 21 - } - ); - assert_eq!(exprs[2].match_atom().unwrap().as_str(), "bar"); - assert_eq!( - exprs[2].span, - Span { - start_line: 2, - start_column: 3, - end_line: 2, - end_column: 5 - } - ); - assert_eq!(exprs[3].match_comment().unwrap(), "second comment"); - assert_eq!( - exprs[3].span, - Span { - start_line: 3, - start_column: 3, - end_line: 3, - end_column: 19 - } - ); - 
assert_eq!(exprs[4].match_atom().unwrap().as_str(), "baz"); - assert_eq!( - exprs[4].span, - Span { - start_line: 4, - start_column: 3, - end_line: 4, - end_column: 5 - } - ); - assert_eq!(exprs[5].match_comment().unwrap(), "no space"); - assert_eq!( - exprs[5].span, - Span { - start_line: 4, - start_column: 6, - end_line: 4, - end_column: 16 - } - ); - } - - #[test] - fn test_parse_tuple() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo: bar}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 10 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 7, - end_line: 1, - end_column: 9, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo: bar,}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 11 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( 
- list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 7, - end_line: 1, - end_column: 9, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 9 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar,}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 10 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - 
start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar }"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 10 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar ,}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 11 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - 
end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar,baz:goo}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 17 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 4); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8, - } - ); - if let Some(name) = list[2].match_atom() { - assert_eq!(name.as_str(), "baz"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[2].span, - Span { - start_line: 1, - start_column: 10, - end_line: 1, - end_column: 12, - } - ); - if let Some(name) = list[3].match_atom() { - assert_eq!(name.as_str(), "goo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[3].span, - Span { - start_line: 1, - start_column: 14, - end_line: 1, - end_column: 16, - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{1: u2, 3: u4}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - 
start_column: 1, - end_line: 1, - end_column: 14 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 4); - match list[0].match_atom_value() { - Some(Value::Int(1)) => (), - _ => panic!("failed to parse tuple"), - } - match list[1].match_atom_value() { - Some(Value::UInt(2)) => (), - _ => panic!("failed to parse tuple"), - } - match list[2].match_atom_value() { - Some(Value::Int(3)) => (), - _ => panic!("failed to parse tuple"), - } - match list[3].match_atom_value() { - Some(Value::UInt(4)) => (), - _ => panic!("failed to parse tuple"), - } - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{, foo: bar}"); - assert!(!success); - assert_eq!(stmts.len(), 1); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 4, - end_line: 1, - end_column: 6 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 9, - end_line: 1, - end_column: 11 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!(diagnostics[0].message, "unexpected ','".to_string()); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{ "); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 
1, - end_line: 1, - end_column: 3 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert!(list.is_empty()); - assert_eq!(diagnostics.len(), 2); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!(diagnostics[0].message, "expected closing '}'".to_string()); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 4, - end_line: 1, - end_column: 4 - } - ); - assert_eq!(diagnostics[1].level, Level::Note); - assert_eq!(diagnostics[1].message, "to match this '{'".to_string()); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 5 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - assert_eq!(list[1].match_placeholder().unwrap(), ""); - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 6 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "expected value expression for tuple".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 5 - } - ); - - let (stmts, diagnostics, success) = 
parse_collect_diagnostics("{foo:bar"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 8 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8 - } - ); - assert_eq!(diagnostics.len(), 2); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!(diagnostics[0].message, "expected closing '}'".to_string()); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 9, - end_line: 1, - end_column: 9 - } - ); - assert_eq!(diagnostics[1].level, Level::Note); - assert_eq!(diagnostics[1].message, "to match this '{'".to_string()); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar boo:far}"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 17 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 4); - if let Some(name) = list[2].match_atom() { - assert_eq!(name.as_str(), "boo"); - } else { - panic!("failed to parse 
identifier"); - } - assert_eq!( - list[2].span, - Span { - start_line: 1, - start_column: 10, - end_line: 1, - end_column: 12 - } - ); - if let Some(name) = list[3].match_atom() { - assert_eq!(name.as_str(), "far"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[3].span, - Span { - start_line: 1, - start_column: 14, - end_line: 1, - end_column: 16 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "expected ',' separating key-value pairs in tuple".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 10, - end_line: 1, - end_column: 12 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo bar}"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 9 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - if let Some(name) = list[1].match_atom() { - assert_eq!(name.as_str(), "bar"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "expected ':' after key in tuple".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8 - } - ); - - let (stmts, diagnostics, success) = 
parse_collect_diagnostics("{foo"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 4 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 2); - if let Some(name) = list[0].match_atom() { - assert_eq!(name.as_str(), "foo"); - } else { - panic!("failed to parse identifier"); - } - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - assert_eq!(list[1].match_placeholder().unwrap(), ""); - assert_eq!( - list[1].span, - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 5 - } - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].message, - "expected ':' after key in tuple".to_string() - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 5 - } - ); - } - - #[test] - fn test_parse_tuple_comments() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("{ ;; before the key\n foo ;; before the colon\n : ;; after the colon\n ;; comment on newline\n bar ;; before comma\n ,\n ;; after comma\n baz : qux ;; before closing\n}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 9, - end_column: 1 - } - ); - let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { - PreSymbolicExpressionType::Tuple(ref list) => list, - _ => panic!("failed to parse tuple"), - }; - assert_eq!(list.len(), 11); - assert_eq!(list[0].match_comment().unwrap(), "before the key"); - assert_eq!( - list[0].span, - Span { - start_line: 1, - start_column: 3, - end_line: 1, - end_column: 19, - } - ); - 
assert_eq!(list[1].match_atom().unwrap().as_str(), "foo"); - assert_eq!( - list[1].span, - Span { - start_line: 2, - start_column: 3, - end_line: 2, - end_column: 5 - } - ); - assert_eq!(list[2].match_comment().unwrap(), "before the colon"); - assert_eq!( - list[2].span, - Span { - start_line: 2, - start_column: 7, - end_line: 2, - end_column: 25, - } - ); - assert_eq!(list[3].match_comment().unwrap(), "after the colon"); - assert_eq!( - list[3].span, - Span { - start_line: 3, - start_column: 5, - end_line: 3, - end_column: 22, - } - ); - assert_eq!(list[4].match_comment().unwrap(), "comment on newline"); - assert_eq!( - list[4].span, - Span { - start_line: 4, - start_column: 3, - end_line: 4, - end_column: 23, - } - ); - assert_eq!(list[5].match_atom().unwrap().as_str(), "bar"); - assert_eq!( - list[5].span, - Span { - start_line: 5, - start_column: 3, - end_line: 5, - end_column: 5 - } - ); - assert_eq!(list[6].match_comment().unwrap(), "before comma"); - assert_eq!( - list[6].span, - Span { - start_line: 5, - start_column: 7, - end_line: 5, - end_column: 21, - } - ); - assert_eq!(list[7].match_comment().unwrap(), "after comma"); - assert_eq!( - list[7].span, - Span { - start_line: 7, - start_column: 3, - end_line: 7, - end_column: 16, - } - ); - assert_eq!(list[8].match_atom().unwrap().as_str(), "baz"); - assert_eq!( - list[8].span, - Span { - start_line: 8, - start_column: 2, - end_line: 8, - end_column: 4 - } - ); - assert_eq!(list[9].match_atom().unwrap().as_str(), "qux"); - assert_eq!( - list[9].span, - Span { - start_line: 8, - start_column: 8, - end_line: 8, - end_column: 10 - } - ); - assert_eq!(list[10].match_comment().unwrap(), "before closing"); - assert_eq!( - list[10].span, - Span { - start_line: 8, - start_column: 12, - end_line: 8, - end_column: 28, - } - ); - } - - #[test] - fn test_parse_bad() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("(1, 3)"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - 
stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 6 - } - ); - let exprs = stmts[0].match_list().unwrap(); - assert_eq!(exprs.len(), 2); - match exprs[0].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Int(1)) => (), - _ => panic!("expected Value(1)"), - } - match exprs[1].pre_expr { - PreSymbolicExpressionType::AtomValue(Value::Int(3)) => (), - _ => panic!("expected Value(3)"), - } - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "unexpected ','"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 3, - end_line: 1, - end_column: 3 - } - ); - } - - #[test] - fn test_parse_principal() { - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 30 - } - ); - if let Some(Value::Principal(data)) = stmts[0].match_atom_value() { - match data { - PrincipalData::Standard(data) => { - assert_eq!(data.to_address(), "ST000000000000000000002AMW42H") - } - _ => panic!("failed to parse principal"), - } - } - - let (stmts, diagnostics, success) = parse_collect_diagnostics("' u42"); - assert!(!success); - assert_eq!(stmts.len(), 2); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - assert_eq!(stmts[0].match_placeholder().unwrap(), "'"); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "invalid principal literal"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - match stmts[1].match_atom_value() { - Some(Value::UInt(42)) => (), - _ => panic!("failed to parse uint after principal"), - } - - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H.silly-goose"); - 
assert!(success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 42 - } - ); - if let Some(Value::Principal(data)) = stmts[0].match_atom_value() { - match data { - PrincipalData::Contract(data) => { - assert_eq!( - data.to_string(), - "ST000000000000000000002AMW42H.silly-goose" - ) - } - _ => panic!("failed to parse principal"), - } - } - assert!(diagnostics.is_empty()); - - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H.123"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].match_placeholder().unwrap(), - "'ST000000000000000000002AMW42H.123" - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "expected contract identifier"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 32, - end_line: 1, - end_column: 34 - } - ); - - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H.illegal?name "); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].match_placeholder().unwrap(), - "'ST000000000000000000002AMW42H.illegal?name" - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!( - diagnostics[0].message, - "Illegal contract name: 'illegal?name'" - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 32, - end_line: 1, - end_column: 43 - } - ); - - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H.this-name-is-way-too-many-characters-to-be-a-legal-contract-name "); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(stmts[0].match_placeholder().unwrap(), "'ST000000000000000000002AMW42H.this-name-is-way-too-many-characters-to-be-a-legal-contract-name"); - assert_eq!(diagnostics.len(), 1); - assert_eq!( - diagnostics[0].message, - "contract name 
'this-name-is-way-too-many-characters-to-be-a-legal-contract-name' is too long" - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 32, - end_line: 1, - end_column: 95 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(".fancy_pants"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 12 - } - ); - match &stmts[0].pre_expr { - PreSymbolicExpressionType::SugaredContractIdentifier(name) => { - assert_eq!(name.as_str(), "fancy_pants") - } - _ => panic!("failed to parse sugared contract identifier"), - } - assert!(diagnostics.is_empty()); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(".123"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(stmts[0].match_placeholder().unwrap(), ".123"); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "expected contract identifier"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(".illegal?name "); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(stmts[0].match_placeholder().unwrap(), ".illegal?name"); - assert_eq!(diagnostics.len(), 1); - assert_eq!( - diagnostics[0].message, - "Illegal contract name: 'illegal?name'" - ); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 13 - } - ); - - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H.foo.bar"); - assert!(success); - assert_eq!(stmts.len(), 1); - match &stmts[0].pre_expr { - PreSymbolicExpressionType::FieldIdentifier(trait_id) => { - assert_eq!( - format!("{}", trait_id), - "ST000000000000000000002AMW42H.foo.bar" - ); - } - _ => panic!("failed to parse field identifier"), - } - assert_eq!( - 
stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 38 - } - ); - assert!(diagnostics.is_empty()); - - let (stmts, diagnostics, success) = - parse_collect_diagnostics("'ST000000000000000000002AMW42H.foo.123"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].match_placeholder().unwrap(), - "'ST000000000000000000002AMW42H.foo.123" - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "expected trait identifier"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 36, - end_line: 1, - end_column: 38 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(".foo.bar"); - assert!(success); - assert_eq!(stmts.len(), 1); - match &stmts[0].pre_expr { - PreSymbolicExpressionType::SugaredFieldIdentifier(contract_name, trait_name) => { - assert_eq!(contract_name.as_str(), "foo"); - assert_eq!(trait_name.as_str(), "bar"); - } - _ => panic!("failed to parse sugared trait identifier"), - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 8 - } - ); - assert!(diagnostics.is_empty()); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(".foo.123"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(stmts[0].match_placeholder().unwrap(), ".foo.123"); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "expected trait identifier"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 8 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics( - ".this-name-is-way-too-many-characters-to-be-a-legal-contract-name", - ); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!( - stmts[0].match_placeholder().unwrap(), - ".this-name-is-way-too-many-characters-to-be-a-legal-contract-name" - ); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, 
"contract name 'this-name-is-way-too-many-characters-to-be-a-legal-contract-name' is too long"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 65 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(".foo.veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(stmts[0].match_placeholder().unwrap(),".foo.veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong"); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "illegal name (too long), 'veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong'"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 137 - } - ); - } - - #[test] - fn test_parse_trait_reference() { - let (stmts, diagnostics, success) = parse_collect_diagnostics(""); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - if let Some(name) = stmts[0].match_trait_reference() { - assert_eq!(name.as_str(), "foo-bar"); - } else { - panic!("failed to parse trait reference"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 9 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("\n\t'"); - assert_eq!(diagnostics[1].level, Level::Note); - assert_eq!(diagnostics[1].message, "to match this '<'".to_string()); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 2, - start_column: 2, - end_line: 2, - end_column: 2 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("<123>"); - assert!(!success); - assert_eq!(stmts.len(), 2); - 
assert_eq!(diagnostics.len(), 2); - assert_eq!(diagnostics[0].message, "expected separator"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - assert_eq!( - diagnostics[1].message, - "invalid character, '>', in int literal" - ); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 5 - } - ); - if let Some(name) = stmts[0].match_atom() { - assert_eq!(name.as_str(), "<"); - } else { - panic!("failed to parse invalid trait reference"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - if let Some(s) = stmts[1].match_placeholder() { - assert_eq!(s, "123>"); - } else { - panic!("failed to parse trait reference"); - } - assert_eq!( - stmts[1].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 5 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("<123 "); - assert!(!success); - assert_eq!(stmts.len(), 2); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, "expected separator"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - if let Some(name) = stmts[0].match_atom() { - assert_eq!(name.as_str(), "<"); - } else { - panic!("failed to parse invalid trait reference"); - } - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 1 - } - ); - match stmts[1].match_atom_value() { - Some(Value::Int(123)) => (), - _ => panic!("failed to parse int with errors"), - } - assert_eq!( - stmts[1].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 4 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(""); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(diagnostics.len(), 1); - assert_eq!(diagnostics[0].message, 
"illegal name (too long), 'veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong'"); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 134 - } - ); - } - - #[test] - fn test_parse_ops() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("(+ 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 7 - } - ); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "+"), - _ => panic!("expected atom '+'"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - match exprs[1].match_atom_value() { - Some(Value::Int(1)) => (), - _ => panic!("failed to parse int in list"), - } - match exprs[2].match_atom_value() { - Some(Value::Int(2)) => (), - _ => panic!("failed to parse int in list"), - } - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(- 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "-"), - _ => panic!("expected atom '-'"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(* 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "*"), - _ => panic!("expected atom '*'"), - } - assert_eq!( - 
exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(/ 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "/"), - _ => panic!("expected atom '/'"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(< 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "<"), - _ => panic!("expected atom '<'"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(<= 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "<="), - _ => panic!("expected atom '<='"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 3 - } - ); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(> 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), ">"), - _ => panic!("expected atom '>'"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 2 - } - ); - - let 
(stmts, diagnostics, success) = parse_collect_diagnostics("(>= 1 2)"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - match &exprs[0].pre_expr { - PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), ">="), - _ => panic!("expected atom '>='"), - } - assert_eq!( - exprs[0].span, - Span { - start_line: 1, - start_column: 2, - end_line: 1, - end_column: 3 - } - ); - } - - #[test] - fn test_parse_buffer() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("0x1234"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - assert_eq!( - stmts[0].span, - Span { - start_line: 1, - start_column: 1, - end_line: 1, - end_column: 6 - } - ); - let val = stmts[0].match_atom_value().unwrap().clone(); - assert_eq!(val.expect_buff(2).unwrap(), vec![0x12, 0x34]); - } - - #[test] - fn test_parse_errors() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("123 }"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(diagnostics.len(), 1); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 5 - } - ); - assert_eq!(diagnostics[0].message, "unexpected '}'"); - - let (stmts, diagnostics, success) = parse_collect_diagnostics("(foo))"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(diagnostics.len(), 1); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 6, - end_line: 1, - end_column: 6 - } - ); - assert_eq!(diagnostics[0].message, "unexpected ')'"); - } - - #[test] - fn test_lexer_diagnostics() { - let (stmts, diagnostics, success) = - parse_collect_diagnostics("(print \"newline\n in string\")"); - assert!(!success); - assert_eq!(stmts.len(), 1); - assert_eq!(diagnostics.len(), 3); - assert_eq!(diagnostics[0].message, "expected closing '\"'"); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( 
- diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 16, - end_line: 1, - end_column: 16 - } - ); - - assert_eq!(diagnostics[1].message, "to match this '\"'"); - assert_eq!(diagnostics[1].level, Level::Note); - assert_eq!( - diagnostics[1].spans[0], - Span { - start_line: 1, - start_column: 8, - end_line: 1, - end_column: 8 - } - ); - - // This last error is because it doesn't know what to do with the next line - assert_eq!( - diagnostics[2].message, - "invalid character, '\"', in identifier" - ); - } - - #[test] - fn test_consume_invalid_symbols() { - let (stmts, diagnostics, success) = - parse_collect_diagnostics(" # here is a python comment\n\n # and another\n(foo)"); - assert!(!success); - assert_eq!(stmts.len(), 10); - } - - #[test] - fn test_handle_comments() { - let (stmts, diagnostics, success) = - parse_collect_diagnostics(" ;; here is a comment\n\n ;; and another\n(foo)"); - assert!(success); - assert_eq!(stmts.len(), 3); - assert!(diagnostics.is_empty()); - assert_eq!(stmts[0].match_comment().unwrap(), "here is a comment"); - assert_eq!(stmts[1].match_comment().unwrap(), "and another"); - stmts[2].match_list().unwrap(); - } - - #[test] - fn test_comment_in_list() { - let (stmts, diagnostics, success) = parse_collect_diagnostics( - "(\n foo ;; comment after\n ;; comment on its own line\n bar\n)", - ); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - let exprs = stmts[0].match_list().unwrap(); - assert_eq!(exprs.len(), 4); - assert_eq!(exprs[0].match_atom().unwrap().as_str(), "foo"); - assert_eq!(exprs[1].match_comment().unwrap(), "comment after"); - assert_eq!(exprs[2].match_comment().unwrap(), "comment on its own line"); - assert_eq!(exprs[3].match_atom().unwrap().as_str(), "bar"); - } - - #[test] - fn test_comma_at_end() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("{this: is, a:tuple,}"); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - - 
let (stmts, diagnostics, success) = parse_collect_diagnostics( - r#" -{ - and: so, - is: this, -}"#, - ); - assert!(success); - assert_eq!(stmts.len(), 1); - assert!(diagnostics.is_empty()); - } - - #[test] - fn test_missing_whitespace() { - let (stmts, diagnostics, success) = parse_collect_diagnostics("(foo(bar))"); - assert!(!success); - assert_eq!(stmts.len(), 1); - let exprs = stmts[0].match_list().unwrap(); - assert_eq!(exprs.len(), 2); - assert_eq!(diagnostics.len(), 1); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 5, - end_line: 1, - end_column: 9 - } - ); - assert_eq!( - diagnostics[0].message, - "expected whitespace before expression" - ); - } - - #[test] - fn test_parse_fail_fast() { - match parse("42g !ok") { - Ok(_) => panic!("fail_fast mode should have returned an error"), - Err(e) => assert_eq!(e.err, ParseErrors::Lexer(LexerError::InvalidCharInt('g'))), - } - } - - #[test] - fn test_empty_contract() { - let (stmts, diagnostics, success) = parse_collect_diagnostics(""); - assert!(success); - assert!(stmts.is_empty()); - assert!(diagnostics.is_empty()); - } - - #[test] - fn test_stack_depth() { - let stack_limit = - (AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1) as usize; - let exceeds_stack_depth_tuple = format!( - "{}u1 {}", - "{ a : ".repeat(stack_limit / 2 + 1), - "} ".repeat(stack_limit / 2 + 1) - ); - let exceeds_stack_depth_list = format!( - "{}u1 {}", - "(list ".repeat(stack_limit + 1), - ")".repeat(stack_limit + 1) - ); - - assert!(match parse(&exceeds_stack_depth_list).unwrap_err().err { - ParseErrors::ExpressionStackDepthTooDeep => true, - x => panic!("expected a stack depth too deep error, got {:?}", x), - }); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(&exceeds_stack_depth_list); - assert!(!success); - assert!(!diagnostics.is_empty()); - assert_eq!( - diagnostics[0].message, - "AST has too deep of an expression nesting. 
The maximum stack depth is 64" - ); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 421, - end_line: 1, - end_column: 421 - } - ); - - assert!(match parse(&exceeds_stack_depth_tuple).unwrap_err().err { - ParseErrors::ExpressionStackDepthTooDeep => true, - x => panic!("expected a stack depth too deep error, got {:?}", x), - }); - - let (stmts, diagnostics, success) = parse_collect_diagnostics(&exceeds_stack_depth_tuple); - assert!(!success); - assert!(!diagnostics.is_empty()); - assert_eq!( - diagnostics[0].message, - "AST has too deep of an expression nesting. The maximum stack depth is 64" - ); - assert_eq!(diagnostics[0].level, Level::Error); - assert_eq!( - diagnostics[0].spans[0], - Span { - start_line: 1, - start_column: 211, - end_line: 1, - end_column: 211 - } - ); - } -} +#[cfg(feature = "vm")] +pub mod parser_impl; +#[cfg(feature = "vm")] +pub use parser_impl::*; diff --git a/clarity/src/vm/ast/parser/v2/parser_impl.rs b/clarity/src/vm/ast/parser/v2/parser_impl.rs new file mode 100644 index 0000000000..63f44b1429 --- /dev/null +++ b/clarity/src/vm/ast/parser/v2/parser_impl.rs @@ -0,0 +1,3611 @@ +use stacks_common::util::hash::hex_bytes; + +use crate::vm::ast::errors::{ParseError, ParseErrors, ParseResult, PlacedError}; +use crate::vm::ast::parser::v2::lexer::token::{PlacedToken, Token}; +use crate::vm::ast::parser::v2::lexer::Lexer; +use crate::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; +use crate::vm::diagnostic::{DiagnosableError, Diagnostic, Level}; +use crate::vm::representations::{ClarityName, ContractName, PreSymbolicExpression, Span}; +use crate::vm::types::{ + CharType, PrincipalData, QualifiedContractIdentifier, SequenceData, TraitIdentifier, UTF8Data, + Value, +}; +use crate::vm::MAX_CALL_STACK_DEPTH; + +pub struct Parser<'a> { + lexer: Lexer<'a>, + tokens: Vec, + next_token: usize, + diagnostics: Vec, + success: bool, + // `fail_fast` mode 
indicates that the parser should not report warnings + // and should exit on the first error. This is useful for parsing in the + // context of a stacks-node, while normal mode is useful for developers. + fail_fast: bool, + nesting_depth: u64, +} + +pub const MAX_STRING_LEN: usize = 128; +pub const MAX_CONTRACT_NAME_LEN: usize = 40; +pub const MAX_NESTING_DEPTH: u64 = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1; + +enum OpenTupleStatus { + /// The next thing to parse is a key + ParseKey, + /// The next thing to parse is a value + ParseValue, +} + +enum SetupTupleResult { + OpenTuple(OpenTuple), + Closed(PreSymbolicExpression), +} + +struct OpenTuple { + nodes: Vec, + span: Span, + /// Is the next node is expected to be a key or value? All of the preparatory work is done _before_ the parse loop tries to digest the next + /// node (i.e., whitespace ingestion and checking for commas) + expects: OpenTupleStatus, + /// This is the last peeked token before trying to parse a key or value node, used for + /// diagnostic reporting + diagnostic_token: PlacedToken, +} + +enum ParserStackElement { + OpenList { + nodes: Vec, + span: Span, + whitespace: bool, + }, + OpenTuple(OpenTuple), +} + +impl<'a> Parser<'a> { + pub fn new(input: &'a str, fail_fast: bool) -> Result { + let lexer = match Lexer::new(input, fail_fast) { + Ok(lexer) => lexer, + Err(e) => return Err(ParseErrors::Lexer(e)), + }; + let mut p = Self { + lexer, + tokens: vec![], + next_token: 0, + diagnostics: vec![], + success: true, + fail_fast, + nesting_depth: 0, + }; + + loop { + let token = match p.lexer.read_token() { + Ok(token) => token, + Err(e) => { + assert!( + fail_fast, + "Parser::read_token should not return an error when not in fail_fast mode" + ); + p.success = false; + return Err(ParseErrors::Lexer(e)); + } + }; + if token.token == Token::Eof { + p.tokens.push(token); + break; + } + p.tokens.push(token); + } + p.diagnostics = p + .lexer + .diagnostics + .iter() + 
.map(|lex_error| PlacedError { + e: ParseErrors::Lexer(lex_error.e.clone()), + span: lex_error.span.clone(), + }) + .collect(); + p.success = p.lexer.success; + Ok(p) + } + + fn add_diagnostic(&mut self, e: ParseErrors, span: Span) -> ParseResult<()> { + if self.fail_fast { + return Err(ParseError::new(e)); + } else { + if e.level() == Level::Error { + self.success = false; + } + self.diagnostics.push(PlacedError { e, span }); + } + Ok(()) + } + + fn next_token(&mut self) -> Option { + if self.next_token >= self.tokens.len() { + return None; + } + let token = self.tokens[self.next_token].clone(); + self.next_token += 1; + Some(token) + } + + fn peek_next_token(&mut self) -> PlacedToken { + if self.next_token >= self.tokens.len() { + PlacedToken { + span: Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1, + }, + token: Token::Eof, + } + } else { + self.tokens[self.next_token].clone() + } + } + + /// Get a reference to the last processed token. If there is no last token, + /// raises an UnexpectedParserFailure. 
+ fn peek_last_token(&self) -> ParseResult<&PlacedToken> { + if self.next_token == 0 { + return Err(ParseError::new(ParseErrors::UnexpectedParserFailure)); + } + self.tokens + .get(self.next_token - 1) + .ok_or_else(|| ParseError::new(ParseErrors::UnexpectedParserFailure)) + } + + fn skip_to_end(&mut self) { + self.next_token = self.tokens.len(); + } + + fn ignore_whitespace(&mut self) -> bool { + let mut found = false; + loop { + if self.next_token >= self.tokens.len() { + return found; + } + let token = &self.tokens[self.next_token]; + match &token.token { + Token::Whitespace => { + self.next_token += 1; + found = true; + } + _ => return found, + } + } + } + + fn ignore_whitespace_and_comments(&mut self) -> Vec { + let mut comments = Vec::new(); + loop { + if self.next_token >= self.tokens.len() { + return comments; + } + let token = &self.tokens[self.next_token]; + match &token.token { + Token::Whitespace => { + self.next_token += 1; + } + Token::Comment(comment) => { + let mut comment = PreSymbolicExpression::comment(comment.to_string()); + comment.copy_span(&token.span); + comments.push(comment); + self.next_token += 1; + } + _ => return comments, + } + } + } + + // TODO: add tests from mutation testing results #4829 + #[cfg_attr(test, mutants::skip)] + /// Process a new child node for an AST expression that is open and waiting for children nodes. For example, + /// a list or tuple expression that is waiting for child expressions. + /// + /// Returns Some(node) if the open node is finished and should be popped from the stack. + /// Returns None if the open node is not finished and should remain on the parser stack. 
+ fn handle_open_node( + &mut self, + open_node: &mut ParserStackElement, + node_opt: Option, + ) -> ParseResult> { + match open_node { + ParserStackElement::OpenList { + ref mut nodes, + ref mut span, + ref mut whitespace, + } => { + if let Some(node) = node_opt { + if !*whitespace && node.match_comment().is_none() { + self.add_diagnostic(ParseErrors::ExpectedWhitespace, node.span().clone())?; + } + nodes.push(node); + *whitespace = self.ignore_whitespace(); + Ok(None) + } else { + let token = self.peek_last_token()?.clone(); + match token.token { + Token::Rparen => { + span.end_line = token.span.end_line; + span.end_column = token.span.end_column; + let out_nodes: Vec<_> = std::mem::take(nodes); + let mut e = PreSymbolicExpression::list(out_nodes); + e.copy_span(span); + Ok(Some(e)) + } + Token::Eof => { + // Report an error, but return the list and attempt to continue parsing + self.add_diagnostic( + ParseErrors::ExpectedClosing(Token::Rparen), + token.span.clone(), + )?; + self.add_diagnostic( + ParseErrors::NoteToMatchThis(Token::Lparen), + span.clone(), + )?; + span.end_line = token.span.end_line; + span.end_column = token.span.end_column; + let out_nodes: Vec<_> = std::mem::take(nodes); + let mut e = PreSymbolicExpression::list(out_nodes); + e.copy_span(span); + Ok(Some(e)) + } + _ => { + // Report an error, then skip this token + self.add_diagnostic( + ParseErrors::UnexpectedToken(token.token.clone()), + token.span, + )?; + *whitespace = self.ignore_whitespace(); + Ok(None) + } + } + } + } + ParserStackElement::OpenTuple(ref mut open_tuple) => { + self.handle_open_tuple(open_tuple, node_opt) + } + } + } + + // TODO: add tests from mutation testing results #4848 + #[cfg_attr(test, mutants::skip)] + fn handle_open_tuple( + &mut self, + open_tuple: &mut OpenTuple, + node_opt: Option, + ) -> ParseResult> { + match &open_tuple.expects { + OpenTupleStatus::ParseKey => { + // expecting to parse a key + let node = match node_opt { + Some(node) => node, + None => { 
+ // mimic parse_node_or_eof() behavior + // if last token was an EOF, error out the tuple + // if the last token was something else, just yield back to the parse loop + let last_token = self.peek_last_token()?.clone(); + match last_token.token { + Token::Eof => { + self.add_diagnostic( + ParseErrors::ExpectedClosing(Token::Rbrace), + open_tuple.diagnostic_token.span.clone(), + )?; + self.add_diagnostic( + ParseErrors::NoteToMatchThis(Token::Lbrace), + open_tuple.span.clone(), + )?; + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + let span_before_eof = &self.tokens[self.tokens.len() - 2].span; + open_tuple.span.end_line = span_before_eof.end_line; + open_tuple.span.end_column = span_before_eof.end_column; + e.copy_span(&open_tuple.span); + return Ok(Some(e)); + } + _ => { + // Report an error, then skip this token + self.add_diagnostic( + ParseErrors::UnexpectedToken(last_token.token), + last_token.span, + )?; + return Ok(None); // Ok(None) yields to the parse loop + } + } + } + }; + open_tuple.nodes.push(node); + // added key to the nodes list, now do all preprocessing to prepare to parse + // the value node + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + // Look for ':' + let token = self.peek_next_token(); + match token.token { + Token::Colon => { + self.next_token(); + } + Token::Eof => { + // This indicates we have reached the end of the input. + // Create a placeholder value so that parsing can continue, + // then return. 
+ self.add_diagnostic(ParseErrors::TupleColonExpectedv2, token.span.clone())?; + let mut placeholder = PreSymbolicExpression::placeholder("".to_string()); + placeholder.copy_span(&token.span); + open_tuple.nodes.push(placeholder); // Placeholder value + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + let span_before_eof = &self.tokens[self.tokens.len() - 2].span; + open_tuple.span.end_line = span_before_eof.end_line; + open_tuple.span.end_column = span_before_eof.end_column; + e.copy_span(&open_tuple.span); + return Ok(Some(e)); + } + _ => { + // Record an error, then continue to parse + self.add_diagnostic(ParseErrors::TupleColonExpectedv2, token.span.clone())?; + } + } + open_tuple.diagnostic_token = token; + + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + open_tuple.expects = OpenTupleStatus::ParseValue; + Ok(None) + } + OpenTupleStatus::ParseValue => { + // expecting to parse a value + let node = match node_opt { + Some(node) => node, + None => { + // mimic parse_node_or_eof() behavior + // if last token was an EOF, error out the tuple + // if the last token was something else, just yield back to the parse loop + let last_token = self.peek_last_token()?.clone(); + match last_token.token { + Token::Eof => { + // This indicates we have reached the end of the input. + // Create a placeholder value so that parsing can continue, + // then return. 
+ let eof_span = last_token.span; + + self.add_diagnostic( + ParseErrors::TupleValueExpected, + open_tuple.diagnostic_token.span.clone(), + )?; + let mut placeholder = + PreSymbolicExpression::placeholder("".to_string()); + placeholder.copy_span(&eof_span); + open_tuple.nodes.push(placeholder); // Placeholder value + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + open_tuple.span.end_line = + open_tuple.diagnostic_token.span.end_line; + open_tuple.span.end_column = + open_tuple.diagnostic_token.span.end_column; + e.copy_span(&open_tuple.span); + return Ok(Some(e)); + } + _ => { + // Report an error, then skip this token + self.add_diagnostic( + ParseErrors::UnexpectedToken(last_token.token), + last_token.span, + )?; + return Ok(None); // Ok(None) yields to the parse loop + } + } + } + }; + open_tuple.nodes.push(node); + + // now do all preprocessing to prepare to parse a key node + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + let token = self.peek_next_token(); + match token.token { + Token::Comma => { + self.next_token(); + } + Token::Rbrace => { + open_tuple.span.end_line = token.span.end_line; + open_tuple.span.end_column = token.span.end_column; + self.next_token(); + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + e.copy_span(&open_tuple.span); + return Ok(Some(e)); + } + Token::Eof => (), + _ => self.add_diagnostic(ParseErrors::TupleCommaExpectedv2, token.span)?, + } + + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + // A comma is allowed after the last pair in the tuple -- check for this case. 
+ let token = self.peek_next_token(); + if token.token == Token::Rbrace { + open_tuple.span.end_line = token.span.end_line; + open_tuple.span.end_column = token.span.end_column; + self.next_token(); + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + e.copy_span(&open_tuple.span); + return Ok(Some(e)); + } + open_tuple.diagnostic_token = token; + + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + open_tuple.expects = OpenTupleStatus::ParseKey; + Ok(None) + } + } + } + + /// Do all the preprocessing required to setup tuple parsing. If the tuple immediately + /// closes, return the final expression here, otherwise, return OpenTuple. + fn open_tuple(&mut self, lbrace: PlacedToken) -> ParseResult { + let mut open_tuple = OpenTuple { + nodes: vec![], + span: lbrace.span, + expects: OpenTupleStatus::ParseKey, + diagnostic_token: self.peek_next_token(), + }; + + // do all the preprocessing required before the first key node + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + let token = self.peek_next_token(); + match token.token { + Token::Comma => { + self.add_diagnostic(ParseErrors::UnexpectedToken(token.token), token.span)?; + self.next_token(); + } + Token::Rbrace => { + open_tuple.span.end_line = token.span.end_line; + open_tuple.span.end_column = token.span.end_column; + self.next_token(); + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + e.copy_span(&open_tuple.span); + return Ok(SetupTupleResult::Closed(e)); + } + _ => (), + }; + + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + // A comma is allowed after the last pair in the tuple -- check for this case. 
+ let token = self.peek_next_token(); + if token.token == Token::Rbrace { + open_tuple.span.end_line = token.span.end_line; + open_tuple.span.end_column = token.span.end_column; + self.next_token(); + let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); + let mut e = PreSymbolicExpression::tuple(out_nodes); + e.copy_span(&open_tuple.span); + return Ok(SetupTupleResult::Closed(e)); + } + open_tuple.diagnostic_token = token; + + let mut comments = self.ignore_whitespace_and_comments(); + open_tuple.nodes.append(&mut comments); + + Ok(SetupTupleResult::OpenTuple(open_tuple)) + } + + fn read_principal( + &mut self, + addr: String, + mut span: Span, + ) -> ParseResult { + let principal = match PrincipalData::parse_standard_principal(&addr) { + Ok(principal) => principal, + _ => { + self.add_diagnostic(ParseErrors::InvalidPrincipalLiteral, span.clone())?; + let mut placeholder = PreSymbolicExpression::placeholder(format!("'{}", addr)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + + // Peek ahead for a '.', indicating a contract identifier + if self.peek_next_token().token == Token::Dot { + #[allow(clippy::unwrap_used)] + let dot = self.next_token().unwrap(); // skip over the dot + let (name, contract_span) = match self.next_token() { + Some(PlacedToken { + span: contract_span, + token: Token::Ident(ident), + }) => { + span.end_line = contract_span.end_line; + span.end_column = contract_span.end_column; + (ident, contract_span) + } + Some(PlacedToken { + span: token_span, + token, + }) => { + span.end_line = token_span.end_line; + span.end_column = token_span.end_column; + self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, token_span)?; + let mut placeholder = PreSymbolicExpression::placeholder(format!( + "'{}.{}", + principal, + token.reproduce() + )); + placeholder.copy_span(&span); + return Ok(placeholder); + } + None => { + self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, dot.span)?; + let mut placeholder = + 
PreSymbolicExpression::placeholder(format!("'{}.", principal)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + + if name.len() > MAX_CONTRACT_NAME_LEN { + self.add_diagnostic( + ParseErrors::ContractNameTooLong(name.clone()), + contract_span, + )?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!("'{}.{}", principal, name)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + let contract_name = match ContractName::try_from(name.clone()) { + Ok(id) => id, + Err(_) => { + self.add_diagnostic( + ParseErrors::IllegalContractName(name.clone()), + contract_span, + )?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!("'{}.{}", principal, name)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + let contract_id = QualifiedContractIdentifier::new(principal, contract_name); + + // Peek ahead for a '.', indicating a trait identifier + if self.peek_next_token().token == Token::Dot { + #[allow(clippy::unwrap_used)] + let dot = self.next_token().unwrap(); // skip over the dot + let (name, trait_span) = match self.next_token() { + Some(PlacedToken { + span: trait_span, + token: Token::Ident(ident), + }) => { + span.end_line = trait_span.end_line; + span.end_column = trait_span.end_column; + (ident, trait_span) + } + Some(PlacedToken { + span: token_span, + token, + }) => { + self.add_diagnostic( + ParseErrors::ExpectedTraitIdentifier, + token_span.clone(), + )?; + let mut placeholder = PreSymbolicExpression::placeholder(format!( + "'{}.{}", + contract_id, + token.reproduce(), + )); + span.end_line = token_span.end_line; + span.end_column = token_span.end_column; + placeholder.copy_span(&span); + return Ok(placeholder); + } + None => { + self.add_diagnostic( + ParseErrors::ExpectedTraitIdentifier, + dot.span.clone(), + )?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!("'{}.", contract_id)); + span.end_line = dot.span.end_line; + span.end_column = 
dot.span.end_column; + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + if name.len() > MAX_STRING_LEN { + self.add_diagnostic(ParseErrors::NameTooLong(name.clone()), trait_span)?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!("'{}.{}", contract_id, name,)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + let trait_name = match ClarityName::try_from(name.clone()) { + Ok(id) => id, + Err(_) => { + self.add_diagnostic( + ParseErrors::IllegalTraitName(name.clone()), + trait_span, + )?; + let mut placeholder = PreSymbolicExpression::placeholder(format!( + "'{}.{}", + contract_id, name, + )); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + let trait_id = TraitIdentifier { + name: trait_name, + contract_identifier: contract_id, + }; + let mut expr = PreSymbolicExpression::field_identifier(trait_id); + expr.copy_span(&span); + Ok(expr) + } else { + let contract_principal = PrincipalData::Contract(contract_id); + let mut expr = + PreSymbolicExpression::atom_value(Value::Principal(contract_principal)); + expr.copy_span(&span); + Ok(expr) + } + } else { + let mut expr = PreSymbolicExpression::atom_value(Value::Principal( + PrincipalData::Standard(principal), + )); + expr.copy_span(&span); + Ok(expr) + } + } + + fn read_sugared_principal(&mut self, mut span: Span) -> ParseResult { + let (name, contract_span) = match self.next_token() { + Some(PlacedToken { + span: contract_span, + token: Token::Ident(ident), + }) => { + span.end_line = contract_span.end_line; + span.end_column = contract_span.end_column; + (ident, contract_span) + } + Some(PlacedToken { + span: token_span, + token, + }) => { + self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, token_span.clone())?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!(".{}", token.reproduce())); + span.end_line = token_span.end_line; + span.end_column = token_span.end_column; + placeholder.copy_span(&span); + return 
Ok(placeholder); + } + None => { + self.add_diagnostic(ParseErrors::ExpectedContractIdentifier, span.clone())?; + let mut placeholder = PreSymbolicExpression::placeholder(".".to_string()); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + + if name.len() > MAX_CONTRACT_NAME_LEN { + self.add_diagnostic(ParseErrors::ContractNameTooLong(name.clone()), span.clone())?; + let mut placeholder = PreSymbolicExpression::placeholder(format!(".{}", name)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + + let contract_name = match ContractName::try_from(name.clone()) { + Ok(id) => id, + Err(_) => { + self.add_diagnostic( + ParseErrors::IllegalContractName(name.clone()), + contract_span, + )?; + let mut placeholder = PreSymbolicExpression::placeholder(format!(".{}", name)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + + // Peek ahead for a '.', indicating a trait identifier + if self.peek_next_token().token == Token::Dot { + #[allow(clippy::unwrap_used)] + let dot = self.next_token().unwrap(); // skip over the dot + let (name, trait_span) = match self.next_token() { + Some(PlacedToken { + span: trait_span, + token: Token::Ident(ident), + }) => { + span.end_line = trait_span.end_line; + span.end_column = trait_span.end_column; + (ident, trait_span) + } + Some(PlacedToken { + span: token_span, + token, + }) => { + self.add_diagnostic(ParseErrors::ExpectedTraitIdentifier, token_span.clone())?; + let mut placeholder = PreSymbolicExpression::placeholder(format!( + ".{}.{}", + contract_name, + token.reproduce(), + )); + span.end_line = token_span.end_line; + span.end_column = token_span.end_column; + placeholder.copy_span(&span); + return Ok(placeholder); + } + None => { + self.add_diagnostic(ParseErrors::ExpectedTraitIdentifier, dot.span.clone())?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!(".{}.", contract_name)); + span.end_line = dot.span.end_line; + span.end_column = dot.span.end_column; + 
placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + if name.len() > MAX_STRING_LEN { + self.add_diagnostic(ParseErrors::NameTooLong(name.clone()), trait_span)?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!(".{}.{}", contract_name, name)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + let trait_name = match ClarityName::try_from(name.clone()) { + Ok(id) => id, + Err(_) => { + self.add_diagnostic(ParseErrors::IllegalTraitName(name.clone()), trait_span)?; + let mut placeholder = + PreSymbolicExpression::placeholder(format!(".{}.{}", contract_name, name)); + placeholder.copy_span(&span); + return Ok(placeholder); + } + }; + let mut expr = + PreSymbolicExpression::sugared_field_identifier(contract_name, trait_name); + expr.copy_span(&span); + Ok(expr) + } else { + let mut expr = PreSymbolicExpression::sugared_contract_identifier(contract_name); + expr.copy_span(&span); + Ok(expr) + } + } + + /// Returns some valid expression. When None is returned, check the current + /// token from the caller. + pub fn parse_node(&mut self) -> ParseResult> { + // `parse_stack` stores information about any nodes which may contain interior AST nodes. + // because even though this function only returns a single node, that single node may contain others. + let mut parse_stack = vec![]; + let mut first_run = true; + // do-while loop until there are no more nodes waiting for children nodes + while first_run || !parse_stack.is_empty() { + first_run = false; + + self.ignore_whitespace(); + let token_opt = self.next_token(); + + let mut node = match token_opt { + None => None, + Some(token) => { + match &token.token { + Token::Lparen => { + self.nesting_depth += 1; + if self.nesting_depth > MAX_NESTING_DEPTH { + self.add_diagnostic( + ParseErrors::ExpressionStackDepthTooDeep, + token.span.clone(), + )?; + // Do not try to continue, exit cleanly now to avoid a stack overflow. 
+ self.skip_to_end(); + return Ok(None); + } + // open the list on the parse_stack, and then continue to the next token + parse_stack.push(ParserStackElement::OpenList { + nodes: vec![], + span: token.span.clone(), + whitespace: true, + }); + continue; + } + Token::Lbrace => { + // This sugared syntax for tuple becomes a list of pairs, so depth is increased by 2. + if self.nesting_depth + 2 > MAX_NESTING_DEPTH { + self.add_diagnostic( + ParseErrors::ExpressionStackDepthTooDeep, + token.span.clone(), + )?; + // Do not try to continue, exit cleanly now to avoid a stack overflow. + self.skip_to_end(); + return Ok(None); + } + + match self.open_tuple(token)? { + SetupTupleResult::OpenTuple(open_tuple) => { + self.nesting_depth += 2; + parse_stack.push(ParserStackElement::OpenTuple(open_tuple)); + // open the tuple on the parse_stack, and then continue to the next token + continue; + } + SetupTupleResult::Closed(closed_tuple) => Some(closed_tuple), + } + } + Token::Int(val_string) => { + let mut expr = match val_string.parse::() { + Ok(val) => PreSymbolicExpression::atom_value(Value::Int(val)), + Err(_) => { + self.add_diagnostic( + ParseErrors::FailedParsingIntValue(val_string.clone()), + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } + }; + expr.copy_span(&token.span); + Some(expr) + } + Token::Uint(val_string) => { + let mut expr = match val_string.parse::() { + Ok(val) => PreSymbolicExpression::atom_value(Value::UInt(val)), + Err(_) => { + self.add_diagnostic( + ParseErrors::FailedParsingUIntValue(val_string.clone()), + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } + }; + expr.copy_span(&token.span); + Some(expr) + } + Token::AsciiString(val) => { + let mut expr = + match Value::string_ascii_from_bytes(val.clone().into_bytes()) { + Ok(s) => PreSymbolicExpression::atom_value(s), + Err(_) => { + self.add_diagnostic( + ParseErrors::IllegalASCIIString(val.clone()), + 
token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } + }; + expr.copy_span(&token.span); + Some(expr) + } + Token::Utf8String(s) => { + let data: Vec> = s + .chars() + .map(|ch| { + let mut bytes = vec![0; ch.len_utf8()]; + ch.encode_utf8(&mut bytes); + bytes + }) + .collect(); + let val = + Value::Sequence(SequenceData::String(CharType::UTF8(UTF8Data { + data, + }))); + let mut expr = PreSymbolicExpression::atom_value(val); + expr.copy_span(&token.span); + Some(expr) + } + Token::Ident(name) => { + let mut expr = if name.len() > MAX_STRING_LEN { + self.add_diagnostic( + ParseErrors::NameTooLong(name.clone()), + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } else { + match ClarityName::try_from(name.clone()) { + Ok(name) => PreSymbolicExpression::atom(name), + Err(_) => { + self.add_diagnostic( + ParseErrors::IllegalClarityName(name.clone()), + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } + } + }; + expr.copy_span(&token.span); + Some(expr) + } + Token::TraitIdent(name) => { + let mut expr = if name.len() > MAX_STRING_LEN { + self.add_diagnostic( + ParseErrors::NameTooLong(name.clone()), + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } else { + match ClarityName::try_from(name.clone()) { + Ok(name) => PreSymbolicExpression::trait_reference(name), + Err(_) => { + self.add_diagnostic( + ParseErrors::IllegalTraitName(name.clone()), + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } + } + }; + expr.copy_span(&token.span); + Some(expr) + } + Token::Bytes(data) => { + let mut expr = match hex_bytes(data) { + Ok(bytes) => match Value::buff_from(bytes) { + Ok(value) => PreSymbolicExpression::atom_value(value), + _ => { + self.add_diagnostic( + ParseErrors::InvalidBuffer, + token.span.clone(), + )?; + 
PreSymbolicExpression::placeholder(token.token.reproduce()) + } + }, + Err(_) => { + self.add_diagnostic( + ParseErrors::InvalidBuffer, + token.span.clone(), + )?; + PreSymbolicExpression::placeholder(token.token.reproduce()) + } + }; + expr.copy_span(&token.span); + Some(expr) + } + Token::Principal(addr) => { + let expr = self.read_principal(addr.clone(), token.span.clone())?; + Some(expr) + } + Token::Dot => { + let expr = self.read_sugared_principal(token.span.clone())?; + Some(expr) + } + Token::Plus + | Token::Minus + | Token::Multiply + | Token::Divide + | Token::Less + | Token::LessEqual + | Token::Greater + | Token::GreaterEqual => { + let name = ClarityName::try_from(token.token.to_string()) + .map_err(|_| ParseErrors::InterpreterFailure)?; + let mut e = PreSymbolicExpression::atom(name); + e.copy_span(&token.span); + Some(e) + } + Token::Placeholder(s) => { + let mut e = PreSymbolicExpression::placeholder(s.to_string()); + e.copy_span(&token.span); + Some(e) + } + Token::Comment(comment) => { + let mut e = PreSymbolicExpression::comment(comment.to_string()); + e.copy_span(&token.span); + Some(e) + } + Token::Eof => None, + _ => None, // Other tokens should be dealt with by the caller + } + } + }; + + // Here we check if we have any open nodes (tuples or lists) that `node` + // should be a component of. If so, add `node` to the open one and then iterate + // If there are no open nodes, then return `node` immediately. + + let mut new_node_received = true; + while new_node_received { + new_node_received = false; + + match parse_stack.as_mut_slice().last_mut() { + Some(ref mut open_list) => { + let nesting_adjustment = match open_list { + ParserStackElement::OpenList { .. } => 1, + ParserStackElement::OpenTuple(_) => 2, + }; + if let Some(finished_list) = + self.handle_open_node(open_list, node.take())? 
+ { + new_node_received = true; + node.replace(finished_list); + parse_stack.pop(); + self.nesting_depth -= nesting_adjustment; + } + } + None => { + return Ok(node); + } + } + } + } + + // This should be unreachable -- the loop only exits if there are no open tuples or lists, + // but the last line of the loop also checks if there are no open tuples or lists and if not, + // returns the node. + Ok(None) + } + + pub fn parse_node_or_eof(&mut self) -> ParseResult> { + loop { + match self.parse_node()? { + Some(node) => break Ok(Some(node)), + None => { + let token = self.tokens[self.next_token - 1].clone(); + match token.token { + Token::Eof => break Ok(None), + _ => { + // Report an error, then skip this token + self.add_diagnostic( + ParseErrors::UnexpectedToken(token.token), + token.span, + )?; + } + } + } + } + } + } + + pub fn parse(&mut self) -> ParseResult> { + let mut nodes = vec![]; + + while let Some(node) = self.parse_node_or_eof()? { + nodes.push(node) + } + Ok(nodes) + } +} + +pub fn parse(input: &str) -> ParseResult> { + let mut parser = match Parser::new(input, true) { + Ok(parser) => parser, + Err(e) => return Err(ParseError::new(e)), + }; + let stmts = parser.parse()?; + if parser.success { + Ok(stmts) + } else { + let err = parser.diagnostics.remove(0); + Err(ParseError::new(err.e)) + } +} + +#[allow(clippy::unwrap_used)] +pub fn parse_collect_diagnostics( + input: &str, +) -> (Vec, Vec, bool) { + // When not in fail_fast mode, Parser::new always returns Ok. + let mut parser = Parser::new(input, false).unwrap(); + + // When not in fail_fast mode, Parser::parse always returns Ok. 
+ let stmts = parser.parse().unwrap(); + let diagnostics = parser + .diagnostics + .iter() + .map(|e| Diagnostic { + level: e.e.level(), + message: e.e.message(), + spans: vec![e.span.clone()], + suggestion: None, + }) + .collect(); + (stmts, diagnostics, parser.success) +} + +#[cfg(test)] +#[cfg(feature = "developer-mode")] +mod tests { + use super::*; + use crate::vm::ast::parser::v2::lexer::error::LexerError; + use crate::vm::diagnostic::Level; + use crate::vm::representations::PreSymbolicExpressionType; + use crate::vm::types::{ASCIIData, CharType, PrincipalData, SequenceData}; + + #[test] + fn test_parse_int() { + let (stmts, diagnostics, success) = parse_collect_diagnostics(" 123"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(Value::Int(123)) = stmts[0].match_atom_value() { + } else { + panic!("failed to parse int value"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 7 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(" -123"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(Value::Int(-123)) = stmts[0].match_atom_value() { + } else { + panic!("failed to parse negative int value"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 8 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("42g "); + assert!(!success); + assert_eq!(stmts.len(), 1); + if let Some("42g") = stmts[0].match_placeholder() { + } else { + panic!("failed to parse int value with error"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 3 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "invalid character, 'g', in int literal".to_string() + ); + assert_eq!( + 
diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 3, + end_line: 1, + end_column: 3 + } + ); + + // Exceed the range of a 128-bit integer. + let (stmts, diagnostics, success) = + parse_collect_diagnostics("340282366920938463463374607431768211456 "); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].match_placeholder().unwrap(), + "340282366920938463463374607431768211456" + ); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 39 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "Failed to parse int literal '340282366920938463463374607431768211456'".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 39 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("0000000000123"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(Value::Int(123)) = stmts[0].match_atom_value() { + } else { + panic!("failed to parse int value"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 13 + } + ); + } + + #[test] + fn test_parse_uint() { + let (stmts, diagnostics, success) = parse_collect_diagnostics(" u98"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(Value::UInt(98)) = stmts[0].match_atom_value() { + } else { + panic!("failed to parse uint value"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 7 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("\n u2*3"); + assert!(!success); + assert_eq!(stmts.len(), 1); + if let Some("u2*3") = stmts[0].match_placeholder() { + } else { + panic!("failed to parse uint value with error"); + } + assert_eq!( + stmts[0].span, 
+ Span { + start_line: 2, + start_column: 2, + end_line: 2, + end_column: 5 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "invalid character, '*', in uint literal".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 2, + start_column: 4, + end_line: 2, + end_column: 5 + } + ); + + // Exceed the range of a 128-bit unsigned integer. + let (stmts, diagnostics, success) = + parse_collect_diagnostics("u340282366920938463463374607431768211457 "); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].match_placeholder().unwrap(), + "u340282366920938463463374607431768211457" + ); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 40 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "Failed to parse uint literal 'u340282366920938463463374607431768211457'".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 40 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("u00000000123"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(Value::UInt(123)) = stmts[0].match_atom_value() { + } else { + panic!("failed to parse int value"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 12 + } + ); + } + + #[test] + fn test_parse_ascii_string() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("\"new\\nline\""); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(v) = stmts[0].match_atom_value() { + assert_eq!(v.clone().expect_ascii().unwrap(), "new\nline"); + } else { + panic!("failed to parse ascii string"); + } + assert_eq!( + 
stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 11 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("\"👎 nope\""); + assert!(!success); + assert_eq!(stmts.len(), 1); + if let Some(s) = stmts[0].match_placeholder() { + assert_eq!(s, "\"👎 nope\""); + } else { + panic!("failed to parse ascii value with error"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 8 + } + ); + assert_eq!(diagnostics.len(), 2); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "illegal non-ASCII character, '👎'".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + assert_eq!(diagnostics[1].level, Level::Error); + assert_eq!( + diagnostics[1].message, + "invalid character, '👎', in string literal".to_string() + ); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + } + + #[test] + fn test_parse_utf8_string() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("u\"new\\nline\\u{1f601}\""); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(v) = stmts[0].match_atom_value() { + let s = match v { + Value::Sequence(SequenceData::String(CharType::UTF8(data))) => format!("{}", data), + _ => panic!("failed to parse UTF8 string "), + }; + assert_eq!(s, "u\"new\\nline\\u{f09f9881}\""); + } else { + panic!("failed to parse utf8 string value"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 21 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("u\"\\m nope\""); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(stmts[0].match_placeholder().unwrap(), "u\"\\m nope\""); + assert_eq!( + stmts[0].span, + Span { + 
start_line: 1, + start_column: 1, + end_line: 1, + end_column: 10 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "unknown escape character, 'm'".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 3, + end_line: 1, + end_column: 4 + } + ); + } + + #[test] + fn test_parse_identifier() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("foo-bar"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(v) = stmts[0].match_atom() { + assert_eq!(v.as_str(), "foo-bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 7 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong"); + assert!(!success); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "illegal name (too long), 'veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong'".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 132 + } + ); + } + + #[test] + fn test_parse_list() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("(foo 1 u3 \"hel\tlo\")"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 19 + } + ); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "foo"), + _ => 
panic!("expected atom 'foo'"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + match exprs[1].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Int(1)) => (), + _ => panic!("expected Value(1)"), + } + assert_eq!( + exprs[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 6 + } + ); + match exprs[2].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::UInt(3)) => (), + _ => panic!("expected Value(u3)"), + } + assert_eq!( + exprs[2].span, + Span { + start_line: 1, + start_column: 8, + end_line: 1, + end_column: 9 + } + ); + match &exprs[3].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Sequence(SequenceData::String( + CharType::ASCII(ASCIIData { data: s }), + ))) => assert_eq!(s, "hel\tlo".as_bytes()), + _ => panic!("expected Value(\"hel\tlo\")"), + } + assert_eq!( + exprs[3].span, + Span { + start_line: 1, + start_column: 11, + end_line: 1, + end_column: 18 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(1 2 3"); + assert!(!success); + assert_eq!(stmts.len(), 1); + let exprs = stmts[0].match_list().unwrap(); + match exprs[0].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Int(1)) => (), + _ => panic!("expected Value(1)"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + match exprs[1].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Int(2)) => (), + _ => panic!("expected Value(2)"), + } + assert_eq!( + exprs[1].span, + Span { + start_line: 1, + start_column: 4, + end_line: 1, + end_column: 4 + } + ); + match exprs[2].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Int(3)) => (), + _ => panic!("expected Value(3)"), + } + assert_eq!( + exprs[2].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 6 + } + ); + assert_eq!(diagnostics.len(), 2); + assert_eq!(diagnostics[0].level, Level::Error); 
+ assert_eq!(diagnostics[0].message, "expected closing ')'".to_string()); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 7, + end_line: 1, + end_column: 7 + } + ); + + assert_eq!(diagnostics[1].level, Level::Note); + assert_eq!(diagnostics[1].message, "to match this '('".to_string()); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(1 2 3\n )"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 2, + end_column: 2 + } + ); + let exprs = stmts[0].match_list().unwrap(); + assert_eq!(exprs.len(), 3); + } + + #[test] + fn test_parse_list_comment() { + let (stmts, diagnostics, success) = parse_collect_diagnostics( + "(foo ;; first comment\n bar\n ;; second comment\n baz;; no space\n)", + ); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 5, + end_column: 1 + } + ); + let exprs = stmts[0].match_list().unwrap(); + assert_eq!(exprs.len(), 6); + assert_eq!(exprs[0].match_atom().unwrap().as_str(), "foo"); + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + assert_eq!(exprs[1].match_comment().unwrap(), "first comment"); + assert_eq!( + exprs[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 21 + } + ); + assert_eq!(exprs[2].match_atom().unwrap().as_str(), "bar"); + assert_eq!( + exprs[2].span, + Span { + start_line: 2, + start_column: 3, + end_line: 2, + end_column: 5 + } + ); + assert_eq!(exprs[3].match_comment().unwrap(), "second comment"); + assert_eq!( + exprs[3].span, + Span { + start_line: 3, + start_column: 3, + end_line: 3, + end_column: 19 + } + ); + 
assert_eq!(exprs[4].match_atom().unwrap().as_str(), "baz"); + assert_eq!( + exprs[4].span, + Span { + start_line: 4, + start_column: 3, + end_line: 4, + end_column: 5 + } + ); + assert_eq!(exprs[5].match_comment().unwrap(), "no space"); + assert_eq!( + exprs[5].span, + Span { + start_line: 4, + start_column: 6, + end_line: 4, + end_column: 16 + } + ); + } + + #[test] + fn test_parse_tuple() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo: bar}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 10 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 7, + end_line: 1, + end_column: 9, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo: bar,}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 11 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( 
+ list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 7, + end_line: 1, + end_column: 9, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 9 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar,}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 10 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + 
start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar }"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 10 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar ,}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 11 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + 
end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar,baz:goo}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 17 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 4); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8, + } + ); + if let Some(name) = list[2].match_atom() { + assert_eq!(name.as_str(), "baz"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[2].span, + Span { + start_line: 1, + start_column: 10, + end_line: 1, + end_column: 12, + } + ); + if let Some(name) = list[3].match_atom() { + assert_eq!(name.as_str(), "goo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[3].span, + Span { + start_line: 1, + start_column: 14, + end_line: 1, + end_column: 16, + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{1: u2, 3: u4}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + 
start_column: 1, + end_line: 1, + end_column: 14 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 4); + match list[0].match_atom_value() { + Some(Value::Int(1)) => (), + _ => panic!("failed to parse tuple"), + } + match list[1].match_atom_value() { + Some(Value::UInt(2)) => (), + _ => panic!("failed to parse tuple"), + } + match list[2].match_atom_value() { + Some(Value::Int(3)) => (), + _ => panic!("failed to parse tuple"), + } + match list[3].match_atom_value() { + Some(Value::UInt(4)) => (), + _ => panic!("failed to parse tuple"), + } + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{, foo: bar}"); + assert!(!success); + assert_eq!(stmts.len(), 1); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 4, + end_line: 1, + end_column: 6 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 9, + end_line: 1, + end_column: 11 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!(diagnostics[0].message, "unexpected ','".to_string()); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{ "); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 
1, + end_line: 1, + end_column: 3 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert!(list.is_empty()); + assert_eq!(diagnostics.len(), 2); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!(diagnostics[0].message, "expected closing '}'".to_string()); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 4, + end_line: 1, + end_column: 4 + } + ); + assert_eq!(diagnostics[1].level, Level::Note); + assert_eq!(diagnostics[1].message, "to match this '{'".to_string()); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 5 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + assert_eq!(list[1].match_placeholder().unwrap(), ""); + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 6 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "expected value expression for tuple".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 5 + } + ); + + let (stmts, diagnostics, success) = 
parse_collect_diagnostics("{foo:bar"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 8 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8 + } + ); + assert_eq!(diagnostics.len(), 2); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!(diagnostics[0].message, "expected closing '}'".to_string()); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 9, + end_line: 1, + end_column: 9 + } + ); + assert_eq!(diagnostics[1].level, Level::Note); + assert_eq!(diagnostics[1].message, "to match this '{'".to_string()); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo:bar boo:far}"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 17 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 4); + if let Some(name) = list[2].match_atom() { + assert_eq!(name.as_str(), "boo"); + } else { + panic!("failed to parse 
identifier"); + } + assert_eq!( + list[2].span, + Span { + start_line: 1, + start_column: 10, + end_line: 1, + end_column: 12 + } + ); + if let Some(name) = list[3].match_atom() { + assert_eq!(name.as_str(), "far"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[3].span, + Span { + start_line: 1, + start_column: 14, + end_line: 1, + end_column: 16 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "expected ',' separating key-value pairs in tuple".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 10, + end_line: 1, + end_column: 12 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("{foo bar}"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 9 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + if let Some(name) = list[1].match_atom() { + assert_eq!(name.as_str(), "bar"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "expected ':' after key in tuple".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8 + } + ); + + let (stmts, diagnostics, success) = 
parse_collect_diagnostics("{foo"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 4 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 2); + if let Some(name) = list[0].match_atom() { + assert_eq!(name.as_str(), "foo"); + } else { + panic!("failed to parse identifier"); + } + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + assert_eq!(list[1].match_placeholder().unwrap(), ""); + assert_eq!( + list[1].span, + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 5 + } + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].message, + "expected ':' after key in tuple".to_string() + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 5 + } + ); + } + + #[test] + fn test_parse_tuple_comments() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("{ ;; before the key\n foo ;; before the colon\n : ;; after the colon\n ;; comment on newline\n bar ;; before comma\n ,\n ;; after comma\n baz : qux ;; before closing\n}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 9, + end_column: 1 + } + ); + let list: &[PreSymbolicExpression] = match stmts[0].pre_expr { + PreSymbolicExpressionType::Tuple(ref list) => list, + _ => panic!("failed to parse tuple"), + }; + assert_eq!(list.len(), 11); + assert_eq!(list[0].match_comment().unwrap(), "before the key"); + assert_eq!( + list[0].span, + Span { + start_line: 1, + start_column: 3, + end_line: 1, + end_column: 19, + } + ); + 
assert_eq!(list[1].match_atom().unwrap().as_str(), "foo"); + assert_eq!( + list[1].span, + Span { + start_line: 2, + start_column: 3, + end_line: 2, + end_column: 5 + } + ); + assert_eq!(list[2].match_comment().unwrap(), "before the colon"); + assert_eq!( + list[2].span, + Span { + start_line: 2, + start_column: 7, + end_line: 2, + end_column: 25, + } + ); + assert_eq!(list[3].match_comment().unwrap(), "after the colon"); + assert_eq!( + list[3].span, + Span { + start_line: 3, + start_column: 5, + end_line: 3, + end_column: 22, + } + ); + assert_eq!(list[4].match_comment().unwrap(), "comment on newline"); + assert_eq!( + list[4].span, + Span { + start_line: 4, + start_column: 3, + end_line: 4, + end_column: 23, + } + ); + assert_eq!(list[5].match_atom().unwrap().as_str(), "bar"); + assert_eq!( + list[5].span, + Span { + start_line: 5, + start_column: 3, + end_line: 5, + end_column: 5 + } + ); + assert_eq!(list[6].match_comment().unwrap(), "before comma"); + assert_eq!( + list[6].span, + Span { + start_line: 5, + start_column: 7, + end_line: 5, + end_column: 21, + } + ); + assert_eq!(list[7].match_comment().unwrap(), "after comma"); + assert_eq!( + list[7].span, + Span { + start_line: 7, + start_column: 3, + end_line: 7, + end_column: 16, + } + ); + assert_eq!(list[8].match_atom().unwrap().as_str(), "baz"); + assert_eq!( + list[8].span, + Span { + start_line: 8, + start_column: 2, + end_line: 8, + end_column: 4 + } + ); + assert_eq!(list[9].match_atom().unwrap().as_str(), "qux"); + assert_eq!( + list[9].span, + Span { + start_line: 8, + start_column: 8, + end_line: 8, + end_column: 10 + } + ); + assert_eq!(list[10].match_comment().unwrap(), "before closing"); + assert_eq!( + list[10].span, + Span { + start_line: 8, + start_column: 12, + end_line: 8, + end_column: 28, + } + ); + } + + #[test] + fn test_parse_bad() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("(1, 3)"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + 
stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 6 + } + ); + let exprs = stmts[0].match_list().unwrap(); + assert_eq!(exprs.len(), 2); + match exprs[0].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Int(1)) => (), + _ => panic!("expected Value(1)"), + } + match exprs[1].pre_expr { + PreSymbolicExpressionType::AtomValue(Value::Int(3)) => (), + _ => panic!("expected Value(3)"), + } + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "unexpected ','"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 3, + end_line: 1, + end_column: 3 + } + ); + } + + #[test] + fn test_parse_principal() { + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 30 + } + ); + if let Some(Value::Principal(data)) = stmts[0].match_atom_value() { + match data { + PrincipalData::Standard(data) => { + assert_eq!(data.to_address(), "ST000000000000000000002AMW42H") + } + _ => panic!("failed to parse principal"), + } + } + + let (stmts, diagnostics, success) = parse_collect_diagnostics("' u42"); + assert!(!success); + assert_eq!(stmts.len(), 2); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + assert_eq!(stmts[0].match_placeholder().unwrap(), "'"); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "invalid principal literal"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + match stmts[1].match_atom_value() { + Some(Value::UInt(42)) => (), + _ => panic!("failed to parse uint after principal"), + } + + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H.silly-goose"); + 
assert!(success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 42 + } + ); + if let Some(Value::Principal(data)) = stmts[0].match_atom_value() { + match data { + PrincipalData::Contract(data) => { + assert_eq!( + data.to_string(), + "ST000000000000000000002AMW42H.silly-goose" + ) + } + _ => panic!("failed to parse principal"), + } + } + assert!(diagnostics.is_empty()); + + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H.123"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].match_placeholder().unwrap(), + "'ST000000000000000000002AMW42H.123" + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "expected contract identifier"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 32, + end_line: 1, + end_column: 34 + } + ); + + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H.illegal?name "); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].match_placeholder().unwrap(), + "'ST000000000000000000002AMW42H.illegal?name" + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!( + diagnostics[0].message, + "Illegal contract name: 'illegal?name'" + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 32, + end_line: 1, + end_column: 43 + } + ); + + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H.this-name-is-way-too-many-characters-to-be-a-legal-contract-name "); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(stmts[0].match_placeholder().unwrap(), "'ST000000000000000000002AMW42H.this-name-is-way-too-many-characters-to-be-a-legal-contract-name"); + assert_eq!(diagnostics.len(), 1); + assert_eq!( + diagnostics[0].message, + "contract name 
'this-name-is-way-too-many-characters-to-be-a-legal-contract-name' is too long" + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 32, + end_line: 1, + end_column: 95 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(".fancy_pants"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 12 + } + ); + match &stmts[0].pre_expr { + PreSymbolicExpressionType::SugaredContractIdentifier(name) => { + assert_eq!(name.as_str(), "fancy_pants") + } + _ => panic!("failed to parse sugared contract identifier"), + } + assert!(diagnostics.is_empty()); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(".123"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(stmts[0].match_placeholder().unwrap(), ".123"); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "expected contract identifier"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(".illegal?name "); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(stmts[0].match_placeholder().unwrap(), ".illegal?name"); + assert_eq!(diagnostics.len(), 1); + assert_eq!( + diagnostics[0].message, + "Illegal contract name: 'illegal?name'" + ); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 13 + } + ); + + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H.foo.bar"); + assert!(success); + assert_eq!(stmts.len(), 1); + match &stmts[0].pre_expr { + PreSymbolicExpressionType::FieldIdentifier(trait_id) => { + assert_eq!( + format!("{}", trait_id), + "ST000000000000000000002AMW42H.foo.bar" + ); + } + _ => panic!("failed to parse field identifier"), + } + assert_eq!( + 
stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 38 + } + ); + assert!(diagnostics.is_empty()); + + let (stmts, diagnostics, success) = + parse_collect_diagnostics("'ST000000000000000000002AMW42H.foo.123"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].match_placeholder().unwrap(), + "'ST000000000000000000002AMW42H.foo.123" + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "expected trait identifier"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 36, + end_line: 1, + end_column: 38 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(".foo.bar"); + assert!(success); + assert_eq!(stmts.len(), 1); + match &stmts[0].pre_expr { + PreSymbolicExpressionType::SugaredFieldIdentifier(contract_name, trait_name) => { + assert_eq!(contract_name.as_str(), "foo"); + assert_eq!(trait_name.as_str(), "bar"); + } + _ => panic!("failed to parse sugared trait identifier"), + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 8 + } + ); + assert!(diagnostics.is_empty()); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(".foo.123"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(stmts[0].match_placeholder().unwrap(), ".foo.123"); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "expected trait identifier"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 8 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics( + ".this-name-is-way-too-many-characters-to-be-a-legal-contract-name", + ); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!( + stmts[0].match_placeholder().unwrap(), + ".this-name-is-way-too-many-characters-to-be-a-legal-contract-name" + ); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, 
"contract name 'this-name-is-way-too-many-characters-to-be-a-legal-contract-name' is too long"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 65 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(".foo.veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(stmts[0].match_placeholder().unwrap(),".foo.veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong"); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "illegal name (too long), 'veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong'"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 137 + } + ); + } + + #[test] + fn test_parse_trait_reference() { + let (stmts, diagnostics, success) = parse_collect_diagnostics(""); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + if let Some(name) = stmts[0].match_trait_reference() { + assert_eq!(name.as_str(), "foo-bar"); + } else { + panic!("failed to parse trait reference"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 9 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("\n\t'"); + assert_eq!(diagnostics[1].level, Level::Note); + assert_eq!(diagnostics[1].message, "to match this '<'".to_string()); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 2, + start_column: 2, + end_line: 2, + end_column: 2 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("<123>"); + assert!(!success); + assert_eq!(stmts.len(), 2); + 
assert_eq!(diagnostics.len(), 2); + assert_eq!(diagnostics[0].message, "expected separator"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + assert_eq!( + diagnostics[1].message, + "invalid character, '>', in int literal" + ); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 5 + } + ); + if let Some(name) = stmts[0].match_atom() { + assert_eq!(name.as_str(), "<"); + } else { + panic!("failed to parse invalid trait reference"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + if let Some(s) = stmts[1].match_placeholder() { + assert_eq!(s, "123>"); + } else { + panic!("failed to parse trait reference"); + } + assert_eq!( + stmts[1].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 5 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("<123 "); + assert!(!success); + assert_eq!(stmts.len(), 2); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, "expected separator"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + if let Some(name) = stmts[0].match_atom() { + assert_eq!(name.as_str(), "<"); + } else { + panic!("failed to parse invalid trait reference"); + } + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 1 + } + ); + match stmts[1].match_atom_value() { + Some(Value::Int(123)) => (), + _ => panic!("failed to parse int with errors"), + } + assert_eq!( + stmts[1].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 4 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("<veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong>"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(diagnostics.len(), 1); + assert_eq!(diagnostics[0].message, 
"illegal name (too long), 'veryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryveryverylong'"); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 134 + } + ); + } + + #[test] + fn test_parse_ops() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("(+ 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 7 + } + ); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "+"), + _ => panic!("expected atom '+'"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + match exprs[1].match_atom_value() { + Some(Value::Int(1)) => (), + _ => panic!("failed to parse int in list"), + } + match exprs[2].match_atom_value() { + Some(Value::Int(2)) => (), + _ => panic!("failed to parse int in list"), + } + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(- 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "-"), + _ => panic!("expected atom '-'"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(* 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "*"), + _ => panic!("expected atom '*'"), + } + assert_eq!( + 
exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(/ 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "/"), + _ => panic!("expected atom '/'"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(< 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "<"), + _ => panic!("expected atom '<'"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(<= 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), "<="), + _ => panic!("expected atom '<='"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 3 + } + ); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(> 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), ">"), + _ => panic!("expected atom '>'"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 2 + } + ); + + let 
(stmts, diagnostics, success) = parse_collect_diagnostics("(>= 1 2)"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + match &exprs[0].pre_expr { + PreSymbolicExpressionType::Atom(cname) => assert_eq!(cname.as_str(), ">="), + _ => panic!("expected atom '>='"), + } + assert_eq!( + exprs[0].span, + Span { + start_line: 1, + start_column: 2, + end_line: 1, + end_column: 3 + } + ); + } + + #[test] + fn test_parse_buffer() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("0x1234"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + assert_eq!( + stmts[0].span, + Span { + start_line: 1, + start_column: 1, + end_line: 1, + end_column: 6 + } + ); + let val = stmts[0].match_atom_value().unwrap().clone(); + assert_eq!(val.expect_buff(2).unwrap(), vec![0x12, 0x34]); + } + + #[test] + fn test_parse_errors() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("123 }"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(diagnostics.len(), 1); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 5 + } + ); + assert_eq!(diagnostics[0].message, "unexpected '}'"); + + let (stmts, diagnostics, success) = parse_collect_diagnostics("(foo))"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(diagnostics.len(), 1); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 6, + end_line: 1, + end_column: 6 + } + ); + assert_eq!(diagnostics[0].message, "unexpected ')'"); + } + + #[test] + fn test_lexer_diagnostics() { + let (stmts, diagnostics, success) = + parse_collect_diagnostics("(print \"newline\n in string\")"); + assert!(!success); + assert_eq!(stmts.len(), 1); + assert_eq!(diagnostics.len(), 3); + assert_eq!(diagnostics[0].message, "expected closing '\"'"); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( 
+ diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 16, + end_line: 1, + end_column: 16 + } + ); + + assert_eq!(diagnostics[1].message, "to match this '\"'"); + assert_eq!(diagnostics[1].level, Level::Note); + assert_eq!( + diagnostics[1].spans[0], + Span { + start_line: 1, + start_column: 8, + end_line: 1, + end_column: 8 + } + ); + + // This last error is because it doesn't know what to do with the next line + assert_eq!( + diagnostics[2].message, + "invalid character, '\"', in identifier" + ); + } + + #[test] + fn test_consume_invalid_symbols() { + let (stmts, diagnostics, success) = + parse_collect_diagnostics(" # here is a python comment\n\n # and another\n(foo)"); + assert!(!success); + assert_eq!(stmts.len(), 10); + } + + #[test] + fn test_handle_comments() { + let (stmts, diagnostics, success) = + parse_collect_diagnostics(" ;; here is a comment\n\n ;; and another\n(foo)"); + assert!(success); + assert_eq!(stmts.len(), 3); + assert!(diagnostics.is_empty()); + assert_eq!(stmts[0].match_comment().unwrap(), "here is a comment"); + assert_eq!(stmts[1].match_comment().unwrap(), "and another"); + stmts[2].match_list().unwrap(); + } + + #[test] + fn test_comment_in_list() { + let (stmts, diagnostics, success) = parse_collect_diagnostics( + "(\n foo ;; comment after\n ;; comment on its own line\n bar\n)", + ); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + let exprs = stmts[0].match_list().unwrap(); + assert_eq!(exprs.len(), 4); + assert_eq!(exprs[0].match_atom().unwrap().as_str(), "foo"); + assert_eq!(exprs[1].match_comment().unwrap(), "comment after"); + assert_eq!(exprs[2].match_comment().unwrap(), "comment on its own line"); + assert_eq!(exprs[3].match_atom().unwrap().as_str(), "bar"); + } + + #[test] + fn test_comma_at_end() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("{this: is, a:tuple,}"); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + + 
let (stmts, diagnostics, success) = parse_collect_diagnostics( + r#" +{ + and: so, + is: this, +}"#, + ); + assert!(success); + assert_eq!(stmts.len(), 1); + assert!(diagnostics.is_empty()); + } + + #[test] + fn test_missing_whitespace() { + let (stmts, diagnostics, success) = parse_collect_diagnostics("(foo(bar))"); + assert!(!success); + assert_eq!(stmts.len(), 1); + let exprs = stmts[0].match_list().unwrap(); + assert_eq!(exprs.len(), 2); + assert_eq!(diagnostics.len(), 1); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 5, + end_line: 1, + end_column: 9 + } + ); + assert_eq!( + diagnostics[0].message, + "expected whitespace before expression" + ); + } + + #[test] + fn test_parse_fail_fast() { + match parse("42g !ok") { + Ok(_) => panic!("fail_fast mode should have returned an error"), + Err(e) => assert_eq!(e.err, ParseErrors::Lexer(LexerError::InvalidCharInt('g'))), + } + } + + #[test] + fn test_empty_contract() { + let (stmts, diagnostics, success) = parse_collect_diagnostics(""); + assert!(success); + assert!(stmts.is_empty()); + assert!(diagnostics.is_empty()); + } + + #[test] + fn test_stack_depth() { + let stack_limit = + (AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) + 1) as usize; + let exceeds_stack_depth_tuple = format!( + "{}u1 {}", + "{ a : ".repeat(stack_limit / 2 + 1), + "} ".repeat(stack_limit / 2 + 1) + ); + let exceeds_stack_depth_list = format!( + "{}u1 {}", + "(list ".repeat(stack_limit + 1), + ")".repeat(stack_limit + 1) + ); + + assert!(match parse(&exceeds_stack_depth_list).unwrap_err().err { + ParseErrors::ExpressionStackDepthTooDeep => true, + x => panic!("expected a stack depth too deep error, got {:?}", x), + }); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(&exceeds_stack_depth_list); + assert!(!success); + assert!(!diagnostics.is_empty()); + assert_eq!( + diagnostics[0].message, + "AST has too deep of an expression nesting. 
The maximum stack depth is 64" + ); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 421, + end_line: 1, + end_column: 421 + } + ); + + assert!(match parse(&exceeds_stack_depth_tuple).unwrap_err().err { + ParseErrors::ExpressionStackDepthTooDeep => true, + x => panic!("expected a stack depth too deep error, got {:?}", x), + }); + + let (stmts, diagnostics, success) = parse_collect_diagnostics(&exceeds_stack_depth_tuple); + assert!(!success); + assert!(!diagnostics.is_empty()); + assert_eq!( + diagnostics[0].message, + "AST has too deep of an expression nesting. The maximum stack depth is 64" + ); + assert_eq!(diagnostics[0].level, Level::Error); + assert_eq!( + diagnostics[0].spans[0], + Span { + start_line: 1, + start_column: 211, + end_line: 1, + end_column: 211 + } + ); + } +} diff --git a/clarity/src/vm/callables.rs b/clarity/src/vm/callables.rs index 0eaf29c1fc..a343f0fadb 100644 --- a/clarity/src/vm/callables.rs +++ b/clarity/src/vm/callables.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . use std::collections::BTreeMap; -use std::fmt; use stacks_common::types::StacksEpochId; @@ -28,6 +27,8 @@ use crate::vm::contexts::ContractContext; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::runtime_cost; use crate::vm::errors::{check_argument_count, Error, InterpreterResult as Result}; +// Re-export the for retro-compatibility. 
+pub use crate::vm::representations::FunctionIdentifier; use crate::vm::representations::{ClarityName, SymbolicExpression}; use crate::vm::types::{ CallableData, ListData, ListTypeData, OptionalData, PrincipalData, ResponseData, SequenceData, @@ -118,17 +119,6 @@ pub fn cost_input_sized_vararg(args: &[Value]) -> Result { .map_err(Error::from) } -#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] -pub struct FunctionIdentifier { - identifier: String, -} - -impl fmt::Display for FunctionIdentifier { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.identifier) - } -} - impl DefinedFunction { pub fn new( arguments: Vec<(ClarityName, TypeSignature)>, @@ -393,18 +383,6 @@ impl CallableType { } } -impl FunctionIdentifier { - fn new_native_function(name: &str) -> FunctionIdentifier { - let identifier = format!("_native_:{}", name); - FunctionIdentifier { identifier } - } - - fn new_user_function(name: &str, context: &str) -> FunctionIdentifier { - let identifier = format!("{}:{}", context, name); - FunctionIdentifier { identifier } - } -} - // Implicitly cast principals to traits and traits to other traits as needed, // recursing into compound types. This function does not check for legality of // these casts, as that is done in the type-checker. Note: depth of recursion diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 3b98c4b828..b9a7220474 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -35,6 +35,8 @@ use crate::vm::database::{ ClarityDatabase, DataMapMetadata, DataVariableMetadata, FungibleTokenMetadata, NonFungibleTokenMetadata, }; +// Re-export the for retro-compatibility. 
+pub use crate::vm::errors::StackTrace; use crate::vm::errors::{ CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType, }; @@ -248,8 +250,6 @@ pub struct CallStack { apply_depth: usize, } -pub type StackTrace = Vec<FunctionIdentifier>; - pub const TRANSIENT_CONTRACT_NAME: &str = "__transient"; impl Default for AssetMap { diff --git a/clarity/src/vm/core.rs b/clarity/src/vm/core.rs new file mode 100644 index 0000000000..13f4485378 --- /dev/null +++ b/clarity/src/vm/core.rs @@ -0,0 +1,678 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +use std::collections::BTreeMap; + +use serde_json; +use stacks_common::types::StacksEpochId; + +pub use crate::vm::analysis::ContractAnalysis; +#[cfg(any(test, feature = "testing"))] +use crate::vm::ast; +pub use crate::vm::ast::ContractAST; +use crate::vm::callables::CallableType; +pub use crate::vm::contexts::{ + CallStack, ContractContext, Environment, ExecutionTimeTracker, GlobalContext, LocalContext, + MAX_CONTEXT_DEPTH, +}; +pub use crate::vm::costs::cost_functions::ClarityCostFunction; +pub use crate::vm::costs::{ + runtime_cost, CostErrors, CostOverflowingMath, CostTracker, ExecutionCost, LimitedCostTracker, + MemoryConsumer, +}; +// publish the non-generic StacksEpoch form for use throughout module +pub use crate::vm::database::clarity_db::StacksEpoch; +use crate::vm::diagnostic::Diagnostic; +pub use crate::vm::errors::{ + CheckErrors, Error, InterpreterError, InterpreterResult as Result, RuntimeErrorType, +}; +use crate::vm::functions::define::DefineResult; +pub use crate::vm::functions::stx_transfer_consolidated; +use crate::vm::{ + functions, variables, ClarityVersion, PrincipalData, SymbolicExpression, TypeSignature, Value, + MAX_CALL_STACK_DEPTH, +}; + +#[derive(Debug, Clone)] +pub struct ParsedContract { + pub contract_identifier: String, + pub code: String, + pub function_args: BTreeMap<String, Vec<String>>, + pub ast: ContractAST, + pub analysis: ContractAnalysis, +} + +#[derive(Debug, Clone)] +pub struct ContractEvaluationResult { + pub result: Option<Value>, + pub contract: ParsedContract, +} + +#[derive(Debug, Clone)] +pub struct SnippetEvaluationResult { + pub result: Value, +} + +#[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] +pub enum EvaluationResult { + Contract(ContractEvaluationResult), + Snippet(SnippetEvaluationResult), +} + +#[derive(Debug, Clone)] +pub struct ExecutionResult { + pub result: EvaluationResult, + pub events: Vec<serde_json::Value>, + pub cost: Option<CostSynthesis>, + pub diagnostics: Vec<Diagnostic>, +} + +#[derive(Clone, Debug)] +pub struct CostSynthesis { + pub total: 
ExecutionCost, + pub limit: ExecutionCost, + pub memory: u64, + pub memory_limit: u64, +} + +impl CostSynthesis { + pub fn from_cost_tracker(cost_tracker: &LimitedCostTracker) -> CostSynthesis { + CostSynthesis { + total: cost_tracker.get_total(), + limit: cost_tracker.get_limit(), + memory: cost_tracker.get_memory(), + memory_limit: cost_tracker.get_memory_limit(), + } + } +} + +/// EvalHook defines an interface for hooks to execute during evaluation. +pub trait EvalHook { + // Called before the expression is evaluated + fn will_begin_eval( + &mut self, + _env: &mut Environment, + _context: &LocalContext, + _expr: &SymbolicExpression, + ); + + // Called after the expression is evaluated + fn did_finish_eval( + &mut self, + _env: &mut Environment, + _context: &LocalContext, + _expr: &SymbolicExpression, + _res: &core::result::Result, + ); + + // Called upon completion of the execution + fn did_complete(&mut self, _result: core::result::Result<&mut ExecutionResult, String>); +} + +fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) -> Result { + if name.starts_with(char::is_numeric) || name.starts_with('\'') { + Err(InterpreterError::BadSymbolicRepresentation(format!( + "Unexpected variable name: {}", + name + )) + .into()) + } else if let Some(value) = variables::lookup_reserved_variable(name, context, env)? 
{ + Ok(value) + } else { + runtime_cost( + ClarityCostFunction::LookupVariableDepth, + env, + context.depth(), + )?; + if let Some(value) = context.lookup_variable(name) { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; + Ok(value.clone()) + } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { + runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; + let (value, _) = + Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value) + .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; + Ok(value) + } else if let Some(callable_data) = context.lookup_callable_contract(name) { + if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { + Ok(callable_data.contract_identifier.clone().into()) + } else { + Ok(Value::CallableContract(callable_data.clone())) + } + } else { + Err(CheckErrors::UndefinedVariable(name.to_string()).into()) + } + } +} + +pub fn lookup_function(name: &str, env: &mut Environment) -> Result { + runtime_cost(ClarityCostFunction::LookupFunction, env, 0)?; + + if let Some(result) = + functions::lookup_reserved_functions(name, env.contract_context.get_clarity_version()) + { + Ok(result) + } else { + let user_function = env + .contract_context + .lookup_function(name) + .ok_or(CheckErrors::UndefinedFunction(name.to_string()))?; + Ok(CallableType::UserFunction(user_function)) + } +} + +fn add_stack_trace(result: &mut Result, env: &Environment) { + if let Err(Error::Runtime(_, ref mut stack_trace)) = result { + if stack_trace.is_none() { + stack_trace.replace(env.call_stack.make_stack_trace()); + } + } +} + +pub fn apply( + function: &CallableType, + args: &[SymbolicExpression], + env: &mut Environment, + context: &LocalContext, +) -> Result { + let identifier = function.get_identifier(); + // Aaron: in non-debug executions, we shouldn't track a full call-stack. + // only enough to do recursion detection. 
+ + // do recursion check on user functions. + let track_recursion = matches!(function, CallableType::UserFunction(_)); + if track_recursion && env.call_stack.contains(&identifier) { + return Err(CheckErrors::CircularReference(vec![identifier.to_string()]).into()); + } + + if env.call_stack.depth() >= MAX_CALL_STACK_DEPTH { + return Err(RuntimeErrorType::MaxStackDepthReached.into()); + } + + if let CallableType::SpecialFunction(_, function) = function { + env.call_stack.insert(&identifier, track_recursion); + let mut resp = function(args, env, context); + add_stack_trace(&mut resp, env); + env.call_stack.remove(&identifier, track_recursion)?; + resp + } else { + let mut used_memory = 0; + let mut evaluated_args = Vec::with_capacity(args.len()); + env.call_stack.incr_apply_depth(); + for arg_x in args.iter() { + let arg_value = match eval(arg_x, env, context) { + Ok(x) => x, + Err(e) => { + env.drop_memory(used_memory)?; + env.call_stack.decr_apply_depth(); + return Err(e); + } + }; + let arg_use = arg_value.get_memory_use()?; + match env.add_memory(arg_use) { + Ok(_x) => {} + Err(e) => { + env.drop_memory(used_memory)?; + env.call_stack.decr_apply_depth(); + return Err(Error::from(e)); + } + }; + used_memory += arg_value.get_memory_use()?; + evaluated_args.push(arg_value); + } + env.call_stack.decr_apply_depth(); + + env.call_stack.insert(&identifier, track_recursion); + let mut resp = match function { + CallableType::NativeFunction(_, function, cost_function) => { + runtime_cost(*cost_function, env, evaluated_args.len()) + .map_err(Error::from) + .and_then(|_| function.apply(evaluated_args, env)) + } + CallableType::NativeFunction205(_, function, cost_function, cost_input_handle) => { + let cost_input = if env.epoch() >= &StacksEpochId::Epoch2_05 { + cost_input_handle(evaluated_args.as_slice())? 
+ } else { + evaluated_args.len() as u64 + }; + runtime_cost(*cost_function, env, cost_input) + .map_err(Error::from) + .and_then(|_| function.apply(evaluated_args, env)) + } + CallableType::UserFunction(function) => function.apply(&evaluated_args, env), + _ => return Err(InterpreterError::Expect("Should be unreachable.".into()).into()), + }; + add_stack_trace(&mut resp, env); + env.drop_memory(used_memory)?; + env.call_stack.remove(&identifier, track_recursion)?; + resp + } +} + +fn check_max_execution_time_expired(global_context: &GlobalContext) -> Result<()> { + match global_context.execution_time_tracker { + ExecutionTimeTracker::NoTracking => Ok(()), + ExecutionTimeTracker::MaxTime { + start_time, + max_duration, + } => { + if start_time.elapsed() >= max_duration { + Err(CostErrors::ExecutionTimeExpired.into()) + } else { + Ok(()) + } + } + } +} + +pub fn eval( + exp: &SymbolicExpression, + env: &mut Environment, + context: &LocalContext, +) -> Result { + use crate::vm::representations::SymbolicExpressionType::{ + Atom, AtomValue, Field, List, LiteralValue, TraitReference, + }; + + check_max_execution_time_expired(env.global_context)?; + + if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() { + for hook in eval_hooks.iter_mut() { + hook.will_begin_eval(env, context, exp); + } + env.global_context.eval_hooks = Some(eval_hooks); + } + + let res = match exp.expr { + AtomValue(ref value) | LiteralValue(ref value) => Ok(value.clone()), + Atom(ref value) => lookup_variable(value, context, env), + List(ref children) => { + let (function_variable, rest) = children + .split_first() + .ok_or(CheckErrors::NonFunctionApplication)?; + + let function_name = function_variable + .match_atom() + .ok_or(CheckErrors::BadFunctionName)?; + let f = lookup_function(function_name, env)?; + apply(&f, rest, env, context) + } + TraitReference(_, _) | Field(_) => { + return Err(InterpreterError::BadSymbolicRepresentation( + "Unexpected trait reference".into(), + ) + 
.into()) + } + }; + + if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() { + for hook in eval_hooks.iter_mut() { + hook.did_finish_eval(env, context, exp, &res); + } + env.global_context.eval_hooks = Some(eval_hooks); + } + + res +} + +pub fn is_reserved(name: &str, version: &ClarityVersion) -> bool { + functions::lookup_reserved_functions(name, version).is_some() + || variables::is_reserved_name(name, version) +} + +/// This function evaluates a list of expressions, sharing a global context. +/// It returns the final evaluated result. +/// Used for the initialization of a new contract. +pub fn eval_all( + expressions: &[SymbolicExpression], + contract_context: &mut ContractContext, + global_context: &mut GlobalContext, + sponsor: Option, +) -> Result> { + let mut last_executed = None; + let context = LocalContext::new(); + let mut total_memory_use = 0; + + let publisher: PrincipalData = contract_context.contract_identifier.issuer.clone().into(); + + finally_drop_memory!(global_context, total_memory_use; { + for exp in expressions { + let try_define = global_context.execute(|context| { + let mut call_stack = CallStack::new(); + let mut env = Environment::new( + context, contract_context, &mut call_stack, Some(publisher.clone()), Some(publisher.clone()), sponsor.clone()); + functions::define::evaluate_define(exp, &mut env) + })?; + match try_define { + DefineResult::Variable(name, value) => { + runtime_cost(ClarityCostFunction::BindName, global_context, 0)?; + let value_memory_use = value.get_memory_use()?; + global_context.add_memory(value_memory_use)?; + total_memory_use += value_memory_use; + + contract_context.variables.insert(name, value); + }, + DefineResult::Function(name, value) => { + runtime_cost(ClarityCostFunction::BindName, global_context, 0)?; + + contract_context.functions.insert(name, value); + }, + DefineResult::PersistedVariable(name, value_type, value) => { + runtime_cost(ClarityCostFunction::CreateVar, global_context, 
value_type.size()?)?; + contract_context.persisted_names.insert(name.clone()); + + global_context.add_memory(value_type.type_size() + .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; + + global_context.add_memory(value.size()? as u64)?; + + let data_type = global_context.database.create_variable(&contract_context.contract_identifier, &name, value_type)?; + global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type, &global_context.epoch_id)?; + + contract_context.meta_data_var.insert(name, data_type); + }, + DefineResult::Map(name, key_type, value_type) => { + runtime_cost(ClarityCostFunction::CreateMap, global_context, + u64::from(key_type.size()?).cost_overflow_add( + u64::from(value_type.size()?))?)?; + contract_context.persisted_names.insert(name.clone()); + + global_context.add_memory(key_type.type_size() + .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; + global_context.add_memory(value_type.type_size() + .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; + + let data_type = global_context.database.create_map(&contract_context.contract_identifier, &name, key_type, value_type)?; + + contract_context.meta_data_map.insert(name, data_type); + }, + DefineResult::FungibleToken(name, total_supply) => { + runtime_cost(ClarityCostFunction::CreateFt, global_context, 0)?; + contract_context.persisted_names.insert(name.clone()); + + global_context.add_memory(TypeSignature::UIntType.type_size() + .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? 
as u64)?; + + let data_type = global_context.database.create_fungible_token(&contract_context.contract_identifier, &name, &total_supply)?; + + contract_context.meta_ft.insert(name, data_type); + }, + DefineResult::NonFungibleAsset(name, asset_type) => { + runtime_cost(ClarityCostFunction::CreateNft, global_context, asset_type.size()?)?; + contract_context.persisted_names.insert(name.clone()); + + global_context.add_memory(asset_type.type_size() + .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; + + let data_type = global_context.database.create_non_fungible_token(&contract_context.contract_identifier, &name, &asset_type)?; + + contract_context.meta_nft.insert(name, data_type); + }, + DefineResult::Trait(name, trait_type) => { + contract_context.defined_traits.insert(name, trait_type); + }, + DefineResult::UseTrait(_name, _trait_identifier) => {}, + DefineResult::ImplTrait(trait_identifier) => { + contract_context.implemented_traits.insert(trait_identifier); + }, + DefineResult::NoDefine => { + // not a define function, evaluate normally. + global_context.execute(|global_context| { + let mut call_stack = CallStack::new(); + let mut env = Environment::new( + global_context, contract_context, &mut call_stack, Some(publisher.clone()), Some(publisher.clone()), sponsor.clone()); + + let result = eval(exp, &mut env, &context)?; + last_executed = Some(result); + Ok(()) + })?; + } + } + } + + contract_context.data_size = total_memory_use; + Ok(last_executed) + }) +} + +/// Run provided program in a brand new environment, with a transient, empty +/// database. 
Only used for testing +/// This method executes the program in Epoch 2.0 *and* Epoch 2.05 and asserts +/// that the result is the same before returning the result +#[cfg(any(test, feature = "testing"))] +pub fn execute_on_network(program: &str, use_mainnet: bool) -> Result> { + use crate::vm::ast; + + let epoch_200_result = execute_with_parameters( + program, + ClarityVersion::Clarity2, + StacksEpochId::Epoch20, + ast::ASTRules::PrecheckSize, + use_mainnet, + ); + let epoch_205_result = execute_with_parameters( + program, + ClarityVersion::Clarity2, + StacksEpochId::Epoch2_05, + ast::ASTRules::PrecheckSize, + use_mainnet, + ); + + assert_eq!( + epoch_200_result, epoch_205_result, + "Epoch 2.0 and 2.05 should have same execution result, but did not for program `{}`", + program + ); + epoch_205_result +} + +/// Runs `program` in a test environment with the provided parameters. +#[cfg(any(test, feature = "testing"))] +pub fn execute_with_parameters_and_call_in_global_context( + program: &str, + clarity_version: ClarityVersion, + epoch: StacksEpochId, + ast_rules: ast::ASTRules, + use_mainnet: bool, + mut global_context_function: F, +) -> Result> +where + F: FnMut(&mut GlobalContext) -> Result<()>, +{ + use crate::vm::database::MemoryBackingStore; + use crate::vm::tests::test_only_mainnet_to_chain_id; + use crate::vm::types::QualifiedContractIdentifier; + + let contract_id = QualifiedContractIdentifier::transient(); + let mut contract_context = ContractContext::new(contract_id.clone(), clarity_version); + let mut marf = MemoryBackingStore::new(); + let conn = marf.as_clarity_db(); + let chain_id = test_only_mainnet_to_chain_id(use_mainnet); + let mut global_context = GlobalContext::new( + use_mainnet, + chain_id, + conn, + LimitedCostTracker::new_free(), + epoch, + ); + global_context.execute(|g| { + use crate::vm::ast; + + global_context_function(g)?; + let parsed = ast::build_ast_with_rules( + &contract_id, + program, + &mut (), + clarity_version, + epoch, + 
ast_rules, + )? + .expressions; + eval_all(&parsed, &mut contract_context, g, None) + }) +} + +#[cfg(any(test, feature = "testing"))] +pub fn execute_with_parameters( + program: &str, + clarity_version: ClarityVersion, + epoch: StacksEpochId, + ast_rules: ast::ASTRules, + use_mainnet: bool, +) -> Result> { + execute_with_parameters_and_call_in_global_context( + program, + clarity_version, + epoch, + ast_rules, + use_mainnet, + |_| Ok(()), + ) +} + +/// Execute for test with `version`, Epoch20, testnet. +#[cfg(any(test, feature = "testing"))] +pub fn execute_against_version(program: &str, version: ClarityVersion) -> Result> { + execute_with_parameters( + program, + version, + StacksEpochId::Epoch20, + ast::ASTRules::PrecheckSize, + false, + ) +} + +/// Execute for test in Clarity1, Epoch20, testnet. +#[cfg(any(test, feature = "testing"))] +pub fn execute(program: &str) -> Result> { + execute_with_parameters( + program, + ClarityVersion::Clarity1, + StacksEpochId::Epoch20, + ast::ASTRules::PrecheckSize, + false, + ) +} + +/// Execute for test in Clarity1, Epoch20, testnet. +#[cfg(any(test, feature = "testing"))] +pub fn execute_with_limited_execution_time( + program: &str, + max_execution_time: std::time::Duration, +) -> Result> { + execute_with_parameters_and_call_in_global_context( + program, + ClarityVersion::Clarity1, + StacksEpochId::Epoch20, + ast::ASTRules::PrecheckSize, + false, + |g| { + g.set_max_execution_time(max_execution_time); + Ok(()) + }, + ) +} + +/// Execute for test in Clarity2, Epoch21, testnet. 
+#[cfg(any(test, feature = "testing"))] +pub fn execute_v2(program: &str) -> Result> { + execute_with_parameters( + program, + ClarityVersion::Clarity2, + StacksEpochId::Epoch21, + ast::ASTRules::PrecheckSize, + false, + ) +} + +#[cfg(test)] +mod test { + use stacks_common::consts::CHAIN_ID_TESTNET; + use stacks_common::types::StacksEpochId; + + use super::ClarityVersion; + use crate::vm::callables::{DefineType, DefinedFunction}; + use crate::vm::costs::LimitedCostTracker; + use crate::vm::database::MemoryBackingStore; + use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; + use crate::vm::{ + eval, CallStack, ContractContext, Environment, GlobalContext, LocalContext, + SymbolicExpression, Value, + }; + + #[test] + fn test_simple_user_function() { + // + // test program: + // (define (do_work x) (+ 5 x)) + // (define a 59) + // (do_work a) + // + let content = [SymbolicExpression::list(vec![ + SymbolicExpression::atom("do_work".into()), + SymbolicExpression::atom("a".into()), + ])]; + + let func_body = SymbolicExpression::list(vec![ + SymbolicExpression::atom("+".into()), + SymbolicExpression::atom_value(Value::Int(5)), + SymbolicExpression::atom("x".into()), + ]); + + let func_args = vec![("x".into(), TypeSignature::IntType)]; + let user_function = DefinedFunction::new( + func_args, + func_body, + DefineType::Private, + &"do_work".into(), + "", + ); + + let context = LocalContext::new(); + let mut contract_context = ContractContext::new( + QualifiedContractIdentifier::transient(), + ClarityVersion::Clarity1, + ); + + let mut marf = MemoryBackingStore::new(); + let mut global_context = GlobalContext::new( + false, + CHAIN_ID_TESTNET, + marf.as_clarity_db(), + LimitedCostTracker::new_free(), + StacksEpochId::Epoch2_05, + ); + + contract_context + .variables + .insert("a".into(), Value::Int(59)); + contract_context + .functions + .insert("do_work".into(), user_function); + + let mut call_stack = CallStack::new(); + let mut env = Environment::new( + 
&mut global_context, + &contract_context, + &mut call_stack, + None, + None, + None, + ); + assert_eq!(Ok(Value::Int(64)), eval(&content[0], &mut env, &context)); + } +} diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 0b1559795f..e54d8e9a66 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -14,396 +14,31 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{cmp, fmt}; +use std::fmt; -use costs_1::Costs1; -use costs_2::Costs2; -use costs_2_testnet::Costs2Testnet; -use costs_3::Costs3; -use hashbrown::HashMap; -use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; -use stacks_common::types::StacksEpochId; - -use super::errors::{CheckErrors, RuntimeErrorType}; -use crate::boot_util::boot_code_id; -use crate::vm::contexts::{ContractContext, GlobalContext}; -use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::database::clarity_store::NullBackingStore; -use crate::vm::database::ClarityDatabase; -use crate::vm::errors::InterpreterResult; -use crate::vm::types::signatures::FunctionType::Fixed; -use crate::vm::types::signatures::TupleTypeSignature; -use crate::vm::types::Value::UInt; -use crate::vm::types::{ - FunctionType, PrincipalData, QualifiedContractIdentifier, TupleData, TypeSignature, -}; -use crate::vm::{CallStack, ClarityName, Environment, LocalContext, SymbolicExpression, Value}; +#[cfg(feature = "vm")] pub mod constants; pub mod cost_functions; +#[cfg(feature = "vm")] #[allow(unused_variables)] pub mod costs_1; +#[cfg(feature = "vm")] #[allow(unused_variables)] pub mod costs_2; +#[cfg(feature = "vm")] #[allow(unused_variables)] pub mod costs_2_testnet; +#[cfg(feature = "vm")] #[allow(unused_variables)] pub mod costs_3; -type Result = std::result::Result; - -pub const CLARITY_MEMORY_LIMIT: u64 = 100 * 1000 * 1000; - -// TODO: factor out into a boot lib? 
-pub const COSTS_1_NAME: &str = "costs"; -pub const COSTS_2_NAME: &str = "costs-2"; -pub const COSTS_3_NAME: &str = "costs-3"; - -lazy_static! { - static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = { - #[allow(clippy::expect_used)] - TypeSignature::TupleType( - TupleTypeSignature::try_from(vec![ - ("runtime".into(), TypeSignature::UIntType), - ("write_length".into(), TypeSignature::UIntType), - ("write_count".into(), TypeSignature::UIntType), - ("read_count".into(), TypeSignature::UIntType), - ("read_length".into(), TypeSignature::UIntType), - ]) - .expect("BUG: failed to construct type signature for cost tuple"), - ) - }; -} - -pub fn runtime_cost, C: CostTracker>( - cost_function: ClarityCostFunction, - tracker: &mut C, - input: T, -) -> Result<()> { - let size: u64 = input.try_into().map_err(|_| CostErrors::CostOverflow)?; - let cost = tracker.compute_cost(cost_function, &[size])?; - - tracker.add_cost(cost) -} - -macro_rules! finally_drop_memory { - ( $env: expr, $used_mem:expr; $exec:expr ) => {{ - let result = (|| $exec)(); - $env.drop_memory($used_mem)?; - result - }}; -} - -pub fn analysis_typecheck_cost( - track: &mut T, - t1: &TypeSignature, - t2: &TypeSignature, -) -> Result<()> { - let t1_size = t1.type_size().map_err(|_| CostErrors::CostOverflow)?; - let t2_size = t2.type_size().map_err(|_| CostErrors::CostOverflow)?; - let cost = track.compute_cost( - ClarityCostFunction::AnalysisTypeCheck, - &[cmp::max(t1_size, t2_size) as u64], - )?; - track.add_cost(cost) -} - -pub trait MemoryConsumer { - fn get_memory_use(&self) -> Result; -} - -impl MemoryConsumer for Value { - fn get_memory_use(&self) -> Result { - Ok(self - .size() - .map_err(|_| CostErrors::InterpreterFailure)? 
- .into()) - } -} - -pub trait CostTracker { - fn compute_cost( - &mut self, - cost_function: ClarityCostFunction, - input: &[u64], - ) -> Result; - fn add_cost(&mut self, cost: ExecutionCost) -> Result<()>; - fn add_memory(&mut self, memory: u64) -> Result<()>; - fn drop_memory(&mut self, memory: u64) -> Result<()>; - fn reset_memory(&mut self); - /// Check if the given contract-call should be short-circuited. - /// If so: this charges the cost to the CostTracker, and return true - /// If not: return false - fn short_circuit_contract_call( - &mut self, - contract: &QualifiedContractIdentifier, - function: &ClarityName, - input: &[u64], - ) -> Result; -} - -// Don't track! -impl CostTracker for () { - fn compute_cost( - &mut self, - _cost_function: ClarityCostFunction, - _input: &[u64], - ) -> std::result::Result { - Ok(ExecutionCost::ZERO) - } - fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { - Ok(()) - } - fn add_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { - Ok(()) - } - fn drop_memory(&mut self, _memory: u64) -> Result<()> { - Ok(()) - } - fn reset_memory(&mut self) {} - fn short_circuit_contract_call( - &mut self, - _contract: &QualifiedContractIdentifier, - _function: &ClarityName, - _input: &[u64], - ) -> Result { - Ok(false) - } -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] -pub struct ClarityCostFunctionReference { - pub contract_id: QualifiedContractIdentifier, - pub function_name: String, -} - -impl ::std::fmt::Display for ClarityCostFunctionReference { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "{}.{}", &self.contract_id, &self.function_name) - } -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, Copy)] -pub enum DefaultVersion { - Costs1, - Costs2, - Costs2Testnet, - Costs3, -} - -impl DefaultVersion { - pub fn evaluate( - &self, - cost_function_ref: &ClarityCostFunctionReference, - f: 
&ClarityCostFunction, - input: &[u64], - ) -> Result { - let n = input.first().ok_or_else(|| { - CostErrors::Expect("Default cost function supplied with 0 args".into()) - })?; - let r = match self { - DefaultVersion::Costs1 => f.eval::(*n), - DefaultVersion::Costs2 => f.eval::(*n), - DefaultVersion::Costs2Testnet => f.eval::(*n), - DefaultVersion::Costs3 => f.eval::(*n), - }; - r.map_err(|e| { - let e = match e { - crate::vm::errors::Error::Runtime(RuntimeErrorType::NotImplemented, _) => { - CheckErrors::UndefinedFunction(cost_function_ref.function_name.clone()).into() - } - other => other, - }; - - CostErrors::CostComputationFailed(format!( - "Error evaluating result of cost function {cost_function_ref}: {e}", - )) - }) - } -} - -impl DefaultVersion { - pub fn try_from( - mainnet: bool, - value: &QualifiedContractIdentifier, - ) -> std::result::Result { - if !value.is_boot() { - return Err("Not a boot contract".into()); - } - if value.name.as_str() == COSTS_1_NAME { - Ok(Self::Costs1) - } else if value.name.as_str() == COSTS_2_NAME { - if mainnet { - Ok(Self::Costs2) - } else { - Ok(Self::Costs2Testnet) - } - } else if value.name.as_str() == COSTS_3_NAME { - Ok(Self::Costs3) - } else { - Err(format!("Unknown default contract {}", &value.name)) - } - } -} - -#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] -pub enum ClarityCostFunctionEvaluator { - Default( - ClarityCostFunctionReference, - ClarityCostFunction, - DefaultVersion, - ), - Clarity(ClarityCostFunctionReference), -} - -impl ClarityCostFunctionReference { - fn new(id: QualifiedContractIdentifier, name: String) -> ClarityCostFunctionReference { - ClarityCostFunctionReference { - contract_id: id, - function_name: name, - } - } -} - -#[derive(Debug, Clone)] -pub struct CostStateSummary { - pub contract_call_circuits: - HashMap<(QualifiedContractIdentifier, ClarityName), ClarityCostFunctionReference>, - pub cost_function_references: HashMap, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] 
-struct SerializedCostStateSummary { - contract_call_circuits: Vec<( - (QualifiedContractIdentifier, ClarityName), - ClarityCostFunctionReference, - )>, - cost_function_references: Vec<(ClarityCostFunction, ClarityCostFunctionReference)>, -} - -impl From for SerializedCostStateSummary { - fn from(other: CostStateSummary) -> SerializedCostStateSummary { - let CostStateSummary { - contract_call_circuits, - cost_function_references, - } = other; - SerializedCostStateSummary { - contract_call_circuits: contract_call_circuits.into_iter().collect(), - cost_function_references: cost_function_references.into_iter().collect(), - } - } -} - -impl From for CostStateSummary { - fn from(other: SerializedCostStateSummary) -> CostStateSummary { - let SerializedCostStateSummary { - contract_call_circuits, - cost_function_references, - } = other; - CostStateSummary { - contract_call_circuits: contract_call_circuits.into_iter().collect(), - cost_function_references: cost_function_references.into_iter().collect(), - } - } -} - -impl CostStateSummary { - pub fn empty() -> CostStateSummary { - CostStateSummary { - contract_call_circuits: HashMap::new(), - cost_function_references: HashMap::new(), - } - } -} - -#[derive(Clone)] -/// This struct holds all of the data required for non-free LimitedCostTracker instances -pub struct TrackerData { - cost_function_references: HashMap<&'static ClarityCostFunction, ClarityCostFunctionEvaluator>, - cost_contracts: HashMap, - contract_call_circuits: - HashMap<(QualifiedContractIdentifier, ClarityName), ClarityCostFunctionReference>, - total: ExecutionCost, - limit: ExecutionCost, - memory: u64, - memory_limit: u64, - /// if the cost tracker is non-free, this holds the StacksEpochId that should be used to evaluate - /// the Clarity cost functions. If the tracker *is* free, then those functions do not need to be - /// evaluated, so no epoch identifier is necessary. 
- pub epoch: StacksEpochId, - mainnet: bool, - chain_id: u32, -} - -#[derive(Clone)] -#[allow(clippy::large_enum_variant)] -pub enum LimitedCostTracker { - Limited(TrackerData), - Free, -} - -#[cfg(any(test, feature = "testing"))] -impl LimitedCostTracker { - pub fn contract_call_circuits( - &self, - ) -> HashMap<(QualifiedContractIdentifier, ClarityName), ClarityCostFunctionReference> { - match self { - Self::Free => panic!("Cannot get contract call circuits on free tracker"), - Self::Limited(TrackerData { - ref contract_call_circuits, - .. - }) => contract_call_circuits.clone(), - } - } - pub fn cost_function_references( - &self, - ) -> HashMap<&'static ClarityCostFunction, ClarityCostFunctionEvaluator> { - match self { - Self::Free => panic!("Cannot get cost function references on free tracker"), - Self::Limited(TrackerData { - ref cost_function_references, - .. - }) => cost_function_references.clone(), - } - } -} - -impl fmt::Debug for LimitedCostTracker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Free => f.debug_struct("LimitedCostTracker::Free").finish(), - Self::Limited(TrackerData { - total, - limit, - memory, - memory_limit, - .. 
- }) => f - .debug_struct("LimitedCostTracker") - .field("total", total) - .field("limit", limit) - .field("memory", memory) - .field("memory_limit", memory_limit) - .finish(), - } - } -} - -impl PartialEq for LimitedCostTracker { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::Free, Self::Free) => true, - (Self::Limited(self_data), Self::Limited(other_data)) => { - self_data.total == other_data.total - && other_data.limit == self_data.limit - && self_data.memory == other_data.memory - && self_data.memory_limit == other_data.memory_limit - } - (_, _) => false, - } - } -} +#[cfg(feature = "vm")] +#[macro_use] +pub mod tracker; +#[cfg(feature = "vm")] +pub use tracker::*; #[derive(Debug, PartialEq, Eq)] pub enum CostErrors { @@ -423,843 +58,6 @@ impl CostErrors { } } -fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result { - let cost_voting_contract = boot_code_id("cost-voting", mainnet); - - let clarity_epoch = clarity_db - .get_clarity_epoch_version() - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; - let last_processed_at = match clarity_db.get_value( - "vm-costs::last-processed-at-height", - &TypeSignature::UIntType, - &clarity_epoch, - ) { - Ok(Some(v)) => u32::try_from( - v.value - .expect_u128() - .map_err(|_| CostErrors::InterpreterFailure)?, - ) - .map_err(|_| CostErrors::InterpreterFailure)?, - Ok(None) => return Ok(CostStateSummary::empty()), - Err(e) => return Err(CostErrors::CostComputationFailed(e.to_string())), - }; - - let metadata_result = clarity_db - .fetch_metadata_manual::( - last_processed_at, - &cost_voting_contract, - "::state_summary", - ) - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; - let serialized: SerializedCostStateSummary = match metadata_result { - Some(serialized) => { - serde_json::from_str(&serialized).map_err(|_| CostErrors::InterpreterFailure)? 
- } - None => return Ok(CostStateSummary::empty()), - }; - Ok(CostStateSummary::from(serialized)) -} - -fn store_state_summary( - mainnet: bool, - clarity_db: &mut ClarityDatabase, - to_store: &CostStateSummary, -) -> Result<()> { - let block_height = clarity_db.get_current_block_height(); - let cost_voting_contract = boot_code_id("cost-voting", mainnet); - let epoch = clarity_db - .get_clarity_epoch_version() - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; - clarity_db - .put_value( - "vm-costs::last-processed-at-height", - Value::UInt(block_height as u128), - &epoch, - ) - .map_err(|_e| CostErrors::CostContractLoadFailure)?; - let serialized_summary = - serde_json::to_string(&SerializedCostStateSummary::from(to_store.clone())) - .map_err(|_| CostErrors::InterpreterFailure)?; - clarity_db - .set_metadata( - &cost_voting_contract, - "::state_summary", - &serialized_summary, - ) - .map_err(|e| CostErrors::Expect(e.to_string()))?; - - Ok(()) -} - -/// -/// This method loads a cost state summary structure from the currently open stacks chain tip -/// In doing so, it reads from the cost-voting contract to find any newly confirmed proposals, -/// checks those proposals for validity, and then applies those changes to the cached set -/// of cost functions. -/// -/// `apply_updates` - tells this function to look for any changes in the cost voting contract -/// which would need to be applied. if `false`, just load the last computed cost state in this -/// fork. -/// -fn load_cost_functions( - mainnet: bool, - clarity_db: &mut ClarityDatabase, - apply_updates: bool, -) -> Result { - let clarity_epoch = clarity_db - .get_clarity_epoch_version() - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; - let last_processed_count = clarity_db - .get_value( - "vm-costs::last_processed_count", - &TypeSignature::UIntType, - &clarity_epoch, - ) - .map_err(|_e| CostErrors::CostContractLoadFailure)? 
- .map(|result| result.value) - .unwrap_or(Value::UInt(0)) - .expect_u128() - .map_err(|_| CostErrors::InterpreterFailure)?; - let cost_voting_contract = boot_code_id("cost-voting", mainnet); - let confirmed_proposals_count = clarity_db - .lookup_variable_unknown_descriptor( - &cost_voting_contract, - "confirmed-proposal-count", - &clarity_epoch, - ) - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? - .expect_u128() - .map_err(|_| CostErrors::InterpreterFailure)?; - debug!("Check cost voting contract"; - "confirmed_proposal_count" => confirmed_proposals_count, - "last_processed_count" => last_processed_count); - - // we need to process any confirmed proposals in the range [fetch-start, fetch-end) - let (fetch_start, fetch_end) = (last_processed_count, confirmed_proposals_count); - let mut state_summary = load_state_summary(mainnet, clarity_db)?; - if !apply_updates { - return Ok(state_summary); - } - - for confirmed_proposal in fetch_start..fetch_end { - // fetch the proposal data - let entry = clarity_db - .fetch_entry_unknown_descriptor( - &cost_voting_contract, - "confirmed-proposals", - &Value::from( - TupleData::from_data(vec![( - "confirmed-id".into(), - Value::UInt(confirmed_proposal), - )]) - .map_err(|_| { - CostErrors::Expect("BUG: failed to construct simple tuple".into()) - })?, - ), - &clarity_epoch, - ) - .map_err(|_| CostErrors::Expect("BUG: Failed querying confirmed-proposals".into()))? - .expect_optional() - .map_err(|_| CostErrors::InterpreterFailure)? - .ok_or_else(|| { - CostErrors::Expect("BUG: confirmed-proposal-count exceeds stored proposals".into()) - })? - .expect_tuple() - .map_err(|_| CostErrors::InterpreterFailure)?; - let target_contract = match entry - .get("function-contract") - .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? - .clone() - .expect_principal() - .map_err(|_| CostErrors::InterpreterFailure)? 
- { - PrincipalData::Contract(contract_id) => contract_id, - _ => { - warn!("Confirmed cost proposal invalid: function-contract is not a contract principal"; - "confirmed_proposal_id" => confirmed_proposal); - continue; - } - }; - let target_function = match ClarityName::try_from( - entry - .get("function-name") - .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? - .clone() - .expect_ascii() - .map_err(|_| CostErrors::InterpreterFailure)?, - ) { - Ok(x) => x, - Err(_) => { - warn!("Confirmed cost proposal invalid: function-name is not a valid function name"; - "confirmed_proposal_id" => confirmed_proposal); - continue; - } - }; - let cost_contract = match entry - .get("cost-function-contract") - .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? - .clone() - .expect_principal() - .map_err(|_| CostErrors::InterpreterFailure)? - { - PrincipalData::Contract(contract_id) => contract_id, - _ => { - warn!("Confirmed cost proposal invalid: cost-function-contract is not a contract principal"; - "confirmed_proposal_id" => confirmed_proposal); - continue; - } - }; - - let cost_function = match ClarityName::try_from( - entry - .get_owned("cost-function-name") - .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? 
- .expect_ascii() - .map_err(|_| CostErrors::InterpreterFailure)?, - ) { - Ok(x) => x, - Err(_) => { - warn!("Confirmed cost proposal invalid: cost-function-name is not a valid function name"; - "confirmed_proposal_id" => confirmed_proposal); - continue; - } - }; - - // Here is where we perform the required validity checks for a confirmed proposal: - // * Replaced contract-calls _must_ be `define-read-only` _or_ refer to one of the boot code - // cost functions - // * cost-function contracts must be arithmetic only - - // make sure the contract is "cost contract eligible" via the - // arithmetic-checking analysis pass - let (cost_func_ref, cost_func_type) = match clarity_db - .load_contract_analysis(&cost_contract) - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? - { - Some(c) => { - if !c.is_cost_contract_eligible { - warn!("Confirmed cost proposal invalid: cost-function-contract uses non-arithmetic or otherwise illegal operations"; - "confirmed_proposal_id" => confirmed_proposal, - "contract_name" => %cost_contract, - ); - continue; - } - - if let Some(FunctionType::Fixed(cost_function_type)) = c - .read_only_function_types - .get(&cost_function) - .or_else(|| c.private_function_types.get(&cost_function)) - { - if !cost_function_type.returns.eq(&COST_TUPLE_TYPE_SIGNATURE) { - warn!("Confirmed cost proposal invalid: cost-function-name does not return a cost tuple"; - "confirmed_proposal_id" => confirmed_proposal, - "contract_name" => %cost_contract, - "function_name" => %cost_function, - "return_type" => %cost_function_type.returns, - ); - continue; - } - if !cost_function_type.args.len() == 1 - || cost_function_type.args[0].signature != TypeSignature::UIntType - { - warn!("Confirmed cost proposal invalid: cost-function-name args should be length-1 and only uint"; - "confirmed_proposal_id" => confirmed_proposal, - "contract_name" => %cost_contract, - "function_name" => %cost_function, - ); - continue; - } - ( - ClarityCostFunctionReference { - 
contract_id: cost_contract, - function_name: cost_function.to_string(), - }, - cost_function_type.clone(), - ) - } else { - warn!("Confirmed cost proposal invalid: cost-function-name not defined"; - "confirmed_proposal_id" => confirmed_proposal, - "contract_name" => %cost_contract, - "function_name" => %cost_function, - ); - continue; - } - } - None => { - warn!("Confirmed cost proposal invalid: cost-function-contract is not a published contract"; - "confirmed_proposal_id" => confirmed_proposal, - "contract_name" => %cost_contract, - ); - continue; - } - }; - - if target_contract == boot_code_id("costs", mainnet) { - // refering to one of the boot code cost functions - let target = match ClarityCostFunction::lookup_by_name(&target_function) { - Some(ClarityCostFunction::Unimplemented) => { - warn!("Attempted vote on unimplemented cost function"; - "confirmed_proposal_id" => confirmed_proposal, - "cost_function" => %target_function); - continue; - } - Some(cost_func) => cost_func, - None => { - warn!("Confirmed cost proposal invalid: function-name does not reference a Clarity cost function"; - "confirmed_proposal_id" => confirmed_proposal, - "cost_function" => %target_function); - continue; - } - }; - state_summary - .cost_function_references - .insert(target, cost_func_ref); - } else { - // referring to a user-defined function - match clarity_db - .load_contract_analysis(&target_contract) - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? 
- { - Some(c) => { - if let Some(Fixed(tf)) = c.read_only_function_types.get(&target_function) { - if cost_func_type.args.len() != tf.args.len() { - warn!("Confirmed cost proposal invalid: cost-function contains the wrong number of arguments"; - "confirmed_proposal_id" => confirmed_proposal, - "target_contract_name" => %target_contract, - "target_function_name" => %target_function, - ); - continue; - } - for arg in &cost_func_type.args { - if arg.signature != TypeSignature::UIntType { - warn!("Confirmed cost proposal invalid: contains non uint argument"; - "confirmed_proposal_id" => confirmed_proposal, - ); - continue; - } - } - } else { - warn!("Confirmed cost proposal invalid: function-name not defined or is not read-only"; - "confirmed_proposal_id" => confirmed_proposal, - "target_contract_name" => %target_contract, - "target_function_name" => %target_function, - ); - continue; - } - } - None => { - warn!("Confirmed cost proposal invalid: contract-name not a published contract"; - "confirmed_proposal_id" => confirmed_proposal, - "target_contract_name" => %target_contract, - ); - continue; - } - } - state_summary - .contract_call_circuits - .insert((target_contract, target_function), cost_func_ref); - } - } - if confirmed_proposals_count > last_processed_count { - store_state_summary(mainnet, clarity_db, &state_summary)?; - clarity_db - .put_value( - "vm-costs::last_processed_count", - Value::UInt(confirmed_proposals_count), - &clarity_epoch, - ) - .map_err(|_e| CostErrors::CostContractLoadFailure)?; - } - - Ok(state_summary) -} - -impl LimitedCostTracker { - pub fn new( - mainnet: bool, - chain_id: u32, - limit: ExecutionCost, - clarity_db: &mut ClarityDatabase, - epoch: StacksEpochId, - ) -> Result { - let mut cost_tracker = TrackerData { - cost_function_references: HashMap::new(), - cost_contracts: HashMap::new(), - contract_call_circuits: HashMap::new(), - limit, - memory_limit: CLARITY_MEMORY_LIMIT, - total: ExecutionCost::ZERO, - memory: 0, - epoch, - 
mainnet, - chain_id, - }; - assert!(clarity_db.is_stack_empty()); - cost_tracker.load_costs(clarity_db, true)?; - Ok(Self::Limited(cost_tracker)) - } - - pub fn new_mid_block( - mainnet: bool, - chain_id: u32, - limit: ExecutionCost, - clarity_db: &mut ClarityDatabase, - epoch: StacksEpochId, - ) -> Result { - let mut cost_tracker = TrackerData { - cost_function_references: HashMap::new(), - cost_contracts: HashMap::new(), - contract_call_circuits: HashMap::new(), - limit, - memory_limit: CLARITY_MEMORY_LIMIT, - total: ExecutionCost::ZERO, - memory: 0, - epoch, - mainnet, - chain_id, - }; - cost_tracker.load_costs(clarity_db, false)?; - Ok(Self::Limited(cost_tracker)) - } - - #[cfg(any(test, feature = "testing"))] - pub fn new_max_limit( - clarity_db: &mut ClarityDatabase, - epoch: StacksEpochId, - use_mainnet: bool, - ) -> Result { - use crate::vm::tests::test_only_mainnet_to_chain_id; - let chain_id = test_only_mainnet_to_chain_id(use_mainnet); - assert!(clarity_db.is_stack_empty()); - LimitedCostTracker::new( - use_mainnet, - chain_id, - ExecutionCost::max_value(), - clarity_db, - epoch, - ) - } - - pub fn new_free() -> LimitedCostTracker { - Self::Free - } - - pub fn default_cost_contract_for_epoch(epoch_id: StacksEpochId) -> Result { - let result = match epoch_id { - StacksEpochId::Epoch10 => { - return Err(CostErrors::Expect("Attempted to get default cost functions for Epoch 1.0 where Clarity does not exist".into())); - } - StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(), - StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(), - StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 - | StacksEpochId::Epoch31 => COSTS_3_NAME.to_string(), - }; - Ok(result) - } -} - -impl TrackerData { - // TODO: add tests from mutation testing results #4831 - #[cfg_attr(test, mutants::skip)] - /// `apply_updates` - tells this function to look for any changes in the cost 
voting contract - /// which would need to be applied. if `false`, just load the last computed cost state in this - /// fork. - fn load_costs(&mut self, clarity_db: &mut ClarityDatabase, apply_updates: bool) -> Result<()> { - clarity_db.begin(); - let epoch_id = clarity_db - .get_clarity_epoch_version() - .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?; - let boot_costs_id = boot_code_id( - &LimitedCostTracker::default_cost_contract_for_epoch(epoch_id)?, - self.mainnet, - ); - - let v = DefaultVersion::try_from(self.mainnet, &boot_costs_id).map_err(|e| { - CostErrors::Expect(format!( - "Failed to get version of default costs contract {e}" - )) - })?; - - let CostStateSummary { - contract_call_circuits, - mut cost_function_references, - } = load_cost_functions(self.mainnet, clarity_db, apply_updates).map_err(|e| { - let result = clarity_db - .roll_back() - .map_err(|e| CostErrors::Expect(e.to_string())); - match result { - Ok(_) => e, - Err(rollback_err) => rollback_err, - } - })?; - - self.contract_call_circuits = contract_call_circuits; - - let iter = ClarityCostFunction::ALL.iter(); - let iter_len = iter.len(); - let mut cost_contracts = HashMap::with_capacity(iter_len); - let mut m = HashMap::with_capacity(iter_len); - - for f in iter { - let cost_function_ref = cost_function_references.remove(f).unwrap_or_else(|| { - ClarityCostFunctionReference::new(boot_costs_id.clone(), f.get_name()) - }); - if !cost_contracts.contains_key(&cost_function_ref.contract_id) { - let contract_context = match clarity_db.get_contract(&cost_function_ref.contract_id) - { - Ok(contract) => contract.contract_context, - Err(e) => { - error!("Failed to load intended Clarity cost contract"; - "contract" => %cost_function_ref.contract_id, - "error" => ?e); - clarity_db - .roll_back() - .map_err(|e| CostErrors::Expect(e.to_string()))?; - return Err(CostErrors::CostContractLoadFailure); - } - }; - cost_contracts.insert(cost_function_ref.contract_id.clone(), contract_context); 
- } - - if cost_function_ref.contract_id == boot_costs_id { - m.insert( - f, - ClarityCostFunctionEvaluator::Default(cost_function_ref, *f, v), - ); - } else { - m.insert(f, ClarityCostFunctionEvaluator::Clarity(cost_function_ref)); - } - } - - for (_, circuit_target) in self.contract_call_circuits.iter() { - if !cost_contracts.contains_key(&circuit_target.contract_id) { - let contract_context = match clarity_db.get_contract(&circuit_target.contract_id) { - Ok(contract) => contract.contract_context, - Err(e) => { - error!("Failed to load intended Clarity cost contract"; - "contract" => %boot_costs_id.to_string(), - "error" => %format!("{:?}", e)); - clarity_db - .roll_back() - .map_err(|e| CostErrors::Expect(e.to_string()))?; - return Err(CostErrors::CostContractLoadFailure); - } - }; - cost_contracts.insert(circuit_target.contract_id.clone(), contract_context); - } - } - - self.cost_function_references = m; - self.cost_contracts = cost_contracts; - - if apply_updates { - clarity_db - .commit() - .map_err(|e| CostErrors::Expect(e.to_string()))?; - } else { - clarity_db - .roll_back() - .map_err(|e| CostErrors::Expect(e.to_string()))?; - } - - Ok(()) - } -} - -impl LimitedCostTracker { - pub fn get_total(&self) -> ExecutionCost { - match self { - Self::Limited(TrackerData { total, .. }) => total.clone(), - Self::Free => ExecutionCost::ZERO, - } - } - #[allow(clippy::panic)] - pub fn set_total(&mut self, total: ExecutionCost) { - // used by the miner to "undo" the cost of a transaction when trying to pack a block. - match self { - Self::Limited(ref mut data) => data.total = total, - Self::Free => panic!("Cannot set total on free tracker"), - } - } - pub fn get_limit(&self) -> ExecutionCost { - match self { - Self::Limited(TrackerData { limit, .. }) => limit.clone(), - Self::Free => ExecutionCost::max_value(), - } - } - - pub fn get_memory(&self) -> u64 { - match self { - Self::Limited(TrackerData { memory, .. 
}) => *memory, - Self::Free => 0, - } - } - pub fn get_memory_limit(&self) -> u64 { - match self { - Self::Limited(TrackerData { memory_limit, .. }) => *memory_limit, - Self::Free => u64::MAX, - } - } -} - -pub fn parse_cost( - cost_function_name: &str, - eval_result: InterpreterResult>, -) -> Result { - match eval_result { - Ok(Some(Value::Tuple(data))) => { - let results = ( - data.data_map.get("write_length"), - data.data_map.get("write_count"), - data.data_map.get("runtime"), - data.data_map.get("read_length"), - data.data_map.get("read_count"), - ); - - match results { - ( - Some(UInt(write_length)), - Some(UInt(write_count)), - Some(UInt(runtime)), - Some(UInt(read_length)), - Some(UInt(read_count)), - ) => Ok(ExecutionCost { - write_length: (*write_length).try_into().unwrap_or(u64::MAX), - write_count: (*write_count).try_into().unwrap_or(u64::MAX), - runtime: (*runtime).try_into().unwrap_or(u64::MAX), - read_length: (*read_length).try_into().unwrap_or(u64::MAX), - read_count: (*read_count).try_into().unwrap_or(u64::MAX), - }), - _ => Err(CostErrors::CostComputationFailed( - "Execution Cost tuple does not contain only UInts".to_string(), - )), - } - } - Ok(Some(_)) => Err(CostErrors::CostComputationFailed( - "Clarity cost function returned something other than a Cost tuple".to_string(), - )), - Ok(None) => Err(CostErrors::CostComputationFailed( - "Clarity cost function returned nothing".to_string(), - )), - Err(e) => Err(CostErrors::CostComputationFailed(format!( - "Error evaluating result of cost function {cost_function_name}: {e}" - ))), - } -} - -// TODO: add tests from mutation testing results #4832 -#[cfg_attr(test, mutants::skip)] -pub fn compute_cost( - cost_tracker: &TrackerData, - cost_function_reference: ClarityCostFunctionReference, - input_sizes: &[u64], - eval_in_epoch: StacksEpochId, -) -> Result { - let mainnet = cost_tracker.mainnet; - let chain_id = cost_tracker.chain_id; - let mut null_store = NullBackingStore::new(); - let conn = 
null_store.as_clarity_db(); - let mut global_context = GlobalContext::new( - mainnet, - chain_id, - conn, - LimitedCostTracker::new_free(), - eval_in_epoch, - ); - - let cost_contract = cost_tracker - .cost_contracts - .get(&cost_function_reference.contract_id) - .ok_or(CostErrors::CostComputationFailed(format!( - "CostFunction not found: {cost_function_reference}" - )))?; - - let mut program = vec![SymbolicExpression::atom( - cost_function_reference.function_name[..].into(), - )]; - - for input_size in input_sizes.iter() { - program.push(SymbolicExpression::atom_value(Value::UInt( - *input_size as u128, - ))); - } - - let function_invocation = SymbolicExpression::list(program); - let eval_result = global_context.execute(|global_context| { - let context = LocalContext::new(); - let mut call_stack = CallStack::new(); - let publisher: PrincipalData = cost_contract.contract_identifier.issuer.clone().into(); - let mut env = Environment::new( - global_context, - cost_contract, - &mut call_stack, - Some(publisher.clone()), - Some(publisher.clone()), - None, - ); - - let result = super::eval(&function_invocation, &mut env, &context)?; - Ok(Some(result)) - }); - - parse_cost(&cost_function_reference.to_string(), eval_result) -} - -fn add_cost(s: &mut TrackerData, cost: ExecutionCost) -> std::result::Result<(), CostErrors> { - s.total.add(&cost)?; - if cfg!(feature = "disable-costs") { - // Disable check for exceeding the cost limit to allow mining large blocks for profiling purposes. 
- return Ok(()); - } - if s.total.exceeds(&s.limit) { - Err(CostErrors::CostBalanceExceeded( - s.total.clone(), - s.limit.clone(), - )) - } else { - Ok(()) - } -} - -fn add_memory(s: &mut TrackerData, memory: u64) -> std::result::Result<(), CostErrors> { - s.memory = s.memory.cost_overflow_add(memory)?; - if s.memory > s.memory_limit { - Err(CostErrors::MemoryBalanceExceeded(s.memory, s.memory_limit)) - } else { - Ok(()) - } -} - -fn drop_memory(s: &mut TrackerData, memory: u64) -> Result<()> { - s.memory = s - .memory - .checked_sub(memory) - .ok_or_else(|| CostErrors::Expect("Underflowed dropped memory".into()))?; - Ok(()) -} - -impl CostTracker for LimitedCostTracker { - fn compute_cost( - &mut self, - cost_function: ClarityCostFunction, - input: &[u64], - ) -> std::result::Result { - match self { - Self::Free => { - // tracker is free, return zero! - Ok(ExecutionCost::ZERO) - } - Self::Limited(ref mut data) => { - if cost_function == ClarityCostFunction::Unimplemented { - return Err(CostErrors::Expect( - "Used unimplemented cost function".into(), - )); - } - let cost_function_ref = data.cost_function_references.get(&cost_function).ok_or( - CostErrors::CostComputationFailed(format!( - "CostFunction not defined: {cost_function}" - )), - )?; - - match cost_function_ref { - ClarityCostFunctionEvaluator::Default( - cost_function_ref, - clarity_cost_function, - default_version, - ) => default_version.evaluate(cost_function_ref, clarity_cost_function, input), - ClarityCostFunctionEvaluator::Clarity(cost_function_ref) => { - compute_cost(data, cost_function_ref.clone(), input, data.epoch) - } - } - } - } - } - fn add_cost(&mut self, cost: ExecutionCost) -> std::result::Result<(), CostErrors> { - match self { - Self::Free => Ok(()), - Self::Limited(ref mut data) => add_cost(data, cost), - } - } - fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { - match self { - Self::Free => Ok(()), - Self::Limited(ref mut data) => add_memory(data, memory), 
- } - } - fn drop_memory(&mut self, memory: u64) -> Result<()> { - match self { - Self::Free => Ok(()), - Self::Limited(ref mut data) => drop_memory(data, memory), - } - } - fn reset_memory(&mut self) { - match self { - Self::Free => {} - Self::Limited(ref mut data) => { - data.memory = 0; - } - } - } - fn short_circuit_contract_call( - &mut self, - contract: &QualifiedContractIdentifier, - function: &ClarityName, - input: &[u64], - ) -> Result { - match self { - Self::Free => { - // if we're already free, no need to worry about short circuiting contract-calls - Ok(false) - } - Self::Limited(data) => { - // grr, if HashMap::get didn't require Borrow, we wouldn't need this cloning. - let lookup_key = (contract.clone(), function.clone()); - if let Some(cost_function) = data.contract_call_circuits.get(&lookup_key).cloned() { - compute_cost(data, cost_function, input, data.epoch)?; - Ok(true) - } else { - Ok(false) - } - } - } - } -} - -impl CostTracker for &mut LimitedCostTracker { - fn compute_cost( - &mut self, - cost_function: ClarityCostFunction, - input: &[u64], - ) -> std::result::Result { - LimitedCostTracker::compute_cost(self, cost_function, input) - } - fn add_cost(&mut self, cost: ExecutionCost) -> std::result::Result<(), CostErrors> { - LimitedCostTracker::add_cost(self, cost) - } - fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { - LimitedCostTracker::add_memory(self, memory) - } - fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> { - LimitedCostTracker::drop_memory(self, memory) - } - fn reset_memory(&mut self) { - LimitedCostTracker::reset_memory(self) - } - fn short_circuit_contract_call( - &mut self, - contract: &QualifiedContractIdentifier, - function: &ClarityName, - input: &[u64], - ) -> Result { - LimitedCostTracker::short_circuit_contract_call(self, contract, function, input) - } -} - #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, Hash)] pub struct ExecutionCost { pub 
write_length: u64, @@ -1276,208 +74,4 @@ impl fmt::Display for ExecutionCost { } } -pub trait CostOverflowingMath { - fn cost_overflow_mul(self, other: T) -> Result; - fn cost_overflow_add(self, other: T) -> Result; - fn cost_overflow_sub(self, other: T) -> Result; - fn cost_overflow_div(self, other: T) -> Result; -} - -impl CostOverflowingMath for u64 { - fn cost_overflow_mul(self, other: u64) -> Result { - self.checked_mul(other).ok_or(CostErrors::CostOverflow) - } - fn cost_overflow_add(self, other: u64) -> Result { - self.checked_add(other).ok_or(CostErrors::CostOverflow) - } - fn cost_overflow_sub(self, other: u64) -> Result { - self.checked_sub(other).ok_or(CostErrors::CostOverflow) - } - fn cost_overflow_div(self, other: u64) -> Result { - self.checked_div(other).ok_or(CostErrors::CostOverflow) - } -} - -impl ExecutionCost { - pub const ZERO: Self = Self { - runtime: 0, - write_length: 0, - read_count: 0, - write_count: 0, - read_length: 0, - }; - - /// Returns the percentage of self consumed in `numerator`'s largest proportion dimension. - pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 { - // max() should always return because there are > 0 elements - #[allow(clippy::expect_used)] - *[ - numerator.runtime / cmp::max(1, self.runtime / 100), - numerator.write_length / cmp::max(1, self.write_length / 100), - numerator.write_count / cmp::max(1, self.write_count / 100), - numerator.read_length / cmp::max(1, self.read_length / 100), - numerator.read_count / cmp::max(1, self.read_count / 100), - ] - .iter() - .max() - .expect("BUG: should find maximum") - } - - /// Returns the dot product of this execution cost with `resolution`/block_limit - /// This provides a scalar value representing the cumulative consumption - /// of `self` in the provided block_limit. 
- pub fn proportion_dot_product(&self, block_limit: &ExecutionCost, resolution: u64) -> u64 { - [ - // each field here is calculating `r * self / limit`, using f64 - // use MAX(1, block_limit) to guard against divide by zero - // use MIN(1, self/block_limit) to guard against self > block_limit - resolution as f64 - * 1_f64.min(self.runtime as f64 / 1_f64.max(block_limit.runtime as f64)), - resolution as f64 - * 1_f64.min(self.read_count as f64 / 1_f64.max(block_limit.read_count as f64)), - resolution as f64 - * 1_f64.min(self.write_count as f64 / 1_f64.max(block_limit.write_count as f64)), - resolution as f64 - * 1_f64.min(self.read_length as f64 / 1_f64.max(block_limit.read_length as f64)), - resolution as f64 - * 1_f64.min(self.write_length as f64 / 1_f64.max(block_limit.write_length as f64)), - ] - .iter() - .fold(0, |acc, dim| acc.saturating_add(cmp::max(*dim as u64, 1))) - } - - pub fn max_value() -> ExecutionCost { - Self { - runtime: u64::MAX, - write_length: u64::MAX, - read_count: u64::MAX, - write_count: u64::MAX, - read_length: u64::MAX, - } - } - - pub fn runtime(runtime: u64) -> ExecutionCost { - Self { - runtime, - write_length: 0, - read_count: 0, - write_count: 0, - read_length: 0, - } - } - - pub fn add_runtime(&mut self, runtime: u64) -> Result<()> { - self.runtime = self.runtime.cost_overflow_add(runtime)?; - Ok(()) - } - - pub fn add(&mut self, other: &ExecutionCost) -> Result<()> { - self.runtime = self.runtime.cost_overflow_add(other.runtime)?; - self.read_count = self.read_count.cost_overflow_add(other.read_count)?; - self.read_length = self.read_length.cost_overflow_add(other.read_length)?; - self.write_length = self.write_length.cost_overflow_add(other.write_length)?; - self.write_count = self.write_count.cost_overflow_add(other.write_count)?; - Ok(()) - } - - pub fn sub(&mut self, other: &ExecutionCost) -> Result<()> { - self.runtime = self.runtime.cost_overflow_sub(other.runtime)?; - self.read_count = 
self.read_count.cost_overflow_sub(other.read_count)?; - self.read_length = self.read_length.cost_overflow_sub(other.read_length)?; - self.write_length = self.write_length.cost_overflow_sub(other.write_length)?; - self.write_count = self.write_count.cost_overflow_sub(other.write_count)?; - Ok(()) - } - - pub fn multiply(&mut self, times: u64) -> Result<()> { - self.runtime = self.runtime.cost_overflow_mul(times)?; - self.read_count = self.read_count.cost_overflow_mul(times)?; - self.read_length = self.read_length.cost_overflow_mul(times)?; - self.write_length = self.write_length.cost_overflow_mul(times)?; - self.write_count = self.write_count.cost_overflow_mul(times)?; - Ok(()) - } - - pub fn divide(&mut self, divisor: u64) -> Result<()> { - self.runtime = self.runtime.cost_overflow_div(divisor)?; - self.read_count = self.read_count.cost_overflow_div(divisor)?; - self.read_length = self.read_length.cost_overflow_div(divisor)?; - self.write_length = self.write_length.cost_overflow_div(divisor)?; - self.write_count = self.write_count.cost_overflow_div(divisor)?; - Ok(()) - } - - /// Returns whether or not this cost exceeds any dimension of the - /// other cost. 
- pub fn exceeds(&self, other: &ExecutionCost) -> bool { - self.runtime > other.runtime - || self.write_length > other.write_length - || self.write_count > other.write_count - || self.read_count > other.read_count - || self.read_length > other.read_length - } - - pub fn max_cost(first: ExecutionCost, second: ExecutionCost) -> ExecutionCost { - Self { - runtime: first.runtime.max(second.runtime), - write_length: first.write_length.max(second.write_length), - write_count: first.write_count.max(second.write_count), - read_count: first.read_count.max(second.read_count), - read_length: first.read_length.max(second.read_length), - } - } - - pub fn is_zero(&self) -> bool { - *self == Self::ZERO - } -} - -// ONLY WORKS IF INPUT IS u64 -fn int_log2(input: u64) -> Option { - 63_u32.checked_sub(input.leading_zeros()).map(|floor_log| { - if input.trailing_zeros() == floor_log { - u64::from(floor_log) - } else { - u64::from(floor_log + 1) - } - }) -} - -#[cfg(test)] -mod unit_tests { - use super::*; - - #[test] - fn test_simple_overflows() { - assert_eq!(u64::MAX.cost_overflow_add(1), Err(CostErrors::CostOverflow)); - assert_eq!(u64::MAX.cost_overflow_mul(2), Err(CostErrors::CostOverflow)); - } - - #[test] - fn test_simple_sub() { - assert_eq!(0u64.cost_overflow_sub(1), Err(CostErrors::CostOverflow)); - } - - #[test] - fn test_simple_log2s() { - let inputs = [ - 1, - 2, - 4, - 8, - 16, - 31, - 32, - 33, - 39, - 64, - 128, - 2_u64.pow(63), - u64::MAX, - ]; - let expected = [0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 63, 64]; - for (input, expected) in inputs.iter().zip(expected.iter()) { - assert_eq!(int_log2(*input).unwrap(), *expected); - } - } -} +type Result = std::result::Result; diff --git a/clarity/src/vm/costs/tracker.rs b/clarity/src/vm/costs/tracker.rs new file mode 100644 index 0000000000..75cfd26694 --- /dev/null +++ b/clarity/src/vm/costs/tracker.rs @@ -0,0 +1,1419 @@ +use std::{cmp, fmt}; + +use hashbrown::HashMap; +use lazy_static::lazy_static; +use 
stacks_common::types::StacksEpochId; + +use super::{CostErrors, ExecutionCost, Result}; +use crate::boot_util::boot_code_id; +use crate::vm::contexts::{ContractContext, GlobalContext}; +pub use crate::vm::costs::cost_functions::ClarityCostFunction; +use crate::vm::costs::costs_1::Costs1; +use crate::vm::costs::costs_2::Costs2; +use crate::vm::costs::costs_2_testnet::Costs2Testnet; +use crate::vm::costs::costs_3::Costs3; +use crate::vm::database::clarity_store::NullBackingStore; +use crate::vm::database::ClarityDatabase; +use crate::vm::errors::{CheckErrors, InterpreterResult, RuntimeErrorType}; +use crate::vm::types::signatures::FunctionType::Fixed; +use crate::vm::types::signatures::TupleTypeSignature; +use crate::vm::types::Value::UInt; +use crate::vm::types::{FunctionType, PrincipalData, TupleData, TypeSignature}; +use crate::vm::{ + CallStack, ClarityName, Environment, LocalContext, QualifiedContractIdentifier, + SymbolicExpression, Value, +}; + +pub const CLARITY_MEMORY_LIMIT: u64 = 100 * 1000 * 1000; + +// TODO: factor out into a boot lib? +pub const COSTS_1_NAME: &str = "costs"; +pub const COSTS_2_NAME: &str = "costs-2"; +pub const COSTS_3_NAME: &str = "costs-3"; + +lazy_static! 
{ + static ref COST_TUPLE_TYPE_SIGNATURE: TypeSignature = { + #[allow(clippy::expect_used)] + TypeSignature::TupleType( + TupleTypeSignature::try_from(vec![ + ("runtime".into(), TypeSignature::UIntType), + ("write_length".into(), TypeSignature::UIntType), + ("write_count".into(), TypeSignature::UIntType), + ("read_count".into(), TypeSignature::UIntType), + ("read_length".into(), TypeSignature::UIntType), + ]) + .expect("BUG: failed to construct type signature for cost tuple"), + ) + }; +} + +pub fn runtime_cost, C: CostTracker>( + cost_function: ClarityCostFunction, + tracker: &mut C, + input: T, +) -> Result<()> { + let size: u64 = input.try_into().map_err(|_| CostErrors::CostOverflow)?; + let cost = tracker.compute_cost(cost_function, &[size])?; + + tracker.add_cost(cost) +} + +macro_rules! finally_drop_memory { + ( $env: expr, $used_mem:expr; $exec:expr ) => {{ + let result = (|| $exec)(); + $env.drop_memory($used_mem)?; + result + }}; +} + +pub fn analysis_typecheck_cost( + track: &mut T, + t1: &TypeSignature, + t2: &TypeSignature, +) -> Result<()> { + let t1_size = t1.type_size().map_err(|_| CostErrors::CostOverflow)?; + let t2_size = t2.type_size().map_err(|_| CostErrors::CostOverflow)?; + let cost = track.compute_cost( + ClarityCostFunction::AnalysisTypeCheck, + &[cmp::max(t1_size, t2_size) as u64], + )?; + track.add_cost(cost) +} + +pub trait MemoryConsumer { + fn get_memory_use(&self) -> Result; +} + +impl MemoryConsumer for Value { + fn get_memory_use(&self) -> Result { + Ok(self + .size() + .map_err(|_| CostErrors::InterpreterFailure)? + .into()) + } +} +pub trait CostTracker { + fn compute_cost( + &mut self, + cost_function: ClarityCostFunction, + input: &[u64], + ) -> Result; + fn add_cost(&mut self, cost: ExecutionCost) -> Result<()>; + fn add_memory(&mut self, memory: u64) -> Result<()>; + fn drop_memory(&mut self, memory: u64) -> Result<()>; + fn reset_memory(&mut self); + /// Check if the given contract-call should be short-circuited. 
+ /// If so: this charges the cost to the CostTracker, and return true + /// If not: return false + fn short_circuit_contract_call( + &mut self, + contract: &QualifiedContractIdentifier, + function: &ClarityName, + input: &[u64], + ) -> Result; +} + +// Don't track! +impl CostTracker for () { + fn compute_cost( + &mut self, + _cost_function: ClarityCostFunction, + _input: &[u64], + ) -> std::result::Result { + Ok(ExecutionCost::ZERO) + } + fn add_cost(&mut self, _cost: ExecutionCost) -> std::result::Result<(), CostErrors> { + Ok(()) + } + fn add_memory(&mut self, _memory: u64) -> std::result::Result<(), CostErrors> { + Ok(()) + } + fn drop_memory(&mut self, _memory: u64) -> Result<()> { + Ok(()) + } + fn reset_memory(&mut self) {} + fn short_circuit_contract_call( + &mut self, + _contract: &QualifiedContractIdentifier, + _function: &ClarityName, + _input: &[u64], + ) -> Result { + Ok(false) + } +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +pub struct ClarityCostFunctionReference { + pub contract_id: QualifiedContractIdentifier, + pub function_name: String, +} + +impl ::std::fmt::Display for ClarityCostFunctionReference { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "{}.{}", &self.contract_id, &self.function_name) + } +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, Copy)] +pub enum DefaultVersion { + Costs1, + Costs2, + Costs2Testnet, + Costs3, +} + +impl DefaultVersion { + pub fn evaluate( + &self, + cost_function_ref: &ClarityCostFunctionReference, + f: &ClarityCostFunction, + input: &[u64], + ) -> Result { + let n = input.first().ok_or_else(|| { + CostErrors::Expect("Default cost function supplied with 0 args".into()) + })?; + let r = match self { + DefaultVersion::Costs1 => f.eval::(*n), + DefaultVersion::Costs2 => f.eval::(*n), + DefaultVersion::Costs2Testnet => f.eval::(*n), + DefaultVersion::Costs3 => f.eval::(*n), + }; + r.map_err(|e| { + let e = match e { + 
crate::vm::errors::Error::Runtime(RuntimeErrorType::NotImplemented, _) => { + CheckErrors::UndefinedFunction(cost_function_ref.function_name.clone()).into() + } + other => other, + }; + + CostErrors::CostComputationFailed(format!( + "Error evaluating result of cost function {cost_function_ref}: {e}", + )) + }) + } +} + +impl DefaultVersion { + pub fn try_from( + mainnet: bool, + value: &QualifiedContractIdentifier, + ) -> std::result::Result { + if !value.is_boot() { + return Err("Not a boot contract".into()); + } + if value.name.as_str() == COSTS_1_NAME { + Ok(Self::Costs1) + } else if value.name.as_str() == COSTS_2_NAME { + if mainnet { + Ok(Self::Costs2) + } else { + Ok(Self::Costs2Testnet) + } + } else if value.name.as_str() == COSTS_3_NAME { + Ok(Self::Costs3) + } else { + Err(format!("Unknown default contract {}", &value.name)) + } + } +} + +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] +pub enum ClarityCostFunctionEvaluator { + Default( + ClarityCostFunctionReference, + ClarityCostFunction, + DefaultVersion, + ), + Clarity(ClarityCostFunctionReference), +} + +impl ClarityCostFunctionReference { + fn new(id: QualifiedContractIdentifier, name: String) -> ClarityCostFunctionReference { + ClarityCostFunctionReference { + contract_id: id, + function_name: name, + } + } +} + +#[derive(Debug, Clone)] +pub struct CostStateSummary { + pub contract_call_circuits: + HashMap<(QualifiedContractIdentifier, ClarityName), ClarityCostFunctionReference>, + pub cost_function_references: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct SerializedCostStateSummary { + contract_call_circuits: Vec<( + (QualifiedContractIdentifier, ClarityName), + ClarityCostFunctionReference, + )>, + cost_function_references: Vec<(ClarityCostFunction, ClarityCostFunctionReference)>, +} + +impl From for SerializedCostStateSummary { + fn from(other: CostStateSummary) -> SerializedCostStateSummary { + let CostStateSummary { + contract_call_circuits, + 
cost_function_references, + } = other; + SerializedCostStateSummary { + contract_call_circuits: contract_call_circuits.into_iter().collect(), + cost_function_references: cost_function_references.into_iter().collect(), + } + } +} + +impl From for CostStateSummary { + fn from(other: SerializedCostStateSummary) -> CostStateSummary { + let SerializedCostStateSummary { + contract_call_circuits, + cost_function_references, + } = other; + CostStateSummary { + contract_call_circuits: contract_call_circuits.into_iter().collect(), + cost_function_references: cost_function_references.into_iter().collect(), + } + } +} + +impl CostStateSummary { + pub fn empty() -> CostStateSummary { + CostStateSummary { + contract_call_circuits: HashMap::new(), + cost_function_references: HashMap::new(), + } + } +} + +#[derive(Clone)] +/// This struct holds all of the data required for non-free LimitedCostTracker instances +pub struct TrackerData { + cost_function_references: HashMap<&'static ClarityCostFunction, ClarityCostFunctionEvaluator>, + cost_contracts: HashMap, + contract_call_circuits: + HashMap<(QualifiedContractIdentifier, ClarityName), ClarityCostFunctionReference>, + total: ExecutionCost, + limit: ExecutionCost, + memory: u64, + memory_limit: u64, + /// if the cost tracker is non-free, this holds the StacksEpochId that should be used to evaluate + /// the Clarity cost functions. If the tracker *is* free, then those functions do not need to be + /// evaluated, so no epoch identifier is necessary. 
pub epoch: StacksEpochId,
    mainnet: bool,
    chain_id: u32,
}

#[derive(Clone)]
#[allow(clippy::large_enum_variant)]
pub enum LimitedCostTracker {
    Limited(TrackerData),
    Free,
}

#[cfg(any(test, feature = "testing"))]
impl LimitedCostTracker {
    /// Test-only accessor; panics on a free tracker.
    pub fn contract_call_circuits(
        &self,
    ) -> HashMap<(QualifiedContractIdentifier, ClarityName), ClarityCostFunctionReference> {
        match self {
            Self::Free => panic!("Cannot get contract call circuits on free tracker"),
            Self::Limited(TrackerData {
                ref contract_call_circuits,
                ..
            }) => contract_call_circuits.clone(),
        }
    }
    /// Test-only accessor; panics on a free tracker.
    pub fn cost_function_references(
        &self,
    ) -> HashMap<&'static ClarityCostFunction, ClarityCostFunctionEvaluator> {
        match self {
            Self::Free => panic!("Cannot get cost function references on free tracker"),
            Self::Limited(TrackerData {
                ref cost_function_references,
                ..
            }) => cost_function_references.clone(),
        }
    }
}

impl fmt::Debug for LimitedCostTracker {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Free => f.debug_struct("LimitedCostTracker::Free").finish(),
            Self::Limited(TrackerData {
                total,
                limit,
                memory,
                memory_limit,
                ..
            }) => f
                .debug_struct("LimitedCostTracker")
                .field("total", total)
                .field("limit", limit)
                .field("memory", memory)
                .field("memory_limit", memory_limit)
                .finish(),
        }
    }
}

impl PartialEq for LimitedCostTracker {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Self::Free, Self::Free) => true,
            (Self::Limited(self_data), Self::Limited(other_data)) => {
                self_data.total == other_data.total
                    && other_data.limit == self_data.limit
                    && self_data.memory == other_data.memory
                    && self_data.memory_limit == other_data.memory_limit
            }
            (_, _) => false,
        }
    }
}

/// Load the last stored `CostStateSummary` from the cost-voting contract's
/// metadata, keyed at the last processed block height.
/// Returns an empty summary if no state has ever been stored.
fn load_state_summary(mainnet: bool, clarity_db: &mut ClarityDatabase) -> Result<CostStateSummary> {
    let cost_voting_contract = boot_code_id("cost-voting", mainnet);

    let clarity_epoch = clarity_db
        .get_clarity_epoch_version()
        .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?;
    let last_processed_at = match clarity_db.get_value(
        "vm-costs::last-processed-at-height",
        &TypeSignature::UIntType,
        &clarity_epoch,
    ) {
        Ok(Some(v)) => u32::try_from(
            v.value
                .expect_u128()
                .map_err(|_| CostErrors::InterpreterFailure)?,
        )
        .map_err(|_| CostErrors::InterpreterFailure)?,
        Ok(None) => return Ok(CostStateSummary::empty()),
        Err(e) => return Err(CostErrors::CostComputationFailed(e.to_string())),
    };

    let metadata_result = clarity_db
        .fetch_metadata_manual::<String>(
            last_processed_at,
            &cost_voting_contract,
            "::state_summary",
        )
        .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?;
    let serialized: SerializedCostStateSummary = match metadata_result {
        Some(serialized) => {
            serde_json::from_str(&serialized).map_err(|_| CostErrors::InterpreterFailure)?
}
        None => return Ok(CostStateSummary::empty()),
    };
    Ok(CostStateSummary::from(serialized))
}

/// Persist `to_store` into the cost-voting contract's metadata and record the
/// current block height under `vm-costs::last-processed-at-height`, so
/// `load_state_summary` can find it later.
fn store_state_summary(
    mainnet: bool,
    clarity_db: &mut ClarityDatabase,
    to_store: &CostStateSummary,
) -> Result<()> {
    let block_height = clarity_db.get_current_block_height();
    let cost_voting_contract = boot_code_id("cost-voting", mainnet);
    let epoch = clarity_db
        .get_clarity_epoch_version()
        .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?;
    clarity_db
        .put_value(
            "vm-costs::last-processed-at-height",
            Value::UInt(block_height as u128),
            &epoch,
        )
        .map_err(|_e| CostErrors::CostContractLoadFailure)?;
    let serialized_summary =
        serde_json::to_string(&SerializedCostStateSummary::from(to_store.clone()))
            .map_err(|_| CostErrors::InterpreterFailure)?;
    clarity_db
        .set_metadata(
            &cost_voting_contract,
            "::state_summary",
            &serialized_summary,
        )
        .map_err(|e| CostErrors::Expect(e.to_string()))?;

    Ok(())
}

///
/// This method loads a cost state summary structure from the currently open stacks chain tip
/// In doing so, it reads from the cost-voting contract to find any newly confirmed proposals,
/// checks those proposals for validity, and then applies those changes to the cached set
/// of cost functions.
///
/// `apply_updates` - tells this function to look for any changes in the cost voting contract
///   which would need to be applied. if `false`, just load the last computed cost state in this
///   fork.
///
fn load_cost_functions(
    mainnet: bool,
    clarity_db: &mut ClarityDatabase,
    apply_updates: bool,
) -> Result<CostStateSummary> {
    let clarity_epoch = clarity_db
        .get_clarity_epoch_version()
        .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?;
    let last_processed_count = clarity_db
        .get_value(
            "vm-costs::last_processed_count",
            &TypeSignature::UIntType,
            &clarity_epoch,
        )
        .map_err(|_e| CostErrors::CostContractLoadFailure)?
        .map(|result| result.value)
        .unwrap_or(Value::UInt(0))
        .expect_u128()
        .map_err(|_| CostErrors::InterpreterFailure)?;
    let cost_voting_contract = boot_code_id("cost-voting", mainnet);
    let confirmed_proposals_count = clarity_db
        .lookup_variable_unknown_descriptor(
            &cost_voting_contract,
            "confirmed-proposal-count",
            &clarity_epoch,
        )
        .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?
        .expect_u128()
        .map_err(|_| CostErrors::InterpreterFailure)?;
    debug!("Check cost voting contract";
           "confirmed_proposal_count" => confirmed_proposals_count,
           "last_processed_count" => last_processed_count);

    // we need to process any confirmed proposals in the range [fetch-start, fetch-end)
    let (fetch_start, fetch_end) = (last_processed_count, confirmed_proposals_count);
    let mut state_summary = load_state_summary(mainnet, clarity_db)?;
    if !apply_updates {
        return Ok(state_summary);
    }

    for confirmed_proposal in fetch_start..fetch_end {
        // fetch the proposal data
        let entry = clarity_db
            .fetch_entry_unknown_descriptor(
                &cost_voting_contract,
                "confirmed-proposals",
                &Value::from(
                    TupleData::from_data(vec![(
                        "confirmed-id".into(),
                        Value::UInt(confirmed_proposal),
                    )])
                    .map_err(|_| {
                        CostErrors::Expect("BUG: failed to construct simple tuple".into())
                    })?,
                ),
                &clarity_epoch,
            )
            .map_err(|_| CostErrors::Expect("BUG: Failed querying confirmed-proposals".into()))?
            .expect_optional()
            .map_err(|_| CostErrors::InterpreterFailure)?
            .ok_or_else(|| {
                CostErrors::Expect("BUG: confirmed-proposal-count exceeds stored proposals".into())
            })?
            .expect_tuple()
            .map_err(|_| CostErrors::InterpreterFailure)?;
        let target_contract = match entry
            .get("function-contract")
            .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))?
            .clone()
            .expect_principal()
            .map_err(|_| CostErrors::InterpreterFailure)?
+ { + PrincipalData::Contract(contract_id) => contract_id, + _ => { + warn!("Confirmed cost proposal invalid: function-contract is not a contract principal"; + "confirmed_proposal_id" => confirmed_proposal); + continue; + } + }; + let target_function = match ClarityName::try_from( + entry + .get("function-name") + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? + .clone() + .expect_ascii() + .map_err(|_| CostErrors::InterpreterFailure)?, + ) { + Ok(x) => x, + Err(_) => { + warn!("Confirmed cost proposal invalid: function-name is not a valid function name"; + "confirmed_proposal_id" => confirmed_proposal); + continue; + } + }; + let cost_contract = match entry + .get("cost-function-contract") + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? + .clone() + .expect_principal() + .map_err(|_| CostErrors::InterpreterFailure)? + { + PrincipalData::Contract(contract_id) => contract_id, + _ => { + warn!("Confirmed cost proposal invalid: cost-function-contract is not a contract principal"; + "confirmed_proposal_id" => confirmed_proposal); + continue; + } + }; + + let cost_function = match ClarityName::try_from( + entry + .get_owned("cost-function-name") + .map_err(|_| CostErrors::Expect("BUG: malformed cost proposal tuple".into()))? 
+ .expect_ascii() + .map_err(|_| CostErrors::InterpreterFailure)?, + ) { + Ok(x) => x, + Err(_) => { + warn!("Confirmed cost proposal invalid: cost-function-name is not a valid function name"; + "confirmed_proposal_id" => confirmed_proposal); + continue; + } + }; + + // Here is where we perform the required validity checks for a confirmed proposal: + // * Replaced contract-calls _must_ be `define-read-only` _or_ refer to one of the boot code + // cost functions + // * cost-function contracts must be arithmetic only + + // make sure the contract is "cost contract eligible" via the + // arithmetic-checking analysis pass + let (cost_func_ref, cost_func_type) = match clarity_db + .load_contract_analysis(&cost_contract) + .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))? + { + Some(c) => { + if !c.is_cost_contract_eligible { + warn!("Confirmed cost proposal invalid: cost-function-contract uses non-arithmetic or otherwise illegal operations"; + "confirmed_proposal_id" => confirmed_proposal, + "contract_name" => %cost_contract, + ); + continue; + } + + if let Some(FunctionType::Fixed(cost_function_type)) = c + .read_only_function_types + .get(&cost_function) + .or_else(|| c.private_function_types.get(&cost_function)) + { + if !cost_function_type.returns.eq(&COST_TUPLE_TYPE_SIGNATURE) { + warn!("Confirmed cost proposal invalid: cost-function-name does not return a cost tuple"; + "confirmed_proposal_id" => confirmed_proposal, + "contract_name" => %cost_contract, + "function_name" => %cost_function, + "return_type" => %cost_function_type.returns, + ); + continue; + } + if !cost_function_type.args.len() == 1 + || cost_function_type.args[0].signature != TypeSignature::UIntType + { + warn!("Confirmed cost proposal invalid: cost-function-name args should be length-1 and only uint"; + "confirmed_proposal_id" => confirmed_proposal, + "contract_name" => %cost_contract, + "function_name" => %cost_function, + ); + continue; + } + ( + ClarityCostFunctionReference { + 
contract_id: cost_contract,
                            function_name: cost_function.to_string(),
                        },
                        cost_function_type.clone(),
                    )
                } else {
                    warn!("Confirmed cost proposal invalid: cost-function-name not defined";
                          "confirmed_proposal_id" => confirmed_proposal,
                          "contract_name" => %cost_contract,
                          "function_name" => %cost_function,
                    );
                    continue;
                }
            }
            None => {
                warn!("Confirmed cost proposal invalid: cost-function-contract is not a published contract";
                      "confirmed_proposal_id" => confirmed_proposal,
                      "contract_name" => %cost_contract,
                );
                continue;
            }
        };

        if target_contract == boot_code_id("costs", mainnet) {
            // refering to one of the boot code cost functions
            let target = match ClarityCostFunction::lookup_by_name(&target_function) {
                Some(ClarityCostFunction::Unimplemented) => {
                    warn!("Attempted vote on unimplemented cost function";
                          "confirmed_proposal_id" => confirmed_proposal,
                          "cost_function" => %target_function);
                    continue;
                }
                Some(cost_func) => cost_func,
                None => {
                    warn!("Confirmed cost proposal invalid: function-name does not reference a Clarity cost function";
                          "confirmed_proposal_id" => confirmed_proposal,
                          "cost_function" => %target_function);
                    continue;
                }
            };
            state_summary
                .cost_function_references
                .insert(target, cost_func_ref);
        } else {
            // referring to a user-defined function
            match clarity_db
                .load_contract_analysis(&target_contract)
                .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?
            {
                Some(c) => {
                    if let Some(Fixed(tf)) = c.read_only_function_types.get(&target_function) {
                        if cost_func_type.args.len() != tf.args.len() {
                            warn!("Confirmed cost proposal invalid: cost-function contains the wrong number of arguments";
                                  "confirmed_proposal_id" => confirmed_proposal,
                                  "target_contract_name" => %target_contract,
                                  "target_function_name" => %target_function,
                            );
                            continue;
                        }
                        // NOTE(review): this `continue` targets the inner `for`
                        // loop, not the outer proposal loop, so a non-uint
                        // argument only skips the remaining argument checks —
                        // the proposal is still inserted below. Preserved as-is
                        // because this is consensus-observed behavior; confirm
                        // against upstream before changing.
                        for arg in &cost_func_type.args {
                            if arg.signature != TypeSignature::UIntType {
                                warn!("Confirmed cost proposal invalid: contains non uint argument";
                                      "confirmed_proposal_id" => confirmed_proposal,
                                );
                                continue;
                            }
                        }
                    } else {
                        warn!("Confirmed cost proposal invalid: function-name not defined or is not read-only";
                              "confirmed_proposal_id" => confirmed_proposal,
                              "target_contract_name" => %target_contract,
                              "target_function_name" => %target_function,
                        );
                        continue;
                    }
                }
                None => {
                    warn!("Confirmed cost proposal invalid: contract-name not a published contract";
                          "confirmed_proposal_id" => confirmed_proposal,
                          "target_contract_name" => %target_contract,
                    );
                    continue;
                }
            }
            state_summary
                .contract_call_circuits
                .insert((target_contract, target_function), cost_func_ref);
        }
    }
    if confirmed_proposals_count > last_processed_count {
        store_state_summary(mainnet, clarity_db, &state_summary)?;
        clarity_db
            .put_value(
                "vm-costs::last_processed_count",
                Value::UInt(confirmed_proposals_count),
                &clarity_epoch,
            )
            .map_err(|_e| CostErrors::CostContractLoadFailure)?;
    }

    Ok(state_summary)
}

impl LimitedCostTracker {
    /// Build a limited tracker at the start of a block: loads the cost
    /// functions and applies any newly confirmed cost-voting proposals.
    pub fn new(
        mainnet: bool,
        chain_id: u32,
        limit: ExecutionCost,
        clarity_db: &mut ClarityDatabase,
        epoch: StacksEpochId,
    ) -> Result<LimitedCostTracker> {
        let mut cost_tracker = TrackerData {
            cost_function_references: HashMap::new(),
            cost_contracts: HashMap::new(),
            contract_call_circuits: HashMap::new(),
            limit,
            memory_limit: CLARITY_MEMORY_LIMIT,
            total: ExecutionCost::ZERO,
            memory: 0,
            epoch,
mainnet,
            chain_id,
        };
        assert!(clarity_db.is_stack_empty());
        cost_tracker.load_costs(clarity_db, true)?;
        Ok(Self::Limited(cost_tracker))
    }

    /// Build a limited tracker mid-block: loads the last computed cost state
    /// without applying cost-voting updates.
    pub fn new_mid_block(
        mainnet: bool,
        chain_id: u32,
        limit: ExecutionCost,
        clarity_db: &mut ClarityDatabase,
        epoch: StacksEpochId,
    ) -> Result<LimitedCostTracker> {
        let mut cost_tracker = TrackerData {
            cost_function_references: HashMap::new(),
            cost_contracts: HashMap::new(),
            contract_call_circuits: HashMap::new(),
            limit,
            memory_limit: CLARITY_MEMORY_LIMIT,
            total: ExecutionCost::ZERO,
            memory: 0,
            epoch,
            mainnet,
            chain_id,
        };
        cost_tracker.load_costs(clarity_db, false)?;
        Ok(Self::Limited(cost_tracker))
    }

    #[cfg(any(test, feature = "testing"))]
    pub fn new_max_limit(
        clarity_db: &mut ClarityDatabase,
        epoch: StacksEpochId,
        use_mainnet: bool,
    ) -> Result<LimitedCostTracker> {
        use crate::vm::tests::test_only_mainnet_to_chain_id;
        let chain_id = test_only_mainnet_to_chain_id(use_mainnet);
        assert!(clarity_db.is_stack_empty());
        LimitedCostTracker::new(
            use_mainnet,
            chain_id,
            ExecutionCost::max_value(),
            clarity_db,
            epoch,
        )
    }

    pub fn new_free() -> LimitedCostTracker {
        Self::Free
    }

    /// Name of the boot costs contract that is authoritative in `epoch_id`.
    /// Errors for epoch 1.0, where Clarity does not exist.
    pub fn default_cost_contract_for_epoch(epoch_id: StacksEpochId) -> Result<String> {
        let result = match epoch_id {
            StacksEpochId::Epoch10 => {
                return Err(CostErrors::Expect("Attempted to get default cost functions for Epoch 1.0 where Clarity does not exist".into()));
            }
            StacksEpochId::Epoch20 => COSTS_1_NAME.to_string(),
            StacksEpochId::Epoch2_05 => COSTS_2_NAME.to_string(),
            StacksEpochId::Epoch21
            | StacksEpochId::Epoch22
            | StacksEpochId::Epoch23
            | StacksEpochId::Epoch24
            | StacksEpochId::Epoch25
            | StacksEpochId::Epoch30
            | StacksEpochId::Epoch31 => COSTS_3_NAME.to_string(),
        };
        Ok(result)
    }
}

impl TrackerData {
    // TODO: add tests from mutation testing results #4831
    #[cfg_attr(test, mutants::skip)]
    /// `apply_updates` - tells this function to look for any changes in the cost voting contract
    ///   which would need to be applied. if `false`, just load the last computed cost state in this
    ///   fork.
    fn load_costs(&mut self, clarity_db: &mut ClarityDatabase, apply_updates: bool) -> Result<()> {
        clarity_db.begin();
        let epoch_id = clarity_db
            .get_clarity_epoch_version()
            .map_err(|e| CostErrors::CostComputationFailed(e.to_string()))?;
        let boot_costs_id = boot_code_id(
            &LimitedCostTracker::default_cost_contract_for_epoch(epoch_id)?,
            self.mainnet,
        );

        let v = DefaultVersion::try_from(self.mainnet, &boot_costs_id).map_err(|e| {
            CostErrors::Expect(format!(
                "Failed to get version of default costs contract {e}"
            ))
        })?;

        let CostStateSummary {
            contract_call_circuits,
            mut cost_function_references,
        } = load_cost_functions(self.mainnet, clarity_db, apply_updates).map_err(|e| {
            // roll back the nested transaction before surfacing the error;
            // a rollback failure takes precedence over the original error
            let result = clarity_db
                .roll_back()
                .map_err(|e| CostErrors::Expect(e.to_string()));
            match result {
                Ok(_) => e,
                Err(rollback_err) => rollback_err,
            }
        })?;

        self.contract_call_circuits = contract_call_circuits;

        let iter = ClarityCostFunction::ALL.iter();
        let iter_len = iter.len();
        let mut cost_contracts = HashMap::with_capacity(iter_len);
        let mut m = HashMap::with_capacity(iter_len);

        for f in iter {
            // any cost function without a voted replacement falls back to the
            // boot costs contract for this epoch
            let cost_function_ref = cost_function_references.remove(f).unwrap_or_else(|| {
                ClarityCostFunctionReference::new(boot_costs_id.clone(), f.get_name())
            });
            if !cost_contracts.contains_key(&cost_function_ref.contract_id) {
                let contract_context = match clarity_db.get_contract(&cost_function_ref.contract_id)
                {
                    Ok(contract) => contract.contract_context,
                    Err(e) => {
                        error!("Failed to load intended Clarity cost contract";
                               "contract" => %cost_function_ref.contract_id,
                               "error" => ?e);
                        clarity_db
                            .roll_back()
                            .map_err(|e| CostErrors::Expect(e.to_string()))?;
                        return Err(CostErrors::CostContractLoadFailure);
                    }
                };
                cost_contracts.insert(cost_function_ref.contract_id.clone(), contract_context);
}

            if cost_function_ref.contract_id == boot_costs_id {
                m.insert(
                    f,
                    ClarityCostFunctionEvaluator::Default(cost_function_ref, *f, v),
                );
            } else {
                m.insert(f, ClarityCostFunctionEvaluator::Clarity(cost_function_ref));
            }
        }

        for (_, circuit_target) in self.contract_call_circuits.iter() {
            if !cost_contracts.contains_key(&circuit_target.contract_id) {
                let contract_context = match clarity_db.get_contract(&circuit_target.contract_id) {
                    Ok(contract) => contract.contract_context,
                    Err(e) => {
                        error!("Failed to load intended Clarity cost contract";
                               "contract" => %boot_costs_id.to_string(),
                               "error" => %format!("{:?}", e));
                        clarity_db
                            .roll_back()
                            .map_err(|e| CostErrors::Expect(e.to_string()))?;
                        return Err(CostErrors::CostContractLoadFailure);
                    }
                };
                cost_contracts.insert(circuit_target.contract_id.clone(), contract_context);
            }
        }

        self.cost_function_references = m;
        self.cost_contracts = cost_contracts;

        if apply_updates {
            clarity_db
                .commit()
                .map_err(|e| CostErrors::Expect(e.to_string()))?;
        } else {
            clarity_db
                .roll_back()
                .map_err(|e| CostErrors::Expect(e.to_string()))?;
        }

        Ok(())
    }
}

impl LimitedCostTracker {
    pub fn get_total(&self) -> ExecutionCost {
        match self {
            Self::Limited(TrackerData { total, .. }) => total.clone(),
            Self::Free => ExecutionCost::ZERO,
        }
    }
    #[allow(clippy::panic)]
    pub fn set_total(&mut self, total: ExecutionCost) {
        // used by the miner to "undo" the cost of a transaction when trying to pack a block.
        match self {
            Self::Limited(ref mut data) => data.total = total,
            Self::Free => panic!("Cannot set total on free tracker"),
        }
    }
    pub fn get_limit(&self) -> ExecutionCost {
        match self {
            Self::Limited(TrackerData { limit, .. }) => limit.clone(),
            Self::Free => ExecutionCost::max_value(),
        }
    }

    pub fn get_memory(&self) -> u64 {
        match self {
            Self::Limited(TrackerData { memory, .. }) => *memory,
            Self::Free => 0,
        }
    }
    pub fn get_memory_limit(&self) -> u64 {
        match self {
            Self::Limited(TrackerData { memory_limit, .. }) => *memory_limit,
            Self::Free => u64::MAX,
        }
    }
}

/// Parse the cost tuple returned by a Clarity cost function into an
/// `ExecutionCost`; each u128 field saturates to `u64::MAX`.
pub fn parse_cost(
    cost_function_name: &str,
    eval_result: InterpreterResult<Option<Value>>,
) -> Result<ExecutionCost> {
    match eval_result {
        Ok(Some(Value::Tuple(data))) => {
            let results = (
                data.data_map.get("write_length"),
                data.data_map.get("write_count"),
                data.data_map.get("runtime"),
                data.data_map.get("read_length"),
                data.data_map.get("read_count"),
            );

            match results {
                (
                    Some(UInt(write_length)),
                    Some(UInt(write_count)),
                    Some(UInt(runtime)),
                    Some(UInt(read_length)),
                    Some(UInt(read_count)),
                ) => Ok(ExecutionCost {
                    write_length: (*write_length).try_into().unwrap_or(u64::MAX),
                    write_count: (*write_count).try_into().unwrap_or(u64::MAX),
                    runtime: (*runtime).try_into().unwrap_or(u64::MAX),
                    read_length: (*read_length).try_into().unwrap_or(u64::MAX),
                    read_count: (*read_count).try_into().unwrap_or(u64::MAX),
                }),
                _ => Err(CostErrors::CostComputationFailed(
                    "Execution Cost tuple does not contain only UInts".to_string(),
                )),
            }
        }
        Ok(Some(_)) => Err(CostErrors::CostComputationFailed(
            "Clarity cost function returned something other than a Cost tuple".to_string(),
        )),
        Ok(None) => Err(CostErrors::CostComputationFailed(
            "Clarity cost function returned nothing".to_string(),
        )),
        Err(e) => Err(CostErrors::CostComputationFailed(format!(
            "Error evaluating result of cost function {cost_function_name}: {e}"
        ))),
    }
}

// TODO: add tests from mutation testing results #4832
#[cfg_attr(test, mutants::skip)]
/// Evaluate a Clarity-defined cost function in a fresh, free, null-backed
/// global context and parse its returned cost tuple.
pub fn compute_cost(
    cost_tracker: &TrackerData,
    cost_function_reference: ClarityCostFunctionReference,
    input_sizes: &[u64],
    eval_in_epoch: StacksEpochId,
) -> Result<ExecutionCost> {
    let mainnet = cost_tracker.mainnet;
    let chain_id = cost_tracker.chain_id;
    let mut null_store = NullBackingStore::new();
    let conn =
null_store.as_clarity_db();
    let mut global_context = GlobalContext::new(
        mainnet,
        chain_id,
        conn,
        LimitedCostTracker::new_free(),
        eval_in_epoch,
    );

    let cost_contract = cost_tracker
        .cost_contracts
        .get(&cost_function_reference.contract_id)
        .ok_or(CostErrors::CostComputationFailed(format!(
            "CostFunction not found: {cost_function_reference}"
        )))?;

    let mut program = vec![SymbolicExpression::atom(
        cost_function_reference.function_name[..].into(),
    )];

    for input_size in input_sizes.iter() {
        program.push(SymbolicExpression::atom_value(Value::UInt(
            *input_size as u128,
        )));
    }

    let function_invocation = SymbolicExpression::list(program);
    let eval_result = global_context.execute(|global_context| {
        let context = LocalContext::new();
        let mut call_stack = CallStack::new();
        let publisher: PrincipalData = cost_contract.contract_identifier.issuer.clone().into();
        let mut env = Environment::new(
            global_context,
            cost_contract,
            &mut call_stack,
            Some(publisher.clone()),
            Some(publisher.clone()),
            None,
        );

        let result = crate::vm::eval(&function_invocation, &mut env, &context)?;
        Ok(Some(result))
    });

    parse_cost(&cost_function_reference.to_string(), eval_result)
}

/// Add `cost` to the tracker's running total, erroring once the limit is hit.
fn add_cost(s: &mut TrackerData, cost: ExecutionCost) -> std::result::Result<(), CostErrors> {
    s.total.add(&cost)?;
    if cfg!(feature = "disable-costs") {
        // Disable check for exceeding the cost limit to allow mining large blocks for profiling purposes.
        return Ok(());
    }
    if s.total.exceeds(&s.limit) {
        Err(CostErrors::CostBalanceExceeded(
            s.total.clone(),
            s.limit.clone(),
        ))
    } else {
        Ok(())
    }
}

/// Charge `memory` against the tracker's memory budget.
fn add_memory(s: &mut TrackerData, memory: u64) -> std::result::Result<(), CostErrors> {
    s.memory = s.memory.cost_overflow_add(memory)?;
    if s.memory > s.memory_limit {
        Err(CostErrors::MemoryBalanceExceeded(s.memory, s.memory_limit))
    } else {
        Ok(())
    }
}

/// Release `memory` from the tracker's memory budget; underflow is a bug.
fn drop_memory(s: &mut TrackerData, memory: u64) -> Result<()> {
    s.memory = s
        .memory
        .checked_sub(memory)
        .ok_or_else(|| CostErrors::Expect("Underflowed dropped memory".into()))?;
    Ok(())
}

impl CostTracker for LimitedCostTracker {
    fn compute_cost(
        &mut self,
        cost_function: ClarityCostFunction,
        input: &[u64],
    ) -> std::result::Result<ExecutionCost, CostErrors> {
        match self {
            Self::Free => {
                // tracker is free, return zero!
                Ok(ExecutionCost::ZERO)
            }
            Self::Limited(ref mut data) => {
                if cost_function == ClarityCostFunction::Unimplemented {
                    return Err(CostErrors::Expect(
                        "Used unimplemented cost function".into(),
                    ));
                }
                let cost_function_ref = data.cost_function_references.get(&cost_function).ok_or(
                    CostErrors::CostComputationFailed(format!(
                        "CostFunction not defined: {cost_function}"
                    )),
                )?;

                match cost_function_ref {
                    ClarityCostFunctionEvaluator::Default(
                        cost_function_ref,
                        clarity_cost_function,
                        default_version,
                    ) => default_version.evaluate(cost_function_ref, clarity_cost_function, input),
                    ClarityCostFunctionEvaluator::Clarity(cost_function_ref) => {
                        compute_cost(data, cost_function_ref.clone(), input, data.epoch)
                    }
                }
            }
        }
    }
    fn add_cost(&mut self, cost: ExecutionCost) -> std::result::Result<(), CostErrors> {
        match self {
            Self::Free => Ok(()),
            Self::Limited(ref mut data) => add_cost(data, cost),
        }
    }
    fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> {
        match self {
            Self::Free => Ok(()),
            Self::Limited(ref mut data) => add_memory(data, memory),
        }
    }
    fn drop_memory(&mut self, memory: u64) -> Result<()> {
        match self {
            Self::Free => Ok(()),
            Self::Limited(ref mut data) => drop_memory(data, memory),
        }
    }
    fn reset_memory(&mut self) {
        match self {
            Self::Free => {}
            Self::Limited(ref mut data) => {
                data.memory = 0;
            }
        }
    }
    fn short_circuit_contract_call(
        &mut self,
        contract: &QualifiedContractIdentifier,
        function: &ClarityName,
        input: &[u64],
    ) -> Result<bool> {
        match self {
            Self::Free => {
                // if we're already free, no need to worry about short circuiting contract-calls
                Ok(false)
            }
            Self::Limited(data) => {
                // grr, if HashMap::get didn't require Borrow, we wouldn't need this cloning.
                let lookup_key = (contract.clone(), function.clone());
                if let Some(cost_function) = data.contract_call_circuits.get(&lookup_key).cloned() {
                    compute_cost(data, cost_function, input, data.epoch)?;
                    Ok(true)
                } else {
                    Ok(false)
                }
            }
        }
    }
}

impl CostTracker for &mut LimitedCostTracker {
    fn compute_cost(
        &mut self,
        cost_function: ClarityCostFunction,
        input: &[u64],
    ) -> std::result::Result<ExecutionCost, CostErrors> {
        LimitedCostTracker::compute_cost(self, cost_function, input)
    }
    fn add_cost(&mut self, cost: ExecutionCost) -> std::result::Result<(), CostErrors> {
        LimitedCostTracker::add_cost(self, cost)
    }
    fn add_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> {
        LimitedCostTracker::add_memory(self, memory)
    }
    fn drop_memory(&mut self, memory: u64) -> std::result::Result<(), CostErrors> {
        LimitedCostTracker::drop_memory(self, memory)
    }
    fn reset_memory(&mut self) {
        LimitedCostTracker::reset_memory(self)
    }
    fn short_circuit_contract_call(
        &mut self,
        contract: &QualifiedContractIdentifier,
        function: &ClarityName,
        input: &[u64],
    ) -> Result<bool> {
        LimitedCostTracker::short_circuit_contract_call(self, contract, function, input)
    }
}

/// Checked arithmetic that surfaces overflow as `CostErrors::CostOverflow`.
pub trait CostOverflowingMath<T> {
    fn cost_overflow_mul(self, other: T) -> Result<T>;
    fn
cost_overflow_add(self, other: T) -> Result<T>;
    fn cost_overflow_sub(self, other: T) -> Result<T>;
    fn cost_overflow_div(self, other: T) -> Result<T>;
}

impl CostOverflowingMath<u64> for u64 {
    fn cost_overflow_mul(self, other: u64) -> Result<u64> {
        self.checked_mul(other).ok_or(CostErrors::CostOverflow)
    }
    fn cost_overflow_add(self, other: u64) -> Result<u64> {
        self.checked_add(other).ok_or(CostErrors::CostOverflow)
    }
    fn cost_overflow_sub(self, other: u64) -> Result<u64> {
        self.checked_sub(other).ok_or(CostErrors::CostOverflow)
    }
    fn cost_overflow_div(self, other: u64) -> Result<u64> {
        self.checked_div(other).ok_or(CostErrors::CostOverflow)
    }
}

impl ExecutionCost {
    pub const ZERO: Self = Self {
        runtime: 0,
        write_length: 0,
        read_count: 0,
        write_count: 0,
        read_length: 0,
    };

    /// Returns the percentage of self consumed in `numerator`'s largest proportion dimension.
    pub fn proportion_largest_dimension(&self, numerator: &ExecutionCost) -> u64 {
        // max() should always return because there are > 0 elements
        #[allow(clippy::expect_used)]
        *[
            numerator.runtime / cmp::max(1, self.runtime / 100),
            numerator.write_length / cmp::max(1, self.write_length / 100),
            numerator.write_count / cmp::max(1, self.write_count / 100),
            numerator.read_length / cmp::max(1, self.read_length / 100),
            numerator.read_count / cmp::max(1, self.read_count / 100),
        ]
        .iter()
        .max()
        .expect("BUG: should find maximum")
    }

    /// Returns the dot product of this execution cost with `resolution`/block_limit
    /// This provides a scalar value representing the cumulative consumption
    /// of `self` in the provided block_limit.
    pub fn proportion_dot_product(&self, block_limit: &ExecutionCost, resolution: u64) -> u64 {
        [
            // each field here is calculating `r * self / limit`, using f64
            //  use MAX(1, block_limit) to guard against divide by zero
            //  use MIN(1, self/block_limit) to guard against self > block_limit
            resolution as f64
                * 1_f64.min(self.runtime as f64 / 1_f64.max(block_limit.runtime as f64)),
            resolution as f64
                * 1_f64.min(self.read_count as f64 / 1_f64.max(block_limit.read_count as f64)),
            resolution as f64
                * 1_f64.min(self.write_count as f64 / 1_f64.max(block_limit.write_count as f64)),
            resolution as f64
                * 1_f64.min(self.read_length as f64 / 1_f64.max(block_limit.read_length as f64)),
            resolution as f64
                * 1_f64.min(self.write_length as f64 / 1_f64.max(block_limit.write_length as f64)),
        ]
        .iter()
        .fold(0, |acc, dim| acc.saturating_add(cmp::max(*dim as u64, 1)))
    }

    pub fn max_value() -> ExecutionCost {
        Self {
            runtime: u64::MAX,
            write_length: u64::MAX,
            read_count: u64::MAX,
            write_count: u64::MAX,
            read_length: u64::MAX,
        }
    }

    /// A cost that is pure runtime (all other dimensions zero).
    pub fn runtime(runtime: u64) -> ExecutionCost {
        Self {
            runtime,
            write_length: 0,
            read_count: 0,
            write_count: 0,
            read_length: 0,
        }
    }

    pub fn add_runtime(&mut self, runtime: u64) -> Result<()> {
        self.runtime = self.runtime.cost_overflow_add(runtime)?;
        Ok(())
    }

    /// Dimension-wise checked addition of `other` into `self`.
    pub fn add(&mut self, other: &ExecutionCost) -> Result<()> {
        self.runtime = self.runtime.cost_overflow_add(other.runtime)?;
        self.read_count = self.read_count.cost_overflow_add(other.read_count)?;
        self.read_length = self.read_length.cost_overflow_add(other.read_length)?;
        self.write_length = self.write_length.cost_overflow_add(other.write_length)?;
        self.write_count = self.write_count.cost_overflow_add(other.write_count)?;
        Ok(())
    }

    /// Dimension-wise checked subtraction of `other` from `self`.
    pub fn sub(&mut self, other: &ExecutionCost) -> Result<()> {
        self.runtime = self.runtime.cost_overflow_sub(other.runtime)?;
        self.read_count = self.read_count.cost_overflow_sub(other.read_count)?;
        self.read_length = self.read_length.cost_overflow_sub(other.read_length)?;
        self.write_length = self.write_length.cost_overflow_sub(other.write_length)?;
        self.write_count = self.write_count.cost_overflow_sub(other.write_count)?;
        Ok(())
    }

    pub fn multiply(&mut self, times: u64) -> Result<()> {
        self.runtime = self.runtime.cost_overflow_mul(times)?;
        self.read_count = self.read_count.cost_overflow_mul(times)?;
        self.read_length = self.read_length.cost_overflow_mul(times)?;
        self.write_length = self.write_length.cost_overflow_mul(times)?;
        self.write_count = self.write_count.cost_overflow_mul(times)?;
        Ok(())
    }

    pub fn divide(&mut self, divisor: u64) -> Result<()> {
        self.runtime = self.runtime.cost_overflow_div(divisor)?;
        self.read_count = self.read_count.cost_overflow_div(divisor)?;
        self.read_length = self.read_length.cost_overflow_div(divisor)?;
        self.write_length = self.write_length.cost_overflow_div(divisor)?;
        self.write_count = self.write_count.cost_overflow_div(divisor)?;
        Ok(())
    }

    /// Returns whether or not this cost exceeds any dimension of the
    /// other cost.
+ pub fn exceeds(&self, other: &ExecutionCost) -> bool { + self.runtime > other.runtime + || self.write_length > other.write_length + || self.write_count > other.write_count + || self.read_count > other.read_count + || self.read_length > other.read_length + } + + pub fn max_cost(first: ExecutionCost, second: ExecutionCost) -> ExecutionCost { + Self { + runtime: first.runtime.max(second.runtime), + write_length: first.write_length.max(second.write_length), + write_count: first.write_count.max(second.write_count), + read_count: first.read_count.max(second.read_count), + read_length: first.read_length.max(second.read_length), + } + } + + pub fn is_zero(&self) -> bool { + *self == Self::ZERO + } +} + +// ONLY WORKS IF INPUT IS u64 +fn int_log2(input: u64) -> Option { + 63_u32.checked_sub(input.leading_zeros()).map(|floor_log| { + if input.trailing_zeros() == floor_log { + u64::from(floor_log) + } else { + u64::from(floor_log + 1) + } + }) +} + +#[cfg(test)] +mod unit_tests { + use super::*; + + #[test] + fn test_simple_overflows() { + assert_eq!(u64::MAX.cost_overflow_add(1), Err(CostErrors::CostOverflow)); + assert_eq!(u64::MAX.cost_overflow_mul(2), Err(CostErrors::CostOverflow)); + } + + #[test] + fn test_simple_sub() { + assert_eq!(0u64.cost_overflow_sub(1), Err(CostErrors::CostOverflow)); + } + + #[test] + fn test_simple_log2s() { + let inputs = [ + 1, + 2, + 4, + 8, + 16, + 31, + 32, + 33, + 39, + 64, + 128, + 2_u64.pow(63), + u64::MAX, + ]; + let expected = [0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 63, 64]; + for (input, expected) in inputs.iter().zip(expected.iter()) { + assert_eq!(int_log2(*input).unwrap(), *expected); + } + } +} diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index cee4cbe00c..5aeee02300 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -32,6 +32,5 @@ pub use self::structures::{ pub mod clarity_db; pub mod clarity_store; mod key_value_wrapper; -#[cfg(feature = "rusqlite")] pub mod sqlite; 
mod structures; diff --git a/clarity/src/vm/database/sqlite.rs b/clarity/src/vm/database/sqlite.rs index b92e79cbf2..0841c2ed31 100644 --- a/clarity/src/vm/database/sqlite.rs +++ b/clarity/src/vm/database/sqlite.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +#![cfg(feature = "rusqlite")] + use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use rusqlite::{params, Connection, OptionalExtension}; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId, TrieHash}; diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 54e1247f9b..c321f96975 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -511,8 +511,7 @@ const SQRTI_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "sqrti ${1:expr-1}", signature: "(sqrti n)", - description: - "Returns the largest integer that is less than or equal to the square root of `n`. + description: "Returns the largest integer that is less than or equal to the square root of `n`. Fails on a negative numbers. ", example: "(sqrti u11) ;; Returns u3 @@ -527,7 +526,7 @@ const LOG2_API: SimpleFunctionAPI = SimpleFunctionAPI { snippet: "log2 ${1:expr-1}", signature: "(log2 n)", description: - "Returns the power to which the number 2 must be raised to obtain the value `n`, rounded + "Returns the power to which the number 2 must be raised to obtain the value `n`, rounded down to the nearest integer. Fails on a negative numbers. ", example: "(log2 u8) ;; Returns u3 @@ -605,7 +604,7 @@ const BITWISE_LEFT_SHIFT_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "bit-shift-left ${1:expr-1} ${2:expr-2}", signature: "(bit-shift-left i1 shamt)", - description: "Shifts all the bits in `i1` to the left by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). 
+ description: "Shifts all the bits in `i1` to the left by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). Note that there is a deliberate choice made to ignore arithmetic overflow for this operation. In use cases where overflow should be detected, developers should use `*`, `/`, and `pow` instead of the shift operators. @@ -625,7 +624,7 @@ const BITWISE_RIGHT_SHIFT_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "bit-shift-right ${1:expr-1} ${2:expr-2}", signature: "(bit-shift-right i1 shamt)", - description: "Shifts all the bits in `i1` to the right by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). + description: "Shifts all the bits in `i1` to the right by the number of places specified in `shamt` modulo 128 (the bit width of Clarity integers). When `i1` is a `uint` (unsigned), new bits are filled with zeros. When `i1` is an `int` (signed), the sign is preserved, meaning that new bits are filled with the value of the previous sign-bit. Note that there is a deliberate choice made to ignore arithmetic overflow for this operation. In use cases where overflow should be detected, developers should use `*`, `/`, and `pow` instead of the shift operators. @@ -647,8 +646,8 @@ const AND_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "and ${1:expr-1} ${2:expr-2}", signature: "(and b1 b2 ...)", - description: "Returns `true` if all boolean inputs are `true`. Importantly, the supplied arguments are -evaluated in-order and lazily. Lazy evaluation means that if one of the arguments returns `false`, the function + description: "Returns `true` if all boolean inputs are `true`. Importantly, the supplied arguments are +evaluated in-order and lazily. Lazy evaluation means that if one of the arguments returns `false`, the function short-circuits, and no subsequent arguments are evaluated. 
", example: "(and true false) ;; Returns false @@ -661,8 +660,8 @@ const OR_API: SimpleFunctionAPI = SimpleFunctionAPI { name: None, snippet: "or ${1:expr-1} ${2:expr-2}", signature: "(or b1 b2 ...)", - description: "Returns `true` if any boolean inputs are `true`. Importantly, the supplied arguments are -evaluated in-order and lazily. Lazy evaluation means that if one of the arguments returns `true`, the function + description: "Returns `true` if any boolean inputs are `true`. Importantly, the supplied arguments are +evaluated in-order and lazily. Lazy evaluation means that if one of the arguments returns `true`, the function short-circuits, and no subsequent arguments are evaluated.", example: "(or true false) ;; Returns true (or (is-eq (+ 1 2) 1) (is-eq 4 4)) ;; Returns true @@ -887,7 +886,7 @@ const EQUALS_API: SpecialAPI = SpecialAPI { snippet: "is-eq ${1:expr-1} ${2:expr-2}", output_type: "bool", signature: "(is-eq v1 v2...)", - description: "Compares the inputted values, returning `true` if they are all equal. Note that + description: "Compares the inputted values, returning `true` if they are all equal. Note that _unlike_ the `(and ...)` function, `(is-eq ...)` will _not_ short-circuit. All values supplied to is-eq _must_ be the same type.", example: "(is-eq 1 1) ;; Returns true @@ -1017,7 +1016,7 @@ The `func` argument must be a literal function name. (fold * (list 2 2 2) 1) ;; Returns 8 (fold * (list 2 2 2) 0) ;; Returns 0 ;; calculates (- 11 (- 7 (- 3 2))) -(fold - (list 3 7 11) 2) ;; Returns 5 +(fold - (list 3 7 11) 2) ;; Returns 5 (define-private (concat-string (a (string-ascii 20)) (b (string-ascii 20))) (unwrap-panic (as-max-len? (concat a b) u20))) (fold concat-string "cdef" "ab") ;; Returns "fedcab" (fold concat-string (list "cd" "ef") "ab") ;; Returns "efcdab" @@ -1730,7 +1729,7 @@ value and type returned are determined by the specified `BlockInfoPropertyName`. 
not correspond to an existing block prior to the current block, the function returns `none`. The currently available property names are as follows: -- `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the +- `burnchain-header-hash`: This property returns a `(buff 32)` value containing the header hash of the burnchain (Bitcoin) block that selected the Stacks block at the given Stacks chain height. - `id-header-hash`: This property returns a `(buff 32)` value containing the _index block hash_ of a Stacks block. This hash is globally unique, and is derived @@ -1739,7 +1738,7 @@ from the block hash and the history of accepted PoX operations. This is also th - `header-hash`: This property returns a `(buff 32)` value containing the header hash of a Stacks block, given a Stacks chain height. **WARNING* this hash is not guaranteed to be globally unique, since the same Stacks block can be mined in different PoX forks. If you need global uniqueness, you should use `id-header-hash`. -- `miner-address`: This property returns a `principal` value corresponding to the miner of the given block. **WARNING** In Stacks 2.1, this is not guaranteed to +- `miner-address`: This property returns a `principal` value corresponding to the miner of the given block. **WARNING** In Stacks 2.1, this is not guaranteed to be the same `principal` that received the block reward, since Stacks 2.1 supports coinbase transactions that pay the reward to a contract address. This is merely the address of the `principal` that produced the block. @@ -1750,9 +1749,9 @@ For blocks mined after epoch 3.0, all Stacks blocks in one tenure will share the - `vrf-seed`: This property returns a `(buff 32)` value of the VRF seed for the corresponding block. -- `block-reward`: This property returns a `uint` value for the total block reward of the indicated Stacks block. 
This value is only available once the reward for +- `block-reward`: This property returns a `uint` value for the total block reward of the indicated Stacks block. This value is only available once the reward for the block matures. That is, the latest `block-reward` value available is at least 101 Stacks blocks in the past (on mainnet). The reward includes the coinbase, -the anchored block's transaction fees, and the shares of the confirmed and produced microblock transaction fees earned by this block's miner. Note that this value may +the anchored block's transaction fees, and the shares of the confirmed and produced microblock transaction fees earned by this block's miner. Note that this value may be smaller than the Stacks coinbase at this height, because the miner may have been punished with a valid `PoisonMicroblock` transaction in the event that the miner published two or more microblock stream forks. Added in Clarity 2. @@ -1886,14 +1885,14 @@ The latter, a _contract principal_, is encoded as a standard principal concatena a `(string-ascii 40)` *contract name* that identifies the code body. The `principal-construct?` function allows users to create either standard or contract principals, -depending on which form is used. To create a standard principal, +depending on which form is used. To create a standard principal, `principal-construct?` would be called with two arguments: it takes as input a `(buff 1)` which encodes the principal address's `version-byte`, a `(buff 20)` which encodes the principal address's `hash-bytes`. To create a contract principal, `principal-construct?` would be called with three arguments: the `(buff 1)` and `(buff 20)` to represent the standard principal that created the contract, and a `(string-ascii 40)` which encodes the contract's name. 
-On success, this function returns either a standard principal or contract principal, +On success, this function returns either a standard principal or contract principal, depending on whether or not the third `(string-ascii 40)` argument is given. This function returns a `Response`. On success, the `ok` value is a `Principal`. @@ -1901,7 +1900,7 @@ The `err` value is a value tuple with the form `{ error_code: uint, value: (opti If the single-byte `version-byte` is in the valid range `0x00` to `0x1f`, but is not an appropriate version byte for the current network, then the error will be `u0`, and `value` will contain -`(some principal)`, where the wrapped value is the principal. If the `version-byte` is not in this range, +`(some principal)`, where the wrapped value is the principal. If the `version-byte` is not in this range, however, then the `value` will be `none`. If the `version-byte` is a `buff` of length 0, if the single-byte `version-byte` is a @@ -2180,10 +2179,10 @@ const MINT_TOKEN: SpecialAPI = SpecialAPI { signature: "(ft-mint? token-name amount recipient)", description: "`ft-mint?` is used to increase the token balance for the `recipient` principal for a token type defined using `define-fungible-token`. The increased token balance is _not_ transfered from another principal, but -rather minted. +rather minted. If a non-positive amount is provided to mint, this function returns `(err 1)`. Otherwise, on successfully mint, it -returns `(ok true)`. If this call would result in more supplied tokens than defined by the total supply in +returns `(ok true)`. If this call would result in more supplied tokens than defined by the total supply in `define-fungible-token`, then a `SupplyOverflow` runtime error is thrown. ", example: " @@ -2247,7 +2246,7 @@ const TOKEN_TRANSFER: SpecialAPI = SpecialAPI { output_type: "(response bool uint)", signature: "(ft-transfer? 
token-name amount sender recipient)", description: "`ft-transfer?` is used to increase the token balance for the `recipient` principal for a token -type defined using `define-fungible-token` by debiting the `sender` principal. In contrast to `stx-transfer?`, +type defined using `define-fungible-token` by debiting the `sender` principal. In contrast to `stx-transfer?`, any user can transfer the assets. When used, relevant guards need to be added. This function returns (ok true) if the transfer is successful. In the event of an unsuccessful transfer it returns @@ -2272,7 +2271,7 @@ const ASSET_TRANSFER: SpecialAPI = SpecialAPI { signature: "(nft-transfer? asset-class asset-identifier sender recipient)", description: "`nft-transfer?` is used to change the owner of an asset identified by `asset-identifier` from `sender` to `recipient`. The `asset-class` must have been defined by `define-non-fungible-token` and `asset-identifier` -must be of the type specified in that definition. In contrast to `stx-transfer?`, any user can transfer the asset. +must be of the type specified in that definition. In contrast to `stx-transfer?`, any user can transfer the asset. When used, relevant guards need to be added. This function returns (ok true) if the transfer is successful. In the event of an unsuccessful transfer it returns @@ -2312,7 +2311,7 @@ const BURN_TOKEN: SpecialAPI = SpecialAPI { signature: "(ft-burn? token-name amount sender)", description: "`ft-burn?` is used to decrease the token balance for the `sender` principal for a token type defined using `define-fungible-token`. The decreased token balance is _not_ transfered to another principal, but -rather destroyed, reducing the circulating supply. +rather destroyed, reducing the circulating supply. On a successful burn, it returns `(ok true)`. The burn may fail with error code: @@ -2331,7 +2330,7 @@ const BURN_ASSET: SpecialAPI = SpecialAPI { output_type: "(response bool uint)", signature: "(nft-burn? 
asset-class asset-identifier sender)", description: "`nft-burn?` is used to burn an asset that the `sender` principal owns. -The asset must have been defined using `define-non-fungible-token`, and the supplied +The asset must have been defined using `define-non-fungible-token`, and the supplied `asset-identifier` must be of the same type specified in that definition. On a successful burn, it returns `(ok true)`. In the event of an unsuccessful burn it @@ -2354,7 +2353,7 @@ const STX_GET_BALANCE: SimpleFunctionAPI = SimpleFunctionAPI { description: "`stx-get-balance` is used to query the STX balance of the `owner` principal. This function returns the (unlocked) STX balance, in microstacks (1 STX = 1,000,000 microstacks), of the -`owner` principal. The result is the same as `(get unlocked (stx-account user))`. +`owner` principal. The result is the same as `(get unlocked (stx-account user))`. In the event that the `owner` principal isn't materialized, it returns 0. ", example: " @@ -2411,7 +2410,7 @@ const STX_TRANSFER_MEMO: SpecialAPI = SpecialAPI { snippet: "stx-transfer-memo? ${1:amount} ${2:sender} ${3:recipient} ${4:memo}", output_type: "(response bool uint)", signature: "(stx-transfer-memo? amount sender recipient memo)", - description: "`stx-transfer-memo?` is similar to `stx-transfer?`, except that it adds a `memo` field. + description: "`stx-transfer-memo?` is similar to `stx-transfer?`, except that it adds a `memo` field. This function returns (ok true) if the transfer is successful, or, on an error, returns the same codes as `stx-transfer?`. ", @@ -2501,8 +2500,8 @@ const REPLACE_AT: SpecialAPI = SpecialAPI { snippet: "replace-at? ${1:sequence} ${2:index} ${3:element}", signature: "(replace-at? sequence index element)", description: "The `replace-at?` function takes in a sequence, an index, and an element, -and returns a new sequence with the data at the index position replaced with the given element. 
-The given element's type must match the type of the sequence, and must correspond to a single +and returns a new sequence with the data at the index position replaced with the given element. +The given element's type must match the type of the sequence, and must correspond to a single index of the input sequence. The return type on success is the same type as the input sequence. If the provided index is out of bounds, this functions returns `none`. diff --git a/clarity/src/vm/errors.rs b/clarity/src/vm/errors.rs index a3100dcd83..cec2a9855f 100644 --- a/clarity/src/vm/errors.rs +++ b/clarity/src/vm/errors.rs @@ -21,13 +21,12 @@ use rusqlite::Error as SqliteError; use serde_json::Error as SerdeJSONErr; use stacks_common::types::chainstate::BlockHeaderHash; -use super::ast::errors::ParseErrors; pub use crate::vm::analysis::errors::{ check_argument_count, check_arguments_at_least, check_arguments_at_most, CheckErrors, }; -use crate::vm::ast::errors::ParseError; -use crate::vm::contexts::StackTrace; +use crate::vm::ast::errors::{ParseError, ParseErrors}; use crate::vm::costs::CostErrors; +use crate::vm::representations::FunctionIdentifier; use crate::vm::types::Value; #[derive(Debug)] @@ -35,6 +34,8 @@ pub struct IncomparableError { pub err: T, } +pub type StackTrace = Vec; + #[derive(Debug)] #[allow(clippy::large_enum_variant)] pub enum Error { diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs index 3dbe8a2951..055a30bc79 100644 --- a/clarity/src/vm/mod.rs +++ b/clarity/src/vm/mod.rs @@ -15,702 +15,55 @@ // along with this program. If not, see . 
#![allow(clippy::result_large_err)] -pub mod diagnostic; -pub mod errors; - +pub mod analysis; #[macro_use] pub mod costs; - +pub mod ast; +pub mod diagnostic; +pub mod errors; +pub mod representations; pub mod types; +pub mod version; -pub mod contracts; +#[cfg(any(test, feature = "testing"))] +pub mod test_util; +#[cfg(any(test, feature = "testing"))] +pub mod tests; -pub mod ast; +#[cfg(feature = "vm")] +pub mod callables; +#[cfg(feature = "vm")] +pub mod clarity; +#[cfg(feature = "vm")] pub mod contexts; +#[cfg(feature = "vm")] +pub mod contracts; +#[cfg(feature = "vm")] +pub mod coverage; +#[cfg(feature = "vm")] pub mod database; -pub mod representations; - -pub mod callables; -pub mod functions; -pub mod variables; - -pub mod analysis; +#[cfg(feature = "vm")] pub mod docs; -pub mod version; - -pub mod coverage; - +#[cfg(feature = "vm")] pub mod events; - +#[cfg(feature = "vm")] +pub mod functions; #[cfg(feature = "rusqlite")] pub mod tooling; +#[cfg(feature = "vm")] +pub mod variables; -#[cfg(any(test, feature = "testing"))] -pub mod tests; - -#[cfg(any(test, feature = "testing"))] -pub mod test_util; - -pub mod clarity; - -use std::collections::BTreeMap; - -use costs::CostErrors; -use serde_json; -use stacks_common::types::StacksEpochId; +#[cfg(feature = "vm")] +pub mod core; +#[cfg(feature = "vm")] +pub use core::*; -use self::analysis::ContractAnalysis; -use self::ast::ContractAST; -use self::costs::ExecutionCost; -use self::diagnostic::Diagnostic; -use crate::vm::callables::CallableType; -pub use crate::vm::contexts::{ - CallStack, ContractContext, Environment, LocalContext, MAX_CONTEXT_DEPTH, -}; -use crate::vm::contexts::{ExecutionTimeTracker, GlobalContext}; -use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::costs::{ - runtime_cost, CostOverflowingMath, CostTracker, LimitedCostTracker, MemoryConsumer, -}; -// publish the non-generic StacksEpoch form for use throughout module -pub use 
crate::vm::database::clarity_db::StacksEpoch; -use crate::vm::errors::{ - CheckErrors, Error, InterpreterError, InterpreterResult as Result, RuntimeErrorType, -}; -use crate::vm::functions::define::DefineResult; -pub use crate::vm::functions::stx_transfer_consolidated; -pub use crate::vm::representations::{ +pub use self::representations::{ ClarityName, ContractName, SymbolicExpression, SymbolicExpressionType, }; -pub use crate::vm::types::Value; -use crate::vm::types::{PrincipalData, TypeSignature}; -pub use crate::vm::version::ClarityVersion; +pub use self::types::{ + PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, TypeSignature, Value, +}; +pub use self::version::ClarityVersion; pub const MAX_CALL_STACK_DEPTH: usize = 64; - -#[derive(Debug, Clone)] -pub struct ParsedContract { - pub contract_identifier: String, - pub code: String, - pub function_args: BTreeMap>, - pub ast: ContractAST, - pub analysis: ContractAnalysis, -} - -#[derive(Debug, Clone)] -pub struct ContractEvaluationResult { - pub result: Option, - pub contract: ParsedContract, -} - -#[derive(Debug, Clone)] -pub struct SnippetEvaluationResult { - pub result: Value, -} - -#[derive(Debug, Clone)] -#[allow(clippy::large_enum_variant)] -pub enum EvaluationResult { - Contract(ContractEvaluationResult), - Snippet(SnippetEvaluationResult), -} - -#[derive(Debug, Clone)] -pub struct ExecutionResult { - pub result: EvaluationResult, - pub events: Vec, - pub cost: Option, - pub diagnostics: Vec, -} - -#[derive(Clone, Debug)] -pub struct CostSynthesis { - pub total: ExecutionCost, - pub limit: ExecutionCost, - pub memory: u64, - pub memory_limit: u64, -} - -impl CostSynthesis { - pub fn from_cost_tracker(cost_tracker: &LimitedCostTracker) -> CostSynthesis { - CostSynthesis { - total: cost_tracker.get_total(), - limit: cost_tracker.get_limit(), - memory: cost_tracker.get_memory(), - memory_limit: cost_tracker.get_memory_limit(), - } - } -} - -/// EvalHook defines an interface for hooks to 
execute during evaluation. -pub trait EvalHook { - // Called before the expression is evaluated - fn will_begin_eval( - &mut self, - _env: &mut Environment, - _context: &LocalContext, - _expr: &SymbolicExpression, - ); - - // Called after the expression is evaluated - fn did_finish_eval( - &mut self, - _env: &mut Environment, - _context: &LocalContext, - _expr: &SymbolicExpression, - _res: &core::result::Result, - ); - - // Called upon completion of the execution - fn did_complete(&mut self, _result: core::result::Result<&mut ExecutionResult, String>); -} - -fn lookup_variable(name: &str, context: &LocalContext, env: &mut Environment) -> Result { - if name.starts_with(char::is_numeric) || name.starts_with('\'') { - Err(InterpreterError::BadSymbolicRepresentation(format!( - "Unexpected variable name: {}", - name - )) - .into()) - } else if let Some(value) = variables::lookup_reserved_variable(name, context, env)? { - Ok(value) - } else { - runtime_cost( - ClarityCostFunction::LookupVariableDepth, - env, - context.depth(), - )?; - if let Some(value) = context.lookup_variable(name) { - runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; - Ok(value.clone()) - } else if let Some(value) = env.contract_context.lookup_variable(name).cloned() { - runtime_cost(ClarityCostFunction::LookupVariableSize, env, value.size()?)?; - let (value, _) = - Value::sanitize_value(env.epoch(), &TypeSignature::type_of(&value)?, value) - .ok_or_else(|| CheckErrors::CouldNotDetermineType)?; - Ok(value) - } else if let Some(callable_data) = context.lookup_callable_contract(name) { - if env.contract_context.get_clarity_version() < &ClarityVersion::Clarity2 { - Ok(callable_data.contract_identifier.clone().into()) - } else { - Ok(Value::CallableContract(callable_data.clone())) - } - } else { - Err(CheckErrors::UndefinedVariable(name.to_string()).into()) - } - } -} - -pub fn lookup_function(name: &str, env: &mut Environment) -> Result { - 
runtime_cost(ClarityCostFunction::LookupFunction, env, 0)?; - - if let Some(result) = - functions::lookup_reserved_functions(name, env.contract_context.get_clarity_version()) - { - Ok(result) - } else { - let user_function = env - .contract_context - .lookup_function(name) - .ok_or(CheckErrors::UndefinedFunction(name.to_string()))?; - Ok(CallableType::UserFunction(user_function)) - } -} - -fn add_stack_trace(result: &mut Result, env: &Environment) { - if let Err(Error::Runtime(_, ref mut stack_trace)) = result { - if stack_trace.is_none() { - stack_trace.replace(env.call_stack.make_stack_trace()); - } - } -} - -pub fn apply( - function: &CallableType, - args: &[SymbolicExpression], - env: &mut Environment, - context: &LocalContext, -) -> Result { - let identifier = function.get_identifier(); - // Aaron: in non-debug executions, we shouldn't track a full call-stack. - // only enough to do recursion detection. - - // do recursion check on user functions. - let track_recursion = matches!(function, CallableType::UserFunction(_)); - if track_recursion && env.call_stack.contains(&identifier) { - return Err(CheckErrors::CircularReference(vec![identifier.to_string()]).into()); - } - - if env.call_stack.depth() >= MAX_CALL_STACK_DEPTH { - return Err(RuntimeErrorType::MaxStackDepthReached.into()); - } - - if let CallableType::SpecialFunction(_, function) = function { - env.call_stack.insert(&identifier, track_recursion); - let mut resp = function(args, env, context); - add_stack_trace(&mut resp, env); - env.call_stack.remove(&identifier, track_recursion)?; - resp - } else { - let mut used_memory = 0; - let mut evaluated_args = Vec::with_capacity(args.len()); - env.call_stack.incr_apply_depth(); - for arg_x in args.iter() { - let arg_value = match eval(arg_x, env, context) { - Ok(x) => x, - Err(e) => { - env.drop_memory(used_memory)?; - env.call_stack.decr_apply_depth(); - return Err(e); - } - }; - let arg_use = arg_value.get_memory_use()?; - match env.add_memory(arg_use) { - 
Ok(_x) => {} - Err(e) => { - env.drop_memory(used_memory)?; - env.call_stack.decr_apply_depth(); - return Err(Error::from(e)); - } - }; - used_memory += arg_value.get_memory_use()?; - evaluated_args.push(arg_value); - } - env.call_stack.decr_apply_depth(); - - env.call_stack.insert(&identifier, track_recursion); - let mut resp = match function { - CallableType::NativeFunction(_, function, cost_function) => { - runtime_cost(*cost_function, env, evaluated_args.len()) - .map_err(Error::from) - .and_then(|_| function.apply(evaluated_args, env)) - } - CallableType::NativeFunction205(_, function, cost_function, cost_input_handle) => { - let cost_input = if env.epoch() >= &StacksEpochId::Epoch2_05 { - cost_input_handle(evaluated_args.as_slice())? - } else { - evaluated_args.len() as u64 - }; - runtime_cost(*cost_function, env, cost_input) - .map_err(Error::from) - .and_then(|_| function.apply(evaluated_args, env)) - } - CallableType::UserFunction(function) => function.apply(&evaluated_args, env), - _ => return Err(InterpreterError::Expect("Should be unreachable.".into()).into()), - }; - add_stack_trace(&mut resp, env); - env.drop_memory(used_memory)?; - env.call_stack.remove(&identifier, track_recursion)?; - resp - } -} - -fn check_max_execution_time_expired(global_context: &GlobalContext) -> Result<()> { - match global_context.execution_time_tracker { - ExecutionTimeTracker::NoTracking => Ok(()), - ExecutionTimeTracker::MaxTime { - start_time, - max_duration, - } => { - if start_time.elapsed() >= max_duration { - Err(CostErrors::ExecutionTimeExpired.into()) - } else { - Ok(()) - } - } - } -} - -pub fn eval( - exp: &SymbolicExpression, - env: &mut Environment, - context: &LocalContext, -) -> Result { - use crate::vm::representations::SymbolicExpressionType::{ - Atom, AtomValue, Field, List, LiteralValue, TraitReference, - }; - - check_max_execution_time_expired(env.global_context)?; - - if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() { - for hook in 
eval_hooks.iter_mut() { - hook.will_begin_eval(env, context, exp); - } - env.global_context.eval_hooks = Some(eval_hooks); - } - - let res = match exp.expr { - AtomValue(ref value) | LiteralValue(ref value) => Ok(value.clone()), - Atom(ref value) => lookup_variable(value, context, env), - List(ref children) => { - let (function_variable, rest) = children - .split_first() - .ok_or(CheckErrors::NonFunctionApplication)?; - - let function_name = function_variable - .match_atom() - .ok_or(CheckErrors::BadFunctionName)?; - let f = lookup_function(function_name, env)?; - apply(&f, rest, env, context) - } - TraitReference(_, _) | Field(_) => { - return Err(InterpreterError::BadSymbolicRepresentation( - "Unexpected trait reference".into(), - ) - .into()) - } - }; - - if let Some(mut eval_hooks) = env.global_context.eval_hooks.take() { - for hook in eval_hooks.iter_mut() { - hook.did_finish_eval(env, context, exp, &res); - } - env.global_context.eval_hooks = Some(eval_hooks); - } - - res -} - -pub fn is_reserved(name: &str, version: &ClarityVersion) -> bool { - functions::lookup_reserved_functions(name, version).is_some() - || variables::is_reserved_name(name, version) -} - -/// This function evaluates a list of expressions, sharing a global context. -/// It returns the final evaluated result. -/// Used for the initialization of a new contract. 
-pub fn eval_all( - expressions: &[SymbolicExpression], - contract_context: &mut ContractContext, - global_context: &mut GlobalContext, - sponsor: Option, -) -> Result> { - let mut last_executed = None; - let context = LocalContext::new(); - let mut total_memory_use = 0; - - let publisher: PrincipalData = contract_context.contract_identifier.issuer.clone().into(); - - finally_drop_memory!(global_context, total_memory_use; { - for exp in expressions { - let try_define = global_context.execute(|context| { - let mut call_stack = CallStack::new(); - let mut env = Environment::new( - context, contract_context, &mut call_stack, Some(publisher.clone()), Some(publisher.clone()), sponsor.clone()); - functions::define::evaluate_define(exp, &mut env) - })?; - match try_define { - DefineResult::Variable(name, value) => { - runtime_cost(ClarityCostFunction::BindName, global_context, 0)?; - let value_memory_use = value.get_memory_use()?; - global_context.add_memory(value_memory_use)?; - total_memory_use += value_memory_use; - - contract_context.variables.insert(name, value); - }, - DefineResult::Function(name, value) => { - runtime_cost(ClarityCostFunction::BindName, global_context, 0)?; - - contract_context.functions.insert(name, value); - }, - DefineResult::PersistedVariable(name, value_type, value) => { - runtime_cost(ClarityCostFunction::CreateVar, global_context, value_type.size()?)?; - contract_context.persisted_names.insert(name.clone()); - - global_context.add_memory(value_type.type_size() - .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; - - global_context.add_memory(value.size()? 
as u64)?; - - let data_type = global_context.database.create_variable(&contract_context.contract_identifier, &name, value_type)?; - global_context.database.set_variable(&contract_context.contract_identifier, &name, value, &data_type, &global_context.epoch_id)?; - - contract_context.meta_data_var.insert(name, data_type); - }, - DefineResult::Map(name, key_type, value_type) => { - runtime_cost(ClarityCostFunction::CreateMap, global_context, - u64::from(key_type.size()?).cost_overflow_add( - u64::from(value_type.size()?))?)?; - contract_context.persisted_names.insert(name.clone()); - - global_context.add_memory(key_type.type_size() - .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; - global_context.add_memory(value_type.type_size() - .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; - - let data_type = global_context.database.create_map(&contract_context.contract_identifier, &name, key_type, value_type)?; - - contract_context.meta_data_map.insert(name, data_type); - }, - DefineResult::FungibleToken(name, total_supply) => { - runtime_cost(ClarityCostFunction::CreateFt, global_context, 0)?; - contract_context.persisted_names.insert(name.clone()); - - global_context.add_memory(TypeSignature::UIntType.type_size() - .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? as u64)?; - - let data_type = global_context.database.create_fungible_token(&contract_context.contract_identifier, &name, &total_supply)?; - - contract_context.meta_ft.insert(name, data_type); - }, - DefineResult::NonFungibleAsset(name, asset_type) => { - runtime_cost(ClarityCostFunction::CreateNft, global_context, asset_type.size()?)?; - contract_context.persisted_names.insert(name.clone()); - - global_context.add_memory(asset_type.type_size() - .map_err(|_| InterpreterError::Expect("Type size should be realizable".into()))? 
as u64)?; - - let data_type = global_context.database.create_non_fungible_token(&contract_context.contract_identifier, &name, &asset_type)?; - - contract_context.meta_nft.insert(name, data_type); - }, - DefineResult::Trait(name, trait_type) => { - contract_context.defined_traits.insert(name, trait_type); - }, - DefineResult::UseTrait(_name, _trait_identifier) => {}, - DefineResult::ImplTrait(trait_identifier) => { - contract_context.implemented_traits.insert(trait_identifier); - }, - DefineResult::NoDefine => { - // not a define function, evaluate normally. - global_context.execute(|global_context| { - let mut call_stack = CallStack::new(); - let mut env = Environment::new( - global_context, contract_context, &mut call_stack, Some(publisher.clone()), Some(publisher.clone()), sponsor.clone()); - - let result = eval(exp, &mut env, &context)?; - last_executed = Some(result); - Ok(()) - })?; - } - } - } - - contract_context.data_size = total_memory_use; - Ok(last_executed) - }) -} - -/// Run provided program in a brand new environment, with a transient, empty -/// database. Only used for testing -/// This method executes the program in Epoch 2.0 *and* Epoch 2.05 and asserts -/// that the result is the same before returning the result -#[cfg(any(test, feature = "testing"))] -pub fn execute_on_network(program: &str, use_mainnet: bool) -> Result> { - let epoch_200_result = execute_with_parameters( - program, - ClarityVersion::Clarity2, - StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, - use_mainnet, - ); - let epoch_205_result = execute_with_parameters( - program, - ClarityVersion::Clarity2, - StacksEpochId::Epoch2_05, - ast::ASTRules::PrecheckSize, - use_mainnet, - ); - - assert_eq!( - epoch_200_result, epoch_205_result, - "Epoch 2.0 and 2.05 should have same execution result, but did not for program `{}`", - program - ); - epoch_205_result -} - -/// Runs `program` in a test environment with the provided parameters. 
-#[cfg(any(test, feature = "testing"))] -pub fn execute_with_parameters_and_call_in_global_context( - program: &str, - clarity_version: ClarityVersion, - epoch: StacksEpochId, - ast_rules: ast::ASTRules, - use_mainnet: bool, - mut global_context_function: F, -) -> Result> -where - F: FnMut(&mut GlobalContext) -> Result<()>, -{ - use crate::vm::database::MemoryBackingStore; - use crate::vm::tests::test_only_mainnet_to_chain_id; - use crate::vm::types::QualifiedContractIdentifier; - - let contract_id = QualifiedContractIdentifier::transient(); - let mut contract_context = ContractContext::new(contract_id.clone(), clarity_version); - let mut marf = MemoryBackingStore::new(); - let conn = marf.as_clarity_db(); - let chain_id = test_only_mainnet_to_chain_id(use_mainnet); - let mut global_context = GlobalContext::new( - use_mainnet, - chain_id, - conn, - LimitedCostTracker::new_free(), - epoch, - ); - global_context.execute(|g| { - global_context_function(g)?; - let parsed = ast::build_ast_with_rules( - &contract_id, - program, - &mut (), - clarity_version, - epoch, - ast_rules, - )? - .expressions; - eval_all(&parsed, &mut contract_context, g, None) - }) -} - -#[cfg(any(test, feature = "testing"))] -pub fn execute_with_parameters( - program: &str, - clarity_version: ClarityVersion, - epoch: StacksEpochId, - ast_rules: ast::ASTRules, - use_mainnet: bool, -) -> Result> { - execute_with_parameters_and_call_in_global_context( - program, - clarity_version, - epoch, - ast_rules, - use_mainnet, - |_| Ok(()), - ) -} - -/// Execute for test with `version`, Epoch20, testnet. -#[cfg(any(test, feature = "testing"))] -pub fn execute_against_version(program: &str, version: ClarityVersion) -> Result> { - execute_with_parameters( - program, - version, - StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, - false, - ) -} - -/// Execute for test in Clarity1, Epoch20, testnet. 
-#[cfg(any(test, feature = "testing"))] -pub fn execute(program: &str) -> Result> { - execute_with_parameters( - program, - ClarityVersion::Clarity1, - StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, - false, - ) -} - -/// Execute for test in Clarity1, Epoch20, testnet. -#[cfg(any(test, feature = "testing"))] -pub fn execute_with_limited_execution_time( - program: &str, - max_execution_time: std::time::Duration, -) -> Result> { - execute_with_parameters_and_call_in_global_context( - program, - ClarityVersion::Clarity1, - StacksEpochId::Epoch20, - ast::ASTRules::PrecheckSize, - false, - |g| { - g.set_max_execution_time(max_execution_time); - Ok(()) - }, - ) -} - -/// Execute for test in Clarity2, Epoch21, testnet. -#[cfg(any(test, feature = "testing"))] -pub fn execute_v2(program: &str) -> Result> { - execute_with_parameters( - program, - ClarityVersion::Clarity2, - StacksEpochId::Epoch21, - ast::ASTRules::PrecheckSize, - false, - ) -} - -#[cfg(test)] -mod test { - use stacks_common::consts::CHAIN_ID_TESTNET; - use stacks_common::types::StacksEpochId; - - use super::ClarityVersion; - use crate::vm::callables::{DefineType, DefinedFunction}; - use crate::vm::costs::LimitedCostTracker; - use crate::vm::database::MemoryBackingStore; - use crate::vm::types::{QualifiedContractIdentifier, TypeSignature}; - use crate::vm::{ - eval, CallStack, ContractContext, Environment, GlobalContext, LocalContext, - SymbolicExpression, Value, - }; - - #[test] - fn test_simple_user_function() { - // - // test program: - // (define (do_work x) (+ 5 x)) - // (define a 59) - // (do_work a) - // - let content = [SymbolicExpression::list(vec![ - SymbolicExpression::atom("do_work".into()), - SymbolicExpression::atom("a".into()), - ])]; - - let func_body = SymbolicExpression::list(vec![ - SymbolicExpression::atom("+".into()), - SymbolicExpression::atom_value(Value::Int(5)), - SymbolicExpression::atom("x".into()), - ]); - - let func_args = vec![("x".into(), TypeSignature::IntType)]; - let 
user_function = DefinedFunction::new( - func_args, - func_body, - DefineType::Private, - &"do_work".into(), - "", - ); - - let context = LocalContext::new(); - let mut contract_context = ContractContext::new( - QualifiedContractIdentifier::transient(), - ClarityVersion::Clarity1, - ); - - let mut marf = MemoryBackingStore::new(); - let mut global_context = GlobalContext::new( - false, - CHAIN_ID_TESTNET, - marf.as_clarity_db(), - LimitedCostTracker::new_free(), - StacksEpochId::Epoch2_05, - ); - - contract_context - .variables - .insert("a".into(), Value::Int(59)); - contract_context - .functions - .insert("do_work".into(), user_function); - - let mut call_stack = CallStack::new(); - let mut env = Environment::new( - &mut global_context, - &contract_context, - &mut call_stack, - None, - None, - None, - ); - assert_eq!(Ok(Value::Int(64)), eval(&content[0], &mut env, &context)); - } -} diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs index ce97d913b4..7b27e032c5 100644 --- a/clarity/src/vm/representations.rs +++ b/clarity/src/vm/representations.rs @@ -675,3 +675,26 @@ impl Span { } } } + +#[derive(Clone, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)] +pub struct FunctionIdentifier { + pub(crate) identifier: String, +} + +impl fmt::Display for FunctionIdentifier { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.identifier) + } +} + +impl FunctionIdentifier { + pub fn new_native_function(name: &str) -> FunctionIdentifier { + let identifier = format!("_native_:{}", name); + FunctionIdentifier { identifier } + } + + pub fn new_user_function(name: &str, context: &str) -> FunctionIdentifier { + let identifier = format!("{}:{}", context, name); + FunctionIdentifier { identifier } + } +} diff --git a/clarity/src/vm/tests/simple_apply_eval.rs b/clarity/src/vm/tests/simple_apply_eval.rs index 7136ab5fb0..8e16b44b40 100644 --- a/clarity/src/vm/tests/simple_apply_eval.rs +++ 
b/clarity/src/vm/tests/simple_apply_eval.rs @@ -334,7 +334,7 @@ fn test_from_consensus_buff_missed_expectations() { ("0x0200000004deadbeef", "(string-ascii 8)"), ("0x03", "uint"), ("0x04", "(optional int)"), - ("0x0700ffffffffffffffffffffffffffffffff", "(response uint int)"), + ("0x0700ffffffffffffffffffffffffffffffff", "(response uint int)"), ("0x0800ffffffffffffffffffffffffffffffff", "(response int uint)"), ("0x09", "(response int int)"), ("0x0b0000000400000000000000000000000000000000010000000000000000000000000000000002000000000000000000000000000000000300fffffffffffffffffffffffffffffffc", @@ -369,7 +369,7 @@ fn test_to_from_consensus_buff_vectors() { ("0x04", "false", "bool"), ("0x050011deadbeef11ababffff11deadbeef11ababffff", "'S08XXBDYXW8TQAZZZW8XXBDYXW8TQAZZZZ88551S", "principal"), ("0x060011deadbeef11ababffff11deadbeef11ababffff0461626364", "'S08XXBDYXW8TQAZZZW8XXBDYXW8TQAZZZZ88551S.abcd", "principal"), - ("0x0700ffffffffffffffffffffffffffffffff", "(ok -1)", "(response int int)"), + ("0x0700ffffffffffffffffffffffffffffffff", "(ok -1)", "(response int int)"), ("0x0800ffffffffffffffffffffffffffffffff", "(err -1)", "(response int int)"), ("0x09", "none", "(optional int)"), ("0x0a00ffffffffffffffffffffffffffffffff", "(some -1)", "(optional int)"), diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs index d34a9cdf70..240a45ad24 100644 --- a/clarity/src/vm/types/mod.rs +++ b/clarity/src/vm/types/mod.rs @@ -31,11 +31,12 @@ use crate::vm::errors::{ CheckErrors, InterpreterError, InterpreterResult as Result, RuntimeErrorType, }; use crate::vm::representations::{ClarityName, ContractName, SymbolicExpression}; +#[cfg(feature = "vm")] +pub use crate::vm::types::signatures::parse_name_type_pairs; pub use crate::vm::types::signatures::{ - parse_name_type_pairs, AssetIdentifier, BufferLength, FixedFunction, FunctionArg, - FunctionSignature, FunctionType, ListTypeData, SequenceSubtype, StringSubtype, - StringUTF8Length, TupleTypeSignature, TypeSignature, 
BUFF_1, BUFF_20, BUFF_21, BUFF_32, - BUFF_33, BUFF_64, BUFF_65, + AssetIdentifier, BufferLength, FixedFunction, FunctionArg, FunctionSignature, FunctionType, + ListTypeData, SequenceSubtype, StringSubtype, StringUTF8Length, TupleTypeSignature, + TypeSignature, BUFF_1, BUFF_20, BUFF_21, BUFF_32, BUFF_33, BUFF_64, BUFF_65, }; use crate::vm::ClarityVersion; diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs index 0961d27401..c61f50b480 100644 --- a/clarity/src/vm/types/serialization.rs +++ b/clarity/src/vm/types/serialization.rs @@ -24,8 +24,11 @@ use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::retry::BoundReader; use super::{ListTypeData, TupleTypeSignature}; +#[cfg(feature = "vm")] use crate::vm::database::{ClarityDeserializable, ClaritySerializable}; -use crate::vm::errors::{CheckErrors, Error as ClarityError, IncomparableError, InterpreterError}; +#[cfg(feature = "vm")] +use crate::vm::errors::Error as ClarityError; +use crate::vm::errors::{CheckErrors, IncomparableError, InterpreterError}; use crate::vm::representations::{ClarityName, ContractName, MAX_STRING_LEN}; use crate::vm::types::{ BufferLength, CallableData, CharType, OptionalData, PrincipalData, QualifiedContractIdentifier, @@ -1327,12 +1330,14 @@ impl Value { } } +#[cfg(feature = "vm")] impl ClaritySerializable for u32 { fn serialize(&self) -> String { to_hex(&self.to_be_bytes()) } } +#[cfg(feature = "vm")] impl ClarityDeserializable for u32 { fn deserialize(input: &str) -> Result { let bytes = hex_bytes(input).map_err(|_| { diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index f41b8ed1a3..8abc8f9d87 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -25,17 +25,19 @@ use hashbrown::HashSet; use lazy_static::lazy_static; use stacks_common::types::StacksEpochId; -use crate::vm::costs::{runtime_cost, CostOverflowingMath}; +#[cfg(feature = "vm")] +use 
crate::vm::costs::{runtime_cost, ClarityCostFunction, CostOverflowingMath as _, CostTracker}; use crate::vm::errors::CheckErrors; -use crate::vm::representations::{ - ClarityName, ContractName, SymbolicExpression, SymbolicExpressionType, TraitDefinition, - CONTRACT_MAX_NAME_LENGTH, -}; +use crate::vm::representations::{ClarityName, ContractName, CONTRACT_MAX_NAME_LENGTH}; +#[cfg(feature = "vm")] +use crate::vm::representations::{SymbolicExpression, SymbolicExpressionType, TraitDefinition}; use crate::vm::types::{ CharType, PrincipalData, QualifiedContractIdentifier, SequenceData, SequencedValue, StandardPrincipalData, TraitIdentifier, Value, MAX_TYPE_DEPTH, MAX_VALUE_SIZE, WRAPPER_VALUE_SIZE, }; +#[cfg(feature = "vm")] +use crate::vm::ClarityVersion; type Result = std::result::Result; @@ -948,6 +950,7 @@ impl TupleTypeSignature { Ok(true) } + #[cfg(feature = "vm")] pub fn parse_name_type_pair_list( epoch: StacksEpochId, type_def: &SymbolicExpression, @@ -966,6 +969,7 @@ impl TupleTypeSignature { } } +#[cfg(feature = "vm")] impl FixedFunction { pub fn total_type_size(&self) -> Result { let mut function_type_size = u64::from(self.returns.type_size()?); @@ -977,6 +981,7 @@ impl FixedFunction { } } +#[cfg(feature = "vm")] impl FunctionSignature { pub fn total_type_size(&self) -> Result { let mut function_type_size = u64::from(self.returns.type_size()?); @@ -1464,6 +1469,7 @@ impl TypeSignature { } } +#[cfg(feature = "vm")] /// Parsing functions. 
impl TypeSignature { fn parse_atom_type(typename: &str) -> Result { @@ -1917,10 +1923,7 @@ impl TupleTypeSignature { } } -use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::costs::CostTracker; -use crate::vm::ClarityVersion; - +#[cfg(feature = "vm")] pub fn parse_name_type_pairs( epoch: StacksEpochId, name_type_pairs: &[SymbolicExpression], diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 45afb08452..4b2c96d3bb 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -30,21 +30,26 @@ name = "stacks_common" path = "./src/libcommon.rs" [dependencies] -rand = { workspace = true } -serde = { version = "1.0", features = ["derive"] } -serde_derive = "1" -sha3 = "0.10.1" -ripemd = "0.1.1" -lazy_static = "1.4.0" -slog = { version = "2.5.2", features = ["max_level_trace"] } -slog-term = "2.6.0" -slog-json = { version = "2.3.0", optional = true } -chrono = "0.4.19" +chrono = { version = "0.4.41", default-features = false, features = ["clock"] } +curve25519-dalek = { version = "4.1.3", default-features = false, features = ["serde"] } +ed25519-dalek = { workspace = true } hashbrown = { workspace = true } -rusqlite = { workspace = true, optional = true } +lazy_static = { workspace = true } +ripemd = { version = "0.1.1", default-features = false } +serde = { workspace = true , features = ["derive"] } +serde_derive = { workspace = true } +serde_json = { workspace = true } +sha3 = { version = "0.10.1", default-features = false } +slog = { workspace = true } +slog-term = { version = "2.6.0", default-features = false } + +# Optional dependencies +getrandom = { version = "0.2", default-features = false, optional = true } +rand = { workspace = true, optional = true } +slog-json = { version = "2.3.0", default-features = false, optional = true } [target.'cfg(unix)'.dependencies] -nix = "0.23" +nix = {version = "0.23", default-features = false, optional = true} [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", 
features = [ @@ -52,47 +57,46 @@ winapi = { version = "0.3", features = [ "handleapi", "synchapi", "winbase", -] } - -[target.'cfg(windows)'.dev-dependencies] -winapi = { version = "0.3", features = ["fileapi", "processenv", "winnt"] } - -[dependencies.serde_json] -version = "1.0" -features = ["arbitrary_precision", "unbounded_depth"] - -[dependencies.ed25519-dalek] -workspace = true - -[dependencies.curve25519-dalek] -version = "4.1.3" -features = ["serde"] +], optional = true } [target.'cfg(not(target_family = "wasm"))'.dependencies] -secp256k1 = { version = "0.24.3", features = ["serde", "recovery"] } +secp256k1 = { version = "0.24.3", default-features = false, features = ["std","serde", "recovery"] } +rusqlite = { workspace = true, optional = true } [target.'cfg(target_family = "wasm")'.dependencies] -libsecp256k1 = { version = "0.7.0" } +libsecp256k1 = { version = "0.7.0", default-features = false, features = ["hmac"] } + +[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] +sha2 = { version = "0.10", features = ["asm"] } + +[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os="windows")))'.dependencies] +sha2 = { version = "0.10" } [dev-dependencies] -rand_core = { workspace = true } proptest = "1.6.0" +[target.'cfg(windows)'.dev-dependencies] +winapi = { version = "0.3", features = ["fileapi", "processenv", "winnt"] } + +[build-dependencies] +toml = { workspace = true } + [features] -default = ["developer-mode"] +default = ["developer-mode", "ctrlc-handler", "rand"] developer-mode = [] +# Enables graceful shutdown handling for Ctrl+C (SIGINT) signals. +# This pulls in the `nix` or `winapi` dependency. +ctrlc-handler = ["dep:nix", "dep:winapi"] slog_json = ["slog-json"] -rusqlite = ["dep:rusqlite"] -testing = [] +rusqlite = ["dep:rusqlite", "rand"] +# Enables the rand module. 
This flag must be off on deterministic +# platforms such as CosmWasm +rand = ["dep:rand"] serde = [] +testing = ["rand"] bech32_std = [] bech32_strict = [] -[build-dependencies] -toml = "0.5.6" - -[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] -sha2 = { version = "0.10", features = ["asm"] } - -[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os="windows")))'.dependencies] -sha2 = { version = "0.10" } +# Wasm-specific features for easier configuration +wasm-web = ["rand", "getrandom/js", "libsecp256k1/static-context"] +wasm-deterministic = ["getrandom/custom"] diff --git a/stacks-common/src/deps_common/bitcoin/mod.rs b/stacks-common/src/deps_common/bitcoin/mod.rs index 097bfa2b65..2585d164a8 100644 --- a/stacks-common/src/deps_common/bitcoin/mod.rs +++ b/stacks-common/src/deps_common/bitcoin/mod.rs @@ -25,9 +25,6 @@ //! software. //! -// Clippy flags -#![allow(clippy::needless_range_loop)] // suggests making a big mess of array newtypes - // Coding conventions #![deny(non_upper_case_globals)] #![deny(non_camel_case_types)] diff --git a/stacks-common/src/deps_common/bitcoin/util/mod.rs b/stacks-common/src/deps_common/bitcoin/util/mod.rs index 7032ba41cd..a4740ffe06 100644 --- a/stacks-common/src/deps_common/bitcoin/util/mod.rs +++ b/stacks-common/src/deps_common/bitcoin/util/mod.rs @@ -20,8 +20,6 @@ pub mod hash; use std::{error, fmt}; -use secp256k1; - use crate::deps_common::bitcoin::network; use crate::deps_common::bitcoin::network::serialize; @@ -50,8 +48,6 @@ pub trait BitArray { /// if appropriate. 
#[derive(Debug)] pub enum Error { - /// secp-related error - Secp256k1(secp256k1::Error), /// Serialization error Serialize(serialize::Error), /// Network error @@ -65,7 +61,6 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - Error::Secp256k1(ref e) => fmt::Display::fmt(e, f), Error::Serialize(ref e) => fmt::Display::fmt(e, f), Error::Network(ref e) => fmt::Display::fmt(e, f), Error::SpvBadProofOfWork => f.write_str("target correct but not attained"), @@ -77,7 +72,6 @@ impl fmt::Display for Error { impl error::Error for Error { fn cause(&self) -> Option<&dyn error::Error> { match *self { - Error::Secp256k1(ref e) => Some(e), Error::Serialize(ref e) => Some(e), Error::Network(ref e) => Some(e), Error::SpvBadProofOfWork | Error::SpvBadTarget => None, @@ -85,20 +79,6 @@ impl error::Error for Error { } } -#[doc(hidden)] -impl From for Error { - fn from(e: secp256k1::Error) -> Error { - Error::Secp256k1(e) - } -} - -#[doc(hidden)] -impl From for Error { - fn from(e: serialize::Error) -> Error { - Error::Serialize(e) - } -} - #[doc(hidden)] impl From for Error { fn from(e: network::Error) -> Error { diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 48f8cd5970..2a83bd4270 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -4,7 +4,6 @@ #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #![cfg_attr(test, allow(unused_variables, unused_assignments))] -#![allow(clippy::assertions_on_constants)] #[allow(unused_imports)] #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] @@ -13,10 +12,10 @@ extern crate slog; #[macro_use] extern crate serde_derive; -#[cfg(unix)] +#[cfg(all(unix, feature = "ctrlc-handler"))] extern crate nix; -#[cfg(windows)] +#[cfg(all(windows, feature = "ctrlc-handler"))] extern crate winapi; #[macro_use] @@ -29,7 +28,14 @@ pub mod types; pub mod address; -pub mod deps_common; +pub 
mod deps_common { + pub mod bech32; + pub mod bitcoin; + pub mod httparse; + + #[cfg(all(not(target_family = "wasm"), feature = "ctrlc-handler"))] + pub mod ctrlc; +} pub mod bitvec; diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 7026eacffd..58f247a88f 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -168,8 +168,12 @@ impl SortitionId { write!(hasher, "{pox}").expect("Failed to deserialize PoX ID into the hasher"); let h = Sha512Trunc256Sum::from_hasher(hasher); let s = SortitionId(h.0); - test_debug!("SortitionId({}) = {} + {}", &s, bhh, pox); - s + // The `test_debug!` macro will expand to nothing on release builds. + #[allow(clippy::let_and_return)] + { + test_debug!("SortitionId({}) = {} + {}", &s, bhh, pox); + s + } } } } diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index ef6ce6fa79..81ee94ae99 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -447,7 +447,7 @@ mod test { use std::io; use std::io::Read; - use rand::RngCore; + use rand::RngCore as _; use super::*; diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 1765817f29..69e14a4473 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -147,3 +147,5 @@ where let reader = BufReader::new(file); serde_json::from_reader::<_, J>(reader).map_err(std::io::Error::from) } +#[cfg(all(feature = "rusqlite", target_family = "wasm"))] +compile_error!("The `rusqlite` feature is not supported for wasm targets"); diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index c851c04ae5..e7bfca27fb 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -317,8 +317,7 @@ mod test { use std::io::{Read, Write}; use std::{io, thread}; - use rand; - use rand::RngCore; + use rand::RngCore as _; use super::*; diff --git 
a/stacks-common/src/util/secp256k1/native.rs b/stacks-common/src/util/secp256k1/native.rs index d47bcb597b..74c57ad837 100644 --- a/stacks-common/src/util/secp256k1/native.rs +++ b/stacks-common/src/util/secp256k1/native.rs @@ -24,7 +24,6 @@ use ::secp256k1::{ constants as LibSecp256k1Constants, Error as LibSecp256k1Error, Message as LibSecp256k1Message, PublicKey as LibSecp256k1PublicKey, Secp256k1, SecretKey as LibSecp256k1PrivateKey, }; -use rand::RngCore; use serde::de::{Deserialize, Error as de_Error}; use serde::Serialize; @@ -240,7 +239,10 @@ impl PublicKey for Secp256k1PublicKey { } impl Secp256k1PrivateKey { + #[cfg(feature = "rand")] pub fn random() -> Secp256k1PrivateKey { + use rand::RngCore as _; + let mut rng = rand::thread_rng(); loop { // keep trying to generate valid bytes @@ -422,6 +424,7 @@ pub fn secp256k1_verify( #[cfg(test)] mod tests { + use rand::RngCore as _; use secp256k1; use secp256k1::{PublicKey as LibSecp256k1PublicKey, Secp256k1}; @@ -612,15 +615,13 @@ mod tests { (Err(e1), Err(e2)) => assert_eq!(e1, e2), (Err(e1), _) => { test_debug!("Failed to verify signature: {}", e1); - assert!( - false, + panic!( "failed fixture (verification: {:?}): {:#?}", &ver_res, &fixture ); } (_, _) => { - assert!( - false, + panic!( "failed fixture (verification: {:?}): {:#?}", &ver_res, &fixture ); diff --git a/stacks-common/src/util/secp256k1/wasm.rs b/stacks-common/src/util/secp256k1/wasm.rs index bea3c5e2d5..c9c79a352e 100644 --- a/stacks-common/src/util/secp256k1/wasm.rs +++ b/stacks-common/src/util/secp256k1/wasm.rs @@ -16,12 +16,12 @@ use ::libsecp256k1; pub use ::libsecp256k1::Error; +#[cfg(not(feature = "wasm-deterministic"))] +use ::libsecp256k1::{Error as LibSecp256k1Error, Message as LibSecp256k1Message}; use ::libsecp256k1::{ - Error as LibSecp256k1Error, Message as LibSecp256k1Message, PublicKey as LibSecp256k1PublicKey, - RecoveryId as LibSecp256k1RecoveryId, SecretKey as LibSecp256k1PrivateKey, - Signature as LibSecp256k1Signature, + 
PublicKey as LibSecp256k1PublicKey, RecoveryId as LibSecp256k1RecoveryId, + SecretKey as LibSecp256k1PrivateKey, Signature as LibSecp256k1Signature, }; -use rand::RngCore; use serde::de::{Deserialize, Error as de_Error}; use serde::Serialize; @@ -102,6 +102,7 @@ impl Secp256k1PublicKey { Secp256k1PublicKey::from_slice(&data[..]).map_err(|_e| "Invalid public key hex string") } + #[cfg(not(feature = "wasm-deterministic"))] pub fn from_private(privk: &Secp256k1PrivateKey) -> Secp256k1PublicKey { let key = LibSecp256k1PublicKey::from_secret_key(&privk.key); Secp256k1PublicKey { @@ -110,6 +111,7 @@ impl Secp256k1PublicKey { } } + #[cfg(not(feature = "wasm-deterministic"))] /// recover message and signature to public key (will be compressed) pub fn recover_to_pubkey( msg: &[u8], @@ -123,7 +125,10 @@ impl Secp256k1PublicKey { } impl Secp256k1PrivateKey { + #[cfg(feature = "rand")] pub fn new() -> Secp256k1PrivateKey { + use rand::RngCore as _; + let mut rng = rand::thread_rng(); loop { // keep trying to generate valid bytes @@ -184,6 +189,7 @@ impl Secp256k1PrivateKey { } } +#[cfg(not(feature = "wasm-deterministic"))] pub fn secp256k1_recover( message_arr: &[u8], serialized_signature: &[u8], @@ -195,6 +201,7 @@ pub fn secp256k1_recover( Ok(recovered_pub_key.serialize_compressed()) } +#[cfg(not(feature = "wasm-deterministic"))] pub fn secp256k1_verify( message_arr: &[u8], serialized_signature: &[u8], @@ -298,6 +305,12 @@ impl PublicKey for Secp256k1PublicKey { self.to_bytes() } + #[cfg(feature = "wasm-deterministic")] + fn verify(&self, _data_hash: &[u8], _sig: &MessageSignature) -> Result { + Err("Not implemented for wasm-deterministic") + } + + #[cfg(not(feature = "wasm-deterministic"))] fn verify(&self, data_hash: &[u8], sig: &MessageSignature) -> Result { let pub_key = Secp256k1PublicKey::recover_to_pubkey(data_hash, sig)?; Ok(self.eq(&pub_key)) @@ -313,6 +326,12 @@ impl PrivateKey for Secp256k1PrivateKey { bits } + #[cfg(feature = "wasm-deterministic")] + fn 
sign(&self, _data_hash: &[u8]) -> Result { + Err("Not implemented for wasm-deterministic") + } + + #[cfg(not(feature = "wasm-deterministic"))] fn sign(&self, data_hash: &[u8]) -> Result { let message = LibSecp256k1Message::parse_slice(data_hash) .map_err(|_e| "Invalid message: failed to decode data hash: must be a 32-byte hash")?; diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index bd124a5da0..a0cbee3373 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -27,7 +27,6 @@ use std::{error, fmt}; use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; use curve25519_dalek::scalar::{clamp_integer, Scalar as ed25519_Scalar}; -use rand; use sha2::{Digest, Sha512}; use crate::util::hash::{hex_bytes, to_hex}; @@ -99,6 +98,7 @@ impl PartialEq for VRFPrivateKey { } } +#[cfg(any(test, feature = "testing"))] impl Default for VRFPrivateKey { fn default() -> Self { Self::new() @@ -106,9 +106,13 @@ impl Default for VRFPrivateKey { } impl VRFPrivateKey { + #[cfg(any(test, feature = "testing"))] pub fn new() -> VRFPrivateKey { + use rand::RngCore as _; let mut rng = rand::thread_rng(); - let signing_key = ed25519_dalek::SigningKey::generate(&mut rng); + let mut sk_bytes = [0u8; 32]; + rng.fill_bytes(&mut sk_bytes); + let signing_key = ed25519_dalek::SigningKey::from_bytes(&sk_bytes); VRFPrivateKey(signing_key) } @@ -182,7 +186,6 @@ pub enum Error { InvalidPublicKey, InvalidDataError, InvalidHashPoints, - OSRNGError(rand::Error), } impl fmt::Display for Error { @@ -191,7 +194,6 @@ impl fmt::Display for Error { Error::InvalidPublicKey => write!(f, "Invalid public key"), Error::InvalidDataError => write!(f, "No data could be found"), Error::InvalidHashPoints => write!(f, "VRF hash points did not yield a valid scalar"), - Error::OSRNGError(ref e) => fmt::Display::fmt(e, f), } } } @@ -202,7 +204,6 @@ impl error::Error for Error { Error::InvalidPublicKey => None, 
Error::InvalidDataError => None, Error::InvalidHashPoints => None, - Error::OSRNGError(ref e) => Some(e), } } } @@ -540,8 +541,7 @@ impl VRF { #[cfg(test)] mod tests { - use rand; - use rand::RngCore; + use rand::RngCore as _; use super::*; use crate::util::hash::hex_bytes; diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 9d170ab6f2..c4d6cb0ed9 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -15,7 +15,7 @@ // along with this program. If not, see . use rand::rngs::ThreadRng; -use rand::thread_rng; +use rand::{thread_rng, RngCore as _}; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, TrieHash, VRFSeed, @@ -690,7 +690,9 @@ fn test_burn_snapshot_sequence() { for i in 0..32 { let mut csprng: ThreadRng = thread_rng(); - let vrf_privkey = VRFPrivateKey(ed25519_dalek::SigningKey::generate(&mut csprng)); + let mut sk_bytes = [0u8; 32]; + csprng.fill_bytes(&mut sk_bytes); + let vrf_privkey = VRFPrivateKey(ed25519_dalek::SigningKey::from_bytes(&sk_bytes)); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); let pubkey_hex = vrf_pubkey.to_hex();