@@ -1485,7 +1485,7 @@ test "line comment followed by top-level comptime" {
     });
 }

-test "tokenizer - unknown length pointer and then c pointer" {
+test "unknown length pointer and then c pointer" {
     try testTokenize(
         \\[*]u8
         \\[*c]u8
@@ -1502,7 +1502,7 @@ test "tokenizer - unknown length pointer and then c pointer" {
     });
 }

-test "tokenizer - code point literal with hex escape" {
+test "code point literal with hex escape" {
     try testTokenize(
         \\'\x1b'
     , &.{.char_literal});
@@ -1511,21 +1511,21 @@ test "tokenizer - code point literal with hex escape" {
     , &.{ .invalid, .invalid });
 }

-test "tokenizer - newline in char literal" {
+test "newline in char literal" {
     try testTokenize(
         \\'
         \\'
     , &.{ .invalid, .invalid });
 }

-test "tokenizer - newline in string literal" {
+test "newline in string literal" {
     try testTokenize(
         \\"
         \\"
     , &.{ .invalid, .string_literal });
 }

-test "tokenizer - code point literal with unicode escapes" {
+test "code point literal with unicode escapes" {
     // Valid unicode escapes
     try testTokenize(
         \\'\u{3}'
@@ -1575,13 +1575,13 @@ test "tokenizer - code point literal with unicode escapes" {
     , &.{ .invalid, .integer_literal, .invalid });
 }

-test "tokenizer - code point literal with unicode code point" {
+test "code point literal with unicode code point" {
     try testTokenize(
         \\'💩'
     , &.{.char_literal});
 }

-test "tokenizer - float literal e exponent" {
+test "float literal e exponent" {
     try testTokenize("a = 4.94065645841246544177e-324;\n", &.{
         .identifier,
         .equal,
@@ -1590,7 +1590,7 @@ test "tokenizer - float literal e exponent" {
     });
 }

-test "tokenizer - float literal p exponent" {
+test "float literal p exponent" {
     try testTokenize("a = 0x1.a827999fcef32p+1022;\n", &.{
         .identifier,
         .equal,
@@ -1599,19 +1599,19 @@ test "tokenizer - float literal p exponent" {
     });
 }

-test "tokenizer - chars" {
+test "chars" {
     try testTokenize("'c'", &.{.char_literal});
 }

-test "tokenizer - invalid token characters" {
+test "invalid token characters" {
     try testTokenize("#", &.{.invalid});
     try testTokenize("`", &.{.invalid});
     try testTokenize("'c", &.{.invalid});
     try testTokenize("'", &.{.invalid});
     try testTokenize("''", &.{ .invalid, .invalid });
 }

-test "tokenizer - invalid literal/comment characters" {
+test "invalid literal/comment characters" {
     try testTokenize("\"\x00\"", &.{
         .string_literal,
         .invalid,
@@ -1627,12 +1627,12 @@ test "tokenizer - invalid literal/comment characters" {
     });
 }

-test "tokenizer - utf8" {
+test "utf8" {
     try testTokenize("//\xc2\x80", &.{});
     try testTokenize("//\xf4\x8f\xbf\xbf", &.{});
 }

-test "tokenizer - invalid utf8" {
+test "invalid utf8" {
     try testTokenize("//\x80", &.{
         .invalid,
     });
@@ -1659,7 +1659,7 @@ test "tokenizer - invalid utf8" {
     });
 }

-test "tokenizer - illegal unicode codepoints" {
+test "illegal unicode codepoints" {
     // unicode newline characters.U+0085, U+2028, U+2029
     try testTokenize("//\xc2\x84", &.{});
     try testTokenize("//\xc2\x85", &.{
@@ -1676,7 +1676,7 @@ test "tokenizer - illegal unicode codepoints" {
     try testTokenize("//\xe2\x80\xaa", &.{});
 }

-test "tokenizer - string identifier and builtin fns" {
+test "string identifier and builtin fns" {
     try testTokenize(
         \\const @"if" = @import("std");
     , &.{
@@ -1691,15 +1691,15 @@ test "tokenizer - string identifier and builtin fns" {
     });
 }

-test "tokenizer - multiline string literal with literal tab" {
+test "multiline string literal with literal tab" {
     try testTokenize(
         \\\\foo	bar
     , &.{
         .multiline_string_literal_line,
     });
 }

-test "tokenizer - comments with literal tab" {
+test "comments with literal tab" {
     try testTokenize(
         \\//foo	bar
         \\//!foo	bar
@@ -1715,14 +1715,14 @@ test "tokenizer - comments with literal tab" {
     });
 }

-test "tokenizer - pipe and then invalid" {
+test "pipe and then invalid" {
     try testTokenize("||=", &.{
         .pipe_pipe,
         .equal,
     });
 }

-test "tokenizer - line comment and doc comment" {
+test "line comment and doc comment" {
     try testTokenize("//", &.{});
     try testTokenize("// a / b", &.{});
     try testTokenize("// /", &.{});
@@ -1733,7 +1733,7 @@ test "tokenizer - line comment and doc comment" {
     try testTokenize("//!!", &.{.container_doc_comment});
 }

-test "tokenizer - line comment followed by identifier" {
+test "line comment followed by identifier" {
     try testTokenize(
         \\ Unexpected,
         \\ // another
@@ -1746,7 +1746,7 @@ test "tokenizer - line comment followed by identifier" {
     });
 }

-test "tokenizer - UTF-8 BOM is recognized and skipped" {
+test "UTF-8 BOM is recognized and skipped" {
     try testTokenize("\xEF\xBB\xBFa;\n", &.{
         .identifier,
         .semicolon,
@@ -1788,15 +1788,15 @@ test "correctly parse pointer dereference followed by asterisk" {
     });
 }

-test "tokenizer - range literals" {
+test "range literals" {
     try testTokenize("0...9", &.{ .integer_literal, .ellipsis3, .integer_literal });
     try testTokenize("'0'...'9'", &.{ .char_literal, .ellipsis3, .char_literal });
     try testTokenize("0x00...0x09", &.{ .integer_literal, .ellipsis3, .integer_literal });
     try testTokenize("0b00...0b11", &.{ .integer_literal, .ellipsis3, .integer_literal });
     try testTokenize("0o00...0o11", &.{ .integer_literal, .ellipsis3, .integer_literal });
 }

-test "tokenizer - number literals decimal" {
+test "number literals decimal" {
     try testTokenize("0", &.{.integer_literal});
     try testTokenize("1", &.{.integer_literal});
     try testTokenize("2", &.{.integer_literal});
@@ -1863,7 +1863,7 @@ test "tokenizer - number literals decimal" {
     try testTokenize("1.0e0_+", &.{ .invalid, .plus });
 }

-test "tokenizer - number literals binary" {
+test "number literals binary" {
     try testTokenize("0b0", &.{.integer_literal});
     try testTokenize("0b1", &.{.integer_literal});
     try testTokenize("0b2", &.{ .invalid, .integer_literal });
@@ -1902,7 +1902,7 @@ test "tokenizer - number literals binary" {
     try testTokenize("0b1_,", &.{ .invalid, .comma });
 }

-test "tokenizer - number literals octal" {
+test "number literals octal" {
     try testTokenize("0o0", &.{.integer_literal});
     try testTokenize("0o1", &.{.integer_literal});
     try testTokenize("0o2", &.{.integer_literal});
@@ -1941,7 +1941,7 @@ test "tokenizer - number literals octal" {
     try testTokenize("0o_,", &.{ .invalid, .identifier, .comma });
 }

-test "tokenizer - number literals hexadecimal" {
+test "number literals hexadecimal" {
     try testTokenize("0x0", &.{.integer_literal});
     try testTokenize("0x1", &.{.integer_literal});
     try testTokenize("0x2", &.{.integer_literal});
@@ -2029,22 +2029,22 @@ test "tokenizer - number literals hexadecimal" {
     try testTokenize("0x0.0p0_", &.{ .invalid, .eof });
 }

-test "tokenizer - multi line string literal with only 1 backslash" {
+test "multi line string literal with only 1 backslash" {
     try testTokenize("x \\\n;", &.{ .identifier, .invalid, .semicolon });
 }

-test "tokenizer - invalid builtin identifiers" {
+test "invalid builtin identifiers" {
     try testTokenize("@()", &.{ .invalid, .l_paren, .r_paren });
     try testTokenize("@0()", &.{ .invalid, .integer_literal, .l_paren, .r_paren });
 }

-test "tokenizer - invalid token with unfinished escape right before eof" {
+test "invalid token with unfinished escape right before eof" {
     try testTokenize("\"\\", &.{.invalid});
     try testTokenize("'\\", &.{.invalid});
     try testTokenize("'\\u", &.{.invalid});
 }

-test "tokenizer - saturating" {
+test "saturating" {
     try testTokenize("<<", &.{.angle_bracket_angle_bracket_left});
     try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe});
     try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal});