
Commit f07cba1

r00ster91 committed
test(names): remove unnecessary "tokenizer - " prefix
1 parent d234237 commit f07cba1

1 file changed

lib/std/zig/tokenizer.zig

Lines changed: 30 additions & 30 deletions
@@ -1485,7 +1485,7 @@ test "line comment followed by top-level comptime" {
     });
 }

-test "tokenizer - unknown length pointer and then c pointer" {
+test "unknown length pointer and then c pointer" {
     try testTokenize(
         \\[*]u8
         \\[*c]u8
@@ -1502,7 +1502,7 @@ test "tokenizer - unknown length pointer and then c pointer" {
     });
 }

-test "tokenizer - code point literal with hex escape" {
+test "code point literal with hex escape" {
     try testTokenize(
         \\'\x1b'
     , &.{.char_literal});
@@ -1511,21 +1511,21 @@ test "tokenizer - code point literal with hex escape" {
     , &.{ .invalid, .invalid });
 }

-test "tokenizer - newline in char literal" {
+test "newline in char literal" {
     try testTokenize(
         \\'
         \\'
     , &.{ .invalid, .invalid });
 }

-test "tokenizer - newline in string literal" {
+test "newline in string literal" {
     try testTokenize(
         \\"
         \\"
     , &.{ .invalid, .string_literal });
 }

-test "tokenizer - code point literal with unicode escapes" {
+test "code point literal with unicode escapes" {
     // Valid unicode escapes
     try testTokenize(
         \\'\u{3}'
@@ -1575,13 +1575,13 @@ test "tokenizer - code point literal with unicode escapes" {
     , &.{ .invalid, .integer_literal, .invalid });
 }

-test "tokenizer - code point literal with unicode code point" {
+test "code point literal with unicode code point" {
     try testTokenize(
         \\'💩'
     , &.{.char_literal});
 }

-test "tokenizer - float literal e exponent" {
+test "float literal e exponent" {
     try testTokenize("a = 4.94065645841246544177e-324;\n", &.{
         .identifier,
         .equal,
@@ -1590,7 +1590,7 @@ test "tokenizer - float literal e exponent" {
     });
 }

-test "tokenizer - float literal p exponent" {
+test "float literal p exponent" {
     try testTokenize("a = 0x1.a827999fcef32p+1022;\n", &.{
         .identifier,
         .equal,
@@ -1599,19 +1599,19 @@ test "tokenizer - float literal p exponent" {
     });
 }

-test "tokenizer - chars" {
+test "chars" {
     try testTokenize("'c'", &.{.char_literal});
 }

-test "tokenizer - invalid token characters" {
+test "invalid token characters" {
     try testTokenize("#", &.{.invalid});
     try testTokenize("`", &.{.invalid});
     try testTokenize("'c", &.{.invalid});
     try testTokenize("'", &.{.invalid});
     try testTokenize("''", &.{ .invalid, .invalid });
 }

-test "tokenizer - invalid literal/comment characters" {
+test "invalid literal/comment characters" {
     try testTokenize("\"\x00\"", &.{
         .string_literal,
         .invalid,
@@ -1627,12 +1627,12 @@ test "tokenizer - invalid literal/comment characters" {
     });
 }

-test "tokenizer - utf8" {
+test "utf8" {
     try testTokenize("//\xc2\x80", &.{});
     try testTokenize("//\xf4\x8f\xbf\xbf", &.{});
 }

-test "tokenizer - invalid utf8" {
+test "invalid utf8" {
     try testTokenize("//\x80", &.{
         .invalid,
     });
@@ -1659,7 +1659,7 @@ test "tokenizer - invalid utf8" {
     });
 }

-test "tokenizer - illegal unicode codepoints" {
+test "illegal unicode codepoints" {
     // unicode newline characters.U+0085, U+2028, U+2029
     try testTokenize("//\xc2\x84", &.{});
     try testTokenize("//\xc2\x85", &.{
@@ -1676,7 +1676,7 @@ test "tokenizer - illegal unicode codepoints" {
     try testTokenize("//\xe2\x80\xaa", &.{});
 }

-test "tokenizer - string identifier and builtin fns" {
+test "string identifier and builtin fns" {
     try testTokenize(
         \\const @"if" = @import("std");
     , &.{
@@ -1691,15 +1691,15 @@ test "tokenizer - string identifier and builtin fns" {
     });
 }

-test "tokenizer - multiline string literal with literal tab" {
+test "multiline string literal with literal tab" {
     try testTokenize(
         \\\\foo	bar
     , &.{
         .multiline_string_literal_line,
     });
 }

-test "tokenizer - comments with literal tab" {
+test "comments with literal tab" {
     try testTokenize(
         \\//foo	bar
         \\//!foo	bar
@@ -1715,14 +1715,14 @@ test "tokenizer - comments with literal tab" {
     });
 }

-test "tokenizer - pipe and then invalid" {
+test "pipe and then invalid" {
     try testTokenize("||=", &.{
         .pipe_pipe,
         .equal,
     });
 }

-test "tokenizer - line comment and doc comment" {
+test "line comment and doc comment" {
     try testTokenize("//", &.{});
     try testTokenize("// a / b", &.{});
     try testTokenize("// /", &.{});
@@ -1733,7 +1733,7 @@ test "tokenizer - line comment and doc comment" {
     try testTokenize("//!!", &.{.container_doc_comment});
 }

-test "tokenizer - line comment followed by identifier" {
+test "line comment followed by identifier" {
     try testTokenize(
         \\ Unexpected,
         \\ // another
@@ -1746,7 +1746,7 @@ test "tokenizer - line comment followed by identifier" {
     });
 }

-test "tokenizer - UTF-8 BOM is recognized and skipped" {
+test "UTF-8 BOM is recognized and skipped" {
     try testTokenize("\xEF\xBB\xBFa;\n", &.{
         .identifier,
         .semicolon,
@@ -1788,15 +1788,15 @@ test "correctly parse pointer dereference followed by asterisk" {
     });
 }

-test "tokenizer - range literals" {
+test "range literals" {
     try testTokenize("0...9", &.{ .integer_literal, .ellipsis3, .integer_literal });
     try testTokenize("'0'...'9'", &.{ .char_literal, .ellipsis3, .char_literal });
     try testTokenize("0x00...0x09", &.{ .integer_literal, .ellipsis3, .integer_literal });
     try testTokenize("0b00...0b11", &.{ .integer_literal, .ellipsis3, .integer_literal });
     try testTokenize("0o00...0o11", &.{ .integer_literal, .ellipsis3, .integer_literal });
 }

-test "tokenizer - number literals decimal" {
+test "number literals decimal" {
     try testTokenize("0", &.{.integer_literal});
     try testTokenize("1", &.{.integer_literal});
     try testTokenize("2", &.{.integer_literal});
@@ -1863,7 +1863,7 @@ test "tokenizer - number literals decimal" {
     try testTokenize("1.0e0_+", &.{ .invalid, .plus });
 }

-test "tokenizer - number literals binary" {
+test "number literals binary" {
     try testTokenize("0b0", &.{.integer_literal});
     try testTokenize("0b1", &.{.integer_literal});
     try testTokenize("0b2", &.{ .invalid, .integer_literal });
@@ -1902,7 +1902,7 @@ test "tokenizer - number literals binary" {
     try testTokenize("0b1_,", &.{ .invalid, .comma });
 }

-test "tokenizer - number literals octal" {
+test "number literals octal" {
     try testTokenize("0o0", &.{.integer_literal});
     try testTokenize("0o1", &.{.integer_literal});
     try testTokenize("0o2", &.{.integer_literal});
@@ -1941,7 +1941,7 @@ test "tokenizer - number literals octal" {
     try testTokenize("0o_,", &.{ .invalid, .identifier, .comma });
 }

-test "tokenizer - number literals hexadecimal" {
+test "number literals hexadecimal" {
     try testTokenize("0x0", &.{.integer_literal});
     try testTokenize("0x1", &.{.integer_literal});
     try testTokenize("0x2", &.{.integer_literal});
@@ -2029,22 +2029,22 @@ test "tokenizer - number literals hexadecimal" {
     try testTokenize("0x0.0p0_", &.{ .invalid, .eof });
 }

-test "tokenizer - multi line string literal with only 1 backslash" {
+test "multi line string literal with only 1 backslash" {
     try testTokenize("x \\\n;", &.{ .identifier, .invalid, .semicolon });
 }

-test "tokenizer - invalid builtin identifiers" {
+test "invalid builtin identifiers" {
     try testTokenize("@()", &.{ .invalid, .l_paren, .r_paren });
     try testTokenize("@0()", &.{ .invalid, .integer_literal, .l_paren, .r_paren });
 }

-test "tokenizer - invalid token with unfinished escape right before eof" {
+test "invalid token with unfinished escape right before eof" {
     try testTokenize("\"\\", &.{.invalid});
     try testTokenize("'\\", &.{.invalid});
     try testTokenize("'\\u", &.{.invalid});
 }

-test "tokenizer - saturating" {
+test "saturating" {
     try testTokenize("<<", &.{.angle_bracket_angle_bracket_left});
     try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe});
     try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal});
