@@ -123,8 +123,8 @@ pub struct Parser<'a> {
     pub capture_cfg: bool,
     restrictions: Restrictions,
     expected_tokens: Vec<TokenType>,
-    // Important: This must only be advanced from `next_tok`
-    // to ensure that `token_cursor.num_next_calls` is updated properly
+    // Important: This must only be advanced from `bump` to ensure that
+    // `token_cursor.num_next_calls` is updated properly.
     token_cursor: TokenCursor,
     desugar_doc_comments: bool,
     /// This field is used to keep track of how many left angle brackets we have seen. This is
@@ -476,33 +476,6 @@ impl<'a> Parser<'a> {
         parser
     }
 
-    #[inline]
-    fn next_tok(&mut self, fallback_span: Span) -> (Token, Spacing) {
-        loop {
-            let (mut next, spacing) = if self.desugar_doc_comments {
-                self.token_cursor.inlined_next_desugared()
-            } else {
-                self.token_cursor.inlined_next()
-            };
-            self.token_cursor.num_next_calls += 1;
-            // We've retrieved a token from the underlying
-            // cursor, so we no longer need to worry about
-            // an unglued token. See `break_and_eat` for more details.
-            self.token_cursor.break_last_token = false;
-            if next.span.is_dummy() {
-                // Tweak the location for better diagnostics, but keep syntactic context intact.
-                next.span = fallback_span.with_ctxt(next.span.ctxt());
-            }
-            if matches!(
-                next.kind,
-                token::OpenDelim(token::NoDelim) | token::CloseDelim(token::NoDelim)
-            ) {
-                continue;
-            }
-            return (next, spacing);
-        }
-    }
-
     pub fn unexpected<T>(&mut self) -> PResult<'a, T> {
         match self.expect_one_of(&[], &[]) {
             Err(e) => Err(e),
@@ -697,7 +670,7 @@ impl<'a> Parser<'a> {
                 //
                 // If we consume any additional tokens, then this token
                 // is not needed (we'll capture the entire 'glued' token),
-                // and `next_tok` will set this field to `None`
+                // and `bump` will set this field to `None`
                 self.token_cursor.break_last_token = true;
                 // Use the spacing of the glued token as the spacing
                 // of the unglued second token.
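
For context, `break_last_token` is bookkeeping for token "un-gluing": when the parser wants a single `>` but the lexer produced a glued `>>`, `break_and_eat` splits the token and records that the second half is still pending, and the flag is cleared as soon as a fresh token is pulled from the underlying cursor. The sketch below models only that bookkeeping with made-up stand-in types (`Tok`, `Gluey` are illustrative, not rustc's `Token`/`TokenCursor`); it omits the token-collection machinery the real flag also feeds.

```rust
// Minimal sketch of token un-gluing, assuming simplified stand-in types.
#[derive(Clone, Debug, PartialEq)]
enum Tok {
    Gt,            // `>`
    Shr,           // `>>` (two `>` glued together by the lexer)
    Ident(String), // any other token
    Eof,
}

struct Gluey {
    tokens: Vec<Tok>,
    pos: usize,
    current: Tok,
    // Set while `current` is the second half of a token we broke apart;
    // cleared whenever a fresh token is pulled from `tokens`.
    break_last_token: bool,
}

impl Gluey {
    fn new(mut tokens: Vec<Tok>) -> Self {
        tokens.push(Tok::Eof);
        let current = tokens[0].clone();
        Gluey { tokens, pos: 0, current, break_last_token: false }
    }

    /// Advance to the next token from the underlying stream.
    fn bump(&mut self) {
        self.pos += 1;
        self.current = self.tokens.get(self.pos).cloned().unwrap_or(Tok::Eof);
        // A fresh token was retrieved, so any earlier split is history.
        self.break_last_token = false;
    }

    /// Consume a single `>`, un-gluing `>>` if necessary.
    fn eat_gt(&mut self) -> bool {
        if self.current == Tok::Gt {
            self.bump();
            true
        } else if self.current == Tok::Shr {
            // Serve the first `>` now and leave the second `>` as the
            // current token, without advancing the underlying stream.
            self.current = Tok::Gt;
            self.break_last_token = true;
            true
        } else {
            false
        }
    }
}

fn main() {
    // Parsing the tail of `Vec<Vec<u8>>`: `u8` followed by a glued `>>`.
    let mut p = Gluey::new(vec![Tok::Ident("u8".into()), Tok::Shr]);
    p.bump(); // step past `u8`; now looking at the glued `>>`
    assert!(p.eat_gt());
    assert!(p.break_last_token); // sitting on the unglued second `>`
    assert!(p.eat_gt());
    assert!(!p.break_last_token); // a fresh token was pulled, flag cleared
    assert_eq!(p.current, Tok::Eof);
}
```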
@@ -1035,8 +1008,29 @@ impl<'a> Parser<'a> {
 
     /// Advance the parser by one token.
     pub fn bump(&mut self) {
-        let next_token = self.next_tok(self.token.span);
-        self.inlined_bump_with(next_token);
+        let fallback_span = self.token.span;
+        loop {
+            let (mut next, spacing) = if self.desugar_doc_comments {
+                self.token_cursor.inlined_next_desugared()
+            } else {
+                self.token_cursor.inlined_next()
+            };
+            self.token_cursor.num_next_calls += 1;
+            // We've retrieved a token from the underlying
+            // cursor, so we no longer need to worry about
+            // an unglued token. See `break_and_eat` for more details.
+            self.token_cursor.break_last_token = false;
+            if next.span.is_dummy() {
+                // Tweak the location for better diagnostics, but keep syntactic context intact.
+                next.span = fallback_span.with_ctxt(next.span.ctxt());
+            }
+            if !matches!(
+                next.kind,
+                token::OpenDelim(token::NoDelim) | token::CloseDelim(token::NoDelim)
+            ) {
+                return self.inlined_bump_with((next, spacing));
+            }
+        }
     }
 
     /// Look-ahead `dist` tokens of `self.token` and get access to that token there.
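
Taken together, the change moves the token-fetching loop from the removed `next_tok` helper directly into `bump`: each call pulls from the token cursor, increments `num_next_calls`, clears `break_last_token`, patches dummy spans, and skips invisible `NoDelim` delimiters before handing the token to `inlined_bump_with`. The standalone sketch below illustrates that skip-and-count loop under simplifying assumptions: `SimpleToken`, `SimpleCursor`, and `SimpleParser` are made-up stand-ins, and span fixing and doc-comment desugaring are omitted.

```rust
// Simplified stand-in for the loop now inlined into `bump`: pull the next
// token from an underlying cursor, count the call, and skip "no-delimiter"
// markers so callers never observe them.
#[derive(Clone, Debug, PartialEq)]
enum SimpleToken {
    Ident(&'static str),
    NoDelim, // invisible delimiter that callers should never see
    Eof,
}

struct SimpleCursor {
    stream: Vec<SimpleToken>,
    index: usize,
    num_next_calls: usize,
}

impl SimpleCursor {
    fn next(&mut self) -> SimpleToken {
        self.num_next_calls += 1;
        let tok = self.stream.get(self.index).cloned().unwrap_or(SimpleToken::Eof);
        self.index += 1;
        tok
    }
}

struct SimpleParser {
    cursor: SimpleCursor,
    token: SimpleToken,
}

impl SimpleParser {
    /// Advance by one *visible* token, looping past invisible delimiters.
    fn bump(&mut self) {
        loop {
            let next = self.cursor.next();
            if next != SimpleToken::NoDelim {
                self.token = next;
                return;
            }
            // Invisible delimiter: loop again instead of surfacing it.
        }
    }
}

fn main() {
    let cursor = SimpleCursor {
        stream: vec![
            SimpleToken::Ident("a"),
            SimpleToken::NoDelim,
            SimpleToken::Ident("b"),
        ],
        index: 0,
        num_next_calls: 0,
    };
    let mut parser = SimpleParser { cursor, token: SimpleToken::Eof };

    parser.bump();
    assert_eq!(parser.token, SimpleToken::Ident("a"));

    parser.bump();
    // The `NoDelim` marker was pulled and skipped within the same call,
    // so the counter records three cursor reads for two visible bumps.
    assert_eq!(parser.token, SimpleToken::Ident("b"));
    assert_eq!(parser.cursor.num_next_calls, 3);
}
```

Keeping the call counter inside the single advance path is what makes the updated comment on `token_cursor` hold: advancing the cursor anywhere other than `bump` would let `num_next_calls` drift out of sync with the tokens actually consumed.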