@@ -102,7 +102,7 @@ SMRange TGLexer::getLocRange() const {
 }
 
 /// ReturnError - Set the error to the specified string at the specified
-/// location.  This is defined to always return tgtok::Error.
+/// location. This is defined to always return tgtok::Error.
 tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
   PrintError(Loc, Msg);
   return tgtok::Error;
@@ -116,7 +116,7 @@ bool TGLexer::processEOF() {
   SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
   if (ParentIncludeLoc != SMLoc()) {
     // If prepExitInclude() detects a problem with the preprocessing
-    // control stack, it will return false.  Pretend that we reached
+    // control stack, it will return false. Pretend that we reached
     // the final EOF and stop lexing more tokens by returning false
     // to LexToken().
     if (!prepExitInclude(false))
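
(A minimal, hypothetical sketch of the include-stack idea behind processEOF(): when an included buffer runs out, lexing resumes in the buffer that included it, and only the outermost buffer's end is a real EOF. The Buffer/IncludeStack types below are illustrative only, not the SourceMgr API.)

```cpp
#include <cstddef>
#include <string>
#include <vector>

struct Buffer {
  std::string Text;
  std::size_t Pos = 0;
};

struct IncludeStack {
  std::vector<Buffer> Buffers;

  // Returns false when the outermost buffer is exhausted (the "real" EOF);
  // otherwise pops back to the including file and lexing continues there.
  bool popOnEOF() {
    if (Buffers.size() <= 1)
      return false;
    Buffers.pop_back();
    return true;
  }
};
```
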
@@ -147,7 +147,7 @@ int TGLexer::getNextChar() {
 
   case 0: {
     // A NUL character in the stream is either the end of the current buffer or
-    // a spurious NUL in the file.  Disambiguate that here.
+    // a spurious NUL in the file. Disambiguate that here.
     if (CurPtr - 1 == CurBuf.end()) {
       --CurPtr; // Arrange for another call to return EOF again.
       return EOF;
@@ -160,7 +160,7 @@ int TGLexer::getNextChar() {
   case '\n':
   case '\r':
     // Handle the newline character by ignoring it and incrementing the line
-    // count.  However, be careful about 'dos style' files with \n\r in them.
+    // count. However, be careful about 'dos style' files with \n\r in them.
     // Only treat a \n\r or \r\n as a single line.
     if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
         *CurPtr != CurChar)
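
(As a rough illustration of the newline handling described in the comment above: a "\r\n" or "\n\r" pair is counted as a single line break by peeking at the following character. This is a standalone sketch with a hypothetical countLines() helper, not the actual getNextChar() logic.)

```cpp
#include <cstddef>
#include <string>

// Count lines while treating "\r\n" and "\n\r" as one newline each.
std::size_t countLines(const std::string &Buf) {
  std::size_t Lines = 1;
  for (std::size_t I = 0; I < Buf.size(); ++I) {
    char C = Buf[I];
    if (C == '\n' || C == '\r') {
      // If the next character is the *other* newline character, consume it
      // too so the pair counts as a single line break.
      if (I + 1 < Buf.size() && (Buf[I + 1] == '\n' || Buf[I + 1] == '\r') &&
          Buf[I + 1] != C)
        ++I;
      ++Lines;
    }
  }
  return Lines;
}
```
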
@@ -259,7 +259,7 @@ tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
     int NextChar = 0;
     if (isDigit(CurChar)) {
       // Allow identifiers to start with a number if it is followed by
-      // an identifier.  This can happen with paste operations like
+      // an identifier. This can happen with paste operations like
       // foo#8i.
       int i = 0;
       do {
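
(A minimal sketch of the lookahead described above: scan past the leading digits and classify the token as an identifier only if identifier characters follow, so "8i" in "foo#8i" lexes as an identifier rather than a number. The classify() helper is hypothetical, not the real LexToken() code.)

```cpp
#include <cctype>

enum class TokKind { IntVal, Id };

TokKind classify(const char *Ptr) {
  int I = 0;
  while (std::isdigit((unsigned char)Ptr[I]))
    ++I;
  // Identifier characters after the digits (e.g. "8i") make this an
  // identifier token; a plain digit run stays an integer literal.
  if (std::isalpha((unsigned char)Ptr[I]) || Ptr[I] == '_')
    return TokKind::Id;
  return TokKind::IntVal;
}
```
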
@@ -422,7 +422,7 @@ tgtok::TokKind TGLexer::LexIdentifier() {
   return Kind;
 }
 
-/// LexInclude - We just read the "include" token.  Get the string token that
+/// LexInclude - We just read the "include" token. Get the string token that
 /// comes next and enter the include.
 bool TGLexer::LexInclude() {
   // The token after the include must be a string.
@@ -461,7 +461,7 @@ void TGLexer::SkipBCPLComment() {
   CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
 }
 
-/// SkipCComment - This skips C-style /**/ comments.  The only difference from C
+/// SkipCComment - This skips C-style /**/ comments. The only difference from C
 /// is that we allow nesting.
 bool TGLexer::SkipCComment() {
   ++CurPtr; // skip the star.
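
(A small self-contained sketch of nested /* ... */ skipping, which is the difference from C that the comment above calls out. The skipNestedComment() helper is hypothetical and only illustrates the depth-counting idea behind SkipCComment().)

```cpp
// Returns a pointer just past the matching "*/", or nullptr if the comment
// is unterminated. Called just after the opening "/*".
const char *skipNestedComment(const char *Ptr, const char *End) {
  unsigned Depth = 1;
  while (Ptr + 1 < End) {
    if (Ptr[0] == '/' && Ptr[1] == '*') {
      ++Depth;       // Nested comment opens.
      Ptr += 2;
    } else if (Ptr[0] == '*' && Ptr[1] == '/') {
      if (--Depth == 0)
        return Ptr + 2;
      Ptr += 2;      // Closed a nested level; keep scanning.
    } else {
      ++Ptr;
    }
  }
  return nullptr;    // Unterminated comment.
}
```
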
@@ -554,8 +554,8 @@ tgtok::TokKind TGLexer::LexNumber() {
   return Base == 2 ? tgtok::BinaryIntVal : tgtok::IntVal;
 }
 
-/// LexBracket - We just read '['.  If this is a code block, return it,
-/// otherwise return the bracket.  Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
+/// LexBracket - We just read '['. If this is a code block, return it,
+/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
 tgtok::TokKind TGLexer::LexBracket() {
   if (CurPtr[0] != '{')
     return tgtok::l_square;
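
(A minimal sketch of the '[{ ... }]' scan the comment above specifies: after "[{", characters are consumed until a '}' immediately followed by ']'. The scanCodeBlock() helper is hypothetical, not the actual LexBracket() implementation.)

```cpp
#include <optional>
#include <string>

// Ptr points just past "[{". Returns the block body without the closing
// "}]", or std::nullopt if the code block is unterminated.
std::optional<std::string> scanCodeBlock(const char *Ptr, const char *End) {
  const char *Start = Ptr;
  while (Ptr + 1 < End) {
    if (Ptr[0] == '}' && Ptr[1] == ']')
      return std::string(Start, Ptr);
    ++Ptr; // A '}' not followed by ']' is part of the block body.
  }
  return std::nullopt;
}
```
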
@@ -687,7 +687,7 @@ tgtok::TokKind TGLexer::prepIsDirective() const {
          NextChar == '\n' ||
          // It looks like TableGen does not support '\r' as the actual
          // carriage return, e.g. getNextChar() treats a single '\r'
-         // as '\n'.  So we do the same here.
+         // as '\n'. So we do the same here.
          NextChar == '\r')
        return Kind;
 
@@ -700,7 +700,7 @@ tgtok::TokKind TGLexer::prepIsDirective() const {
       //     #define/**/ AND #define//
       //
       // These cases will be reported as incorrect after calling
-      // prepLexMacroName().  We could have supported C-style comments
+      // prepLexMacroName(). We could have supported C-style comments
       // after #ifdef/#define, but this would complicate the code
       // for little benefit.
       if (NextChar == '/') {
@@ -733,7 +733,7 @@ void TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
 
 tgtok::TokKind TGLexer::lexPreprocessor(tgtok::TokKind Kind,
                                         bool ReturnNextLiveToken) {
-  // We must be looking at a preprocessing directive.  Eat it!
+  // We must be looking at a preprocessing directive. Eat it!
   prepEatPreprocessorDirective(Kind);
 
   if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
@@ -879,7 +879,7 @@ bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
     tgtok::TokKind Kind = prepIsDirective();
 
     // If we did not find a preprocessing directive or it is #define,
-    // then just skip to the next line.  We do not have to do anything
+    // then just skip to the next line. We do not have to do anything
     // for #define in the line-skipping mode.
     if (Kind == tgtok::Error || Kind == tgtok::Define)
       continue;
@@ -909,7 +909,7 @@ bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
   } while (CurPtr != CurBuf.end());
 
   // We have reached the end of the file, but never left the lines-skipping
-  // mode.  This means there is no matching #endif.
+  // mode. This means there is no matching #endif.
   prepReportPreprocessorStackError();
   return false;
 }
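
(A tiny sketch of the balance property implied above: every #ifdef/#ifndef must be closed by a matching #endif before end of file. The directivesBalanced() helper is hypothetical and unrelated to the actual preprocessor control stack in TGLexer.)

```cpp
#include <string>
#include <vector>

bool directivesBalanced(const std::vector<std::string> &Directives) {
  unsigned Depth = 0;
  for (const std::string &D : Directives) {
    if (D == "#ifdef" || D == "#ifndef") {
      ++Depth;
    } else if (D == "#endif") {
      if (Depth == 0)
        return false; // #endif without a matching #ifdef/#ifndef.
      --Depth;
    }
  }
  return Depth == 0; // Non-zero depth at EOF: a #endif is missing.
}
```
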
@@ -939,8 +939,8 @@ bool TGLexer::prepSkipLineBegin() {
        // Skip C-style comment.
        // Note that we do not care about skipping the C++-style comments.
        // If the line contains "//", it may not contain any processable
-       // preprocessing directive.  Just return CurPtr pointing to
-       // the first '/' in this case.  We also do not care about
+       // preprocessing directive. Just return CurPtr pointing to
+       // the first '/' in this case. We also do not care about
        // incorrect symbols after the first '/' - we are in lines-skipping
        // mode, so incorrect code is allowed to some extent.
 
@@ -968,7 +968,7 @@ bool TGLexer::prepSkipLineBegin() {
     ++CurPtr;
   }
 
-  // We have reached the end of the file.  Return to the lines skipping
+  // We have reached the end of the file. Return to the lines skipping
   // code, and allow it to handle the EOF as needed.
   return true;
 }
@@ -994,7 +994,7 @@ bool TGLexer::prepSkipDirectiveEnd() {
         SkipBCPLComment();
       } else if (NextChar == '*') {
         // When we are skipping C-style comment at the end of a preprocessing
-        // directive, we can skip several lines.  If any meaningful TD token
+        // directive, we can skip several lines. If any meaningful TD token
         // follows the end of the C-style comment on the same line, it will
         // be considered as an invalid usage of TD token.
         // For example, we want to forbid usages like this one:
@@ -1003,7 +1003,7 @@ bool TGLexer::prepSkipDirectiveEnd() {
         //     #define MACRO /* This macro is used
         //                      to ... */ class Class {}
         // One can argue that this should be allowed, but it does not seem
-        // to be worth of the complication.  Moreover, this matches
+        // to be worth of the complication. Moreover, this matches
         // the C preprocessor behavior.
 
         // Set TokStart to the beginning of the comment to enable proper