@@ -205,7 +205,7 @@ def source_line_to_tokens(obj: object) -> Dict[int, List[Dict[str, Union[str, int]]]]:
     for token_type, token, (start_line, start_column), (end_line, end_column), line in tokenize_source(obj):
         line_to_tokens.setdefault(start_line, []).append({
             'token_type': token_type,
-            'token': bytes(token, encoding='ascii').decode('unicode-escape'),
+            'token': token,
             'start_line': start_line,
             'start_column': start_column,
             'end_line': end_line,
@@ -241,8 +241,21 @@ def get_class_variables(cls: type) -> Dict[str, Dict[str, str]]:
             and token["token"][:1] in {'"', "'"}
         ):
             sep = " " if variable_to_comment[class_variable]["comment"] else ""
+
+            # Identify the quote character (single or double)
             quote_char = token["token"][:1]
-            variable_to_comment[class_variable]["comment"] += sep + token["token"].strip(quote_char).strip()
+
+            # Count the quote characters at the start of the string
+            num_quote_chars = len(token["token"]) - len(token["token"].lstrip(quote_char))
+
+            # Remove that many quote characters from the start and end of the string
+            token["token"] = token["token"][num_quote_chars:-num_quote_chars]
+
+            # Remove unicode escape sequences (e.g. \")
+            token["token"] = bytes(token["token"], encoding='ascii').decode('unicode-escape')
+
+            # Add the token to the comment, stripping whitespace
+            variable_to_comment[class_variable]["comment"] += sep + token["token"].strip()
 
         # Match class variable
         class_variable = None
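For context, here is a minimal repro of the behavior this patch changes (the example string is mine, not from the patch). The old pipeline decoded escape sequences before stripping quotes, so a decoded \" at the edge of the comment was indistinguishable from the string's own delimiter and was stripped along with it; the new pipeline slices off exactly the opening and closing quote characters first, then decodes:

# Raw STRING token text exactly as tokenize reports it: delimiters and
# backslash escapes are still part of the token.
raw = '"she said \\"hi\\""'

# Old pipeline: decode escapes, then strip(quote_char) -- the decoded inner
# quote now sits at the token's edge and is stripped along with the delimiter.
old = bytes(raw, encoding='ascii').decode('unicode-escape').strip('"').strip()
print(old)  # she said "hi   <- closing quote of "hi" is lost

# New pipeline: slice off exactly the delimiter characters, then decode.
quote_char = raw[:1]
num_quote_chars = len(raw) - len(raw.lstrip(quote_char))
inner = raw[num_quote_chars:-num_quote_chars]
new = bytes(inner, encoding='ascii').decode('unicode-escape').strip()
print(new)  # she said "hi"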
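Counting the leading quote characters, rather than stripping all of them, is also what lets the same code handle single- and triple-quoted comment strings alike. A quick sketch with illustrative strings of my own:

# Slicing by the counted delimiter length works for '...' and '''...''' alike
# (the unicode-escape decode is omitted here since these strings have no escapes).
for raw in ('"single-quoted comment"', '"""triple-quoted comment"""'):
    quote_char = raw[:1]
    num_quote_chars = len(raw) - len(raw.lstrip(quote_char))
    print(raw[num_quote_chars:-num_quote_chars])
# single-quoted comment
# triple-quoted comment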