@@ -233,9 +233,11 @@ def _flush_markdown(start_line, token, md_metadata):
         meta = nbf.from_dict(md_metadata)
         if md_source:
             source_map.append(start_line)
-            notebook.cells.append(
-                nbf_version.new_markdown_cell(source=md_source, metadata=meta)
-            )
+            cell = nbf_version.new_markdown_cell(source=md_source, metadata=meta)
+            # remove randomly generated cell IDs
+            if "id" in cell:
+                del cell["id"]
+            notebook.cells.append(cell)
 
     # iterate through the tokens to identify notebook cells
     nesting_level = 0
@@ -260,9 +262,13 @@ def _flush_markdown(start_line, token, md_metadata):
             )
             meta = nbf.from_dict(options)
             source_map.append(token_map[0] + 1)
-            notebook.cells.append(
-                nbf_version.new_code_cell(source="\n".join(body_lines), metadata=meta)
+            cell = nbf_version.new_code_cell(
+                source="\n".join(body_lines), metadata=meta
             )
+            # remove randomly generated cell IDs
+            if "id" in cell:
+                del cell["id"]
+            notebook.cells.append(cell)
             md_metadata = {}
             md_start_line = token_map[1]
 
@@ -271,9 +277,11 @@ def _flush_markdown(start_line, token, md_metadata):
             options, body_lines = _read_fenced_cell(token, len(notebook.cells), "Raw")
             meta = nbf.from_dict(options)
             source_map.append(token_map[0] + 1)
-            notebook.cells.append(
-                nbf_version.new_raw_cell(source="\n".join(body_lines), metadata=meta)
-            )
+            cell = nbf_version.new_raw_cell(source="\n".join(body_lines), metadata=meta)
+            # remove randomly generated cell IDs
+            if "id" in cell:
+                del cell["id"]
+            notebook.cells.append(cell)
             md_metadata = {}
             md_start_line = token_map[1]
 
0 commit comments