def search_in_file(citation_part: str, contents: str):
    """Extract the part of a file's contents referenced by a citation.

    Args:
        citation_part: the anchor taken from the wiki-link; no ``#`` means
            the whole file, ``#Heading`` a section, ``#^block-id`` a block.
        contents: The file contents

    Returns: the part found — the whole contents, the matching section
        joined with newlines, the anchored line with the anchor stripped,
        or ``[]`` when nothing matches.
    """
    data = contents.split('\n')
    if '#' not in citation_part:
        # All text citation
        return contents
    elif '#' in citation_part and '^' not in citation_part:
        # cite from title: collect the heading line and everything below it
        # until a heading of the same or higher level is reached
        sub_section = []
        # '#foo-bar' -> '# foo bar' so it matches a markdown heading line
        citation_part = citation_part.replace('-', ' ').replace('#', '# ')
        heading = 0
        for i in data:
            if citation_part in i and i.startswith('#'):
                # negative count: deeper headings compare as "smaller"
                heading = i.count('#') * (-1)
                sub_section.append([i])
            elif heading != 0:
                inverse = i.count('#') * (-1)
                if inverse == 0 or heading > inverse:
                    sub_section.append([i])
                elif inverse >= heading:
                    break
        sub_section = [x for y in sub_section for x in y]
        sub_section = '\n'.join(sub_section)
        return sub_section
    elif '#^' in citation_part:
        # cite from block: return the line carrying the anchor, with the
        # anchor itself removed
        citation_part = citation_part.replace('#', '')
        for i in data:
            if citation_part in i:
                return i.replace(citation_part, '')
    return []
54
54
55
55
def mini_ez_links (urlo , base , end , url_whitespace , url_case ):
56
56
base , url_blog = base
57
- url_blog_path = [x for x in url_blog .split ("/" ) if len (x ) > 0 ]
57
+ url_blog_path = [x for x in url_blog .split ('/' ) if len (x ) > 0 ]
58
58
url_blog_path = url_blog_path [len (url_blog_path ) - 1 ]
59
59
all_docs = [
60
- re .sub (rf"(.*)({ url_blog_path } )?/docs/*" , "" , x .replace (" \\ " , "/" )).replace (
61
- " .md" , ""
60
+ re .sub (rf"(.*)({ url_blog_path } )?/docs/*" , '' , x .replace (' \\ ' , '/' )).replace (
61
+ ' .md' , ''
62
62
)
63
- for x in iglob (str (base ) + os .sep + "**" , recursive = True )
63
+ for x in iglob (str (base ) + os .sep + '**' , recursive = True )
64
64
if os .path .isfile (x )
65
65
]
66
- file_name = urlo [2 ].replace (" index" , "" )
66
+ file_name = urlo [2 ].replace (' index' , '' )
67
67
file_found = [
68
- "/" + x for x in all_docs if os .path .basename (x ) == file_name or x == file_name
68
+ '/' + x for x in all_docs if os .path .basename (x ) == file_name or x == file_name
69
69
]
70
70
if file_found :
71
- file_path = file_found [0 ].replace (base , "" )
72
- url = file_path .replace (" \\ " , "/" ).replace (" .md" , "" )
73
- url = url .replace ("//" , "/" )
71
+ file_path = file_found [0 ].replace (base , '' )
72
+ url = file_path .replace (' \\ ' , '/' ).replace (' .md' , '' )
73
+ url = url .replace ('//' , '/' )
74
74
url = url_blog [:- 1 ] + quote (url )
75
75
if not url .startswith (('https:/' , 'http:/' )):
76
76
url = 'https://' + url
@@ -82,8 +82,8 @@ def mini_ez_links(urlo, base, end, url_whitespace, url_case):
82
82
83
83
84
84
def cite (md_link_path , link , soup , citation_part , config ):
85
- """
86
- Append the content of the founded file to the original file.
85
+ """Append the content of the founded file to the original file.
86
+
87
87
Args:
88
88
md_link_path: File found
89
89
link: Line with the citation
@@ -92,21 +92,21 @@ def cite(md_link_path, link, soup, citation_part, config):
92
92
config: the config file
93
93
Returns: updated HTML
94
94
"""
95
- docs = config [" docs_dir" ]
96
- url = config [" site_url" ]
95
+ docs = config [' docs_dir' ]
96
+ url = config [' site_url' ]
97
97
md_config = {
98
- " mdx_wikilink_plus" : {
99
- " base_url" : (docs , url ),
100
- " build_url" : mini_ez_links ,
101
- " image_class" : " wikilink" ,
98
+ ' mdx_wikilink_plus' : {
99
+ ' base_url' : (docs , url ),
100
+ ' build_url' : mini_ez_links ,
101
+ ' image_class' : ' wikilink' ,
102
102
}
103
103
}
104
104
new_uri = str (md_link_path ).replace (str (docs ), str (url ))
105
- new_uri = new_uri .replace (" \\ " , "/" )
106
- new_uri = new_uri .replace (" .md" , "/" )
107
- new_uri = new_uri .replace ("//" , "/" )
108
- new_uri = re .sub (" https?:\/" , " \g<0>/" , new_uri )
109
- input_file = codecs .open (str (md_link_path ), mode = "r" , encoding = " utf-8" )
105
+ new_uri = new_uri .replace (' \\ ' , '/' )
106
+ new_uri = new_uri .replace (' .md' , '/' )
107
+ new_uri = new_uri .replace ('//' , '/' )
108
+ new_uri = re .sub (' https?:\/' , ' \g<0>/' , new_uri )
109
+ input_file = codecs .open (str (md_link_path ), mode = 'r' , encoding = ' utf-8' )
110
110
text = input_file .read ()
111
111
112
112
contents = frontmatter .loads (text ).content
@@ -115,35 +115,36 @@ def cite(md_link_path, link, soup, citation_part, config):
115
115
html = markdown .markdown (
116
116
quote ,
117
117
extensions = [
118
- " nl2br" ,
119
- " footnotes" ,
120
- " attr_list" ,
121
- " mdx_breakless_lists" ,
122
- " smarty" ,
123
- " sane_lists" ,
124
- " tables" ,
125
- " admonition" ,
126
- WikiLinkPlusExtension (md_config [" mdx_wikilink_plus" ]),
118
+ ' nl2br' ,
119
+ ' footnotes' ,
120
+ ' attr_list' ,
121
+ ' mdx_breakless_lists' ,
122
+ ' smarty' ,
123
+ ' sane_lists' ,
124
+ ' tables' ,
125
+ ' admonition' ,
126
+ WikiLinkPlusExtension (md_config [' mdx_wikilink_plus' ]),
127
127
],
128
128
)
129
- link_soup = BeautifulSoup (html , " html.parser" )
129
+ link_soup = BeautifulSoup (html , ' html.parser' )
130
130
if link_soup :
131
131
tooltip_template = (
132
- "<a href='"
133
- + str (new_uri )
134
- + "' class='link_citation'><i class='fas fa-link'></i> </a> <div"
135
- " class='citation'>"
136
- + str (link_soup ).replace (
137
- '!<img class="wikilink' , '<img class="wikilink'
138
- )
139
- + " </div>"
132
+ "<a href='"
133
+ + str (new_uri )
134
+ + "' class='link_citation'><i class='fas fa-link'></i> </a> <div"
135
+ " class='citation'>"
136
+ + str (link_soup ).replace (
137
+ '!<img class="wikilink' , '<img class="wikilink'
138
+ )
139
+ + ' </div>'
140
140
)
141
141
else :
142
142
tooltip_template = (
143
- "<div class='not_found'>" + str (link ["src" ].replace ("/" , "" )) + "</div>"
143
+ "<div class='not_found'>" +
144
+ str (link ['src' ].replace ('/' , '' )) + '</div>'
144
145
)
145
146
new_soup = str (soup ).replace (str (link ), str (tooltip_template ))
146
- soup = BeautifulSoup (new_soup , " html.parser" )
147
+ soup = BeautifulSoup (new_soup , ' html.parser' )
147
148
return soup
148
149
149
150
def search_doc(md_link_path, all_docs):
    """Find the real path of a linked markdown file among the known docs.

    Args:
        md_link_path: candidate path built from a wiki-link; its basename
            may be a bare '.md' when the link pointed at a folder
        all_docs: all markdown file paths found under the docs directory

    Returns: Path to link found or 0 otherwise
    """
    if os.path.basename(md_link_path) == '.md':
        # The link pointed at a directory, so the stem is empty: fall back
        # to that directory's index page.
        md_link_path = str(md_link_path).replace(
            f'{os.sep}.md', f'{os.sep}index.md')
    else:
        md_link_path = str(md_link_path).replace(f'{os.sep}.md', '')
    # Hoist the loop-invariant Path construction out of the comprehension.
    wanted = Path(md_link_path)
    matches = [x for x in all_docs if Path(x) == wanted]
    return matches[0] if matches else 0
class EmbedFile(BasePlugin):
    """MkDocs plugin that embeds cited file contents into rendered pages.

    Scans each rendered page for ``<img>`` tags that are not real images
    (Obsidian-style embed links), resolves the markdown file they point to,
    and replaces the tag with the cited content via ``cite``.
    """

    # Single string option; empty by default.
    config_scheme = (('param', config_options.Type(str, default='')),)

    def __init__(self):
        self.enabled = True
        self.total_time = 0

    def on_post_page(self, output_content, page, config):
        """Rewrite embed links in the final HTML of *page*.

        Args:
            output_content: the rendered HTML of the page
            page: the MkDocs page object (used for its source path)
            config: the MkDocs config (``docs_dir``, ``site_url``, ...)

        Returns: the updated HTML as a string
        """
        soup = BeautifulSoup(output_content, 'html.parser')
        docs = Path(config['docs_dir'])
        md_link_path = ''
        all_docs = [
            x
            for x in iglob(str(docs) + os.sep + '**', recursive=True)
            if x.endswith('.md')
        ]

        # Embed links render as <img> tags whose src is not a real image.
        for link in soup.findAll(
            'img',
            src=lambda src: src is not None
            and 'favicon' not in src
            and not src.endswith(('png', 'jpg', 'jpeg', 'gif')),
        ):
            if len(link['src']) > 0:

                if link['src'][0] == '.':
                    # Relative link: drop the './' prefix and trailing '/',
                    # then map it back to its markdown source file.
                    md_src_path = link['src'][3:-1] + '.md'
                    md_src_path = md_src_path.replace('.m.md', '.md')
                    md_link_path = os.path.join(
                        os.path.dirname(page.file.abs_src_path), md_src_path
                    )
                    md_link_path = Path(unquote(md_link_path)).resolve()

                elif link['src'][0] == '/':
                    # Site-absolute link: resolve against the docs dir.
                    # NOTE(review): os.path.join discards docs_dir when
                    # md_src_path is absolute — confirm this is intended.
                    if link['src'].endswith('/'):
                        md_src_path = link['src'][:-1] + '.md'
                    else:
                        md_src_path = link['src'] + '.md'
                    md_link_path = os.path.join(
                        config['docs_dir'], md_src_path)
                    md_link_path = Path(unquote(md_link_path)).resolve()

                elif link['src'][0] != '#':
                    # Plain relative link (no './' prefix).
                    if link['src'].endswith('/'):
                        md_src_path = link['src'][:-1] + '.md'
                    else:
                        md_src_path = link['src'] + '.md'
                    md_link_path = os.path.join(
                        os.path.dirname(page.file.abs_src_path), md_src_path
                    )
                    md_link_path = Path(unquote(md_link_path)).resolve()
                else:
                    # In-page anchor ('#...'): still resolve relative to
                    # the current page's source file.
                    md_src_path = link['src'] + '.md'
                    md_link_path = os.path.join(
                        os.path.dirname(page.file.abs_src_path), md_src_path
                    )
                    md_link_path = Path(unquote(md_link_path)).resolve()

            if md_link_path != '' and len(link['src']) > 0:
                if '#' in link.get('alt', ''):
                    # heading citation carried in the alt text
                    citation_part = re.sub(r'^(.*)#', '#', link['alt'])
                elif '#' in link.get('src', ''):
                    citation_part = re.sub(r'^(.*)#', '#', link['src'])
                else:
                    citation_part = link.get('alt', False)
                if citation_part:
                    # Strip any '#anchor' that ended up inside the path.
                    md_link_path = re.sub(
                        r'#(.*)\.md', '.md', str(md_link_path))
                    md_link_path = md_link_path.replace('\\.md', '.md')
                    md_link_path = Path(md_link_path)
                    if os.path.isfile(md_link_path):
                        soup = cite(md_link_path, link, soup,
                                    citation_part, config)
                    else:
                        link_found = search_doc(md_link_path, all_docs)
                        if link_found != 0:
                            soup = cite(link_found, link, soup,
                                        citation_part, config)
        return str(soup)
0 commit comments