
Commit 855b377

fix: Plugin broke with last update
1 parent f6ecc5e commit 855b377

2 files changed: +132 -128 lines


environment.yml

Lines changed: 1 addition & 0 deletions

@@ -0,0 +1 @@
+name: Publish

mkdocs_embed_file_plugins/plugin.py

Lines changed: 131 additions & 128 deletions
@@ -21,56 +21,56 @@ def search_in_file(citation_part: str, contents: str):
         contents: The file contents
     Returns: the part found
     """
-    data = contents.split("\n")
-    if "#" not in citation_part:
+    data = contents.split('\n')
+    if '#' not in citation_part:
         # All text citation
         return contents
-    elif "#" in citation_part and not "^" in citation_part:
+    elif '#' in citation_part and not '^' in citation_part:
         # cite from title
         sub_section = []
-        citation_part = citation_part.replace("-", " ").replace("#", "# ")
+        citation_part = citation_part.replace('-', ' ').replace('#', '# ')
         heading = 0
         for i in data:
-            if citation_part in i and i.startswith("#"):
-                heading = i.count("#") * (-1)
+            if citation_part in i and i.startswith('#'):
+                heading = i.count('#') * (-1)
                 sub_section.append([i])
             elif heading != 0:
-                inverse = i.count("#") * (-1)
+                inverse = i.count('#') * (-1)
                 if inverse == 0 or heading > inverse:
                     sub_section.append([i])
                 elif inverse >= heading:
                     break
         sub_section = [x for y in sub_section for x in y]
-        sub_section = "\n".join(sub_section)
+        sub_section = '\n'.join(sub_section)
         return sub_section
-    elif "#^" in citation_part:
+    elif '#^' in citation_part:
         # cite from block
-        citation_part = citation_part.replace("#", "")
+        citation_part = citation_part.replace('#', '')
         for i in data:
             if citation_part in i:
-                return i.replace(citation_part, "")
+                return i.replace(citation_part, '')
     return []


 def mini_ez_links(urlo, base, end, url_whitespace, url_case):
     base, url_blog = base
-    url_blog_path = [x for x in url_blog.split("/") if len(x) > 0]
+    url_blog_path = [x for x in url_blog.split('/') if len(x) > 0]
     url_blog_path = url_blog_path[len(url_blog_path) - 1]
     all_docs = [
-        re.sub(rf"(.*)({url_blog_path})?/docs/*", "", x.replace("\\", "/")).replace(
-            ".md", ""
+        re.sub(rf"(.*)({url_blog_path})?/docs/*", '', x.replace('\\', '/')).replace(
+            '.md', ''
         )
-        for x in iglob(str(base) + os.sep + "**", recursive=True)
+        for x in iglob(str(base) + os.sep + '**', recursive=True)
         if os.path.isfile(x)
     ]
-    file_name = urlo[2].replace("index", "")
+    file_name = urlo[2].replace('index', '')
     file_found = [
-        "/" + x for x in all_docs if os.path.basename(x) == file_name or x == file_name
+        '/' + x for x in all_docs if os.path.basename(x) == file_name or x == file_name
     ]
     if file_found:
-        file_path = file_found[0].replace(base, "")
-        url = file_path.replace("\\", "/").replace(".md", "")
-        url = url.replace("//", "/")
+        file_path = file_found[0].replace(base, '')
+        url = file_path.replace('\\', '/').replace('.md', '')
+        url = url.replace('//', '/')
         url = url_blog[:-1] + quote(url)
         if not url.startswith(('https:/', 'http:/')):
             url = 'https://' + url
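The changes in this hunk are quote-style reformatting only; the section-extraction logic in search_in_file is unchanged: it records the depth of the matched heading as a negative count of '#' characters and keeps collecting lines until it meets a heading of equal or higher level. A small standalone illustration of that behaviour (the note text is made up; the import path mirrors the file path shown in this diff):

# Illustration only: exercising search_in_file exactly as defined above.
# Hypothetical note content; import path taken from this diff's file path.
from mkdocs_embed_file_plugins.plugin import search_in_file

note = "\n".join([
    "# Title",
    "intro",
    "## Section A",
    "text under A",
    "### Sub A1",
    "more text",
    "## Section B",
    "other text",
])

# "#Section-A" is rewritten to "# Section A", which matches the "## Section A"
# heading; lines are then collected until the next same-or-higher-level heading.
print(search_in_file("#Section-A", note))
# -> ## Section A
#    text under A
#    ### Sub A1
#    more text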
@@ -82,8 +82,8 @@ def mini_ez_links(urlo, base, end, url_whitespace, url_case):


 def cite(md_link_path, link, soup, citation_part, config):
-    """
-    Append the content of the founded file to the original file.
+    """Append the content of the founded file to the original file.
+
     Args:
         md_link_path: File found
         link: Line with the citation
@@ -92,21 +92,21 @@ def cite(md_link_path, link, soup, citation_part, config):
         config: the config file
     Returns: updated HTML
     """
-    docs = config["docs_dir"]
-    url = config["site_url"]
+    docs = config['docs_dir']
+    url = config['site_url']
     md_config = {
-        "mdx_wikilink_plus": {
-            "base_url": (docs, url),
-            "build_url": mini_ez_links,
-            "image_class": "wikilink",
+        'mdx_wikilink_plus': {
+            'base_url': (docs, url),
+            'build_url': mini_ez_links,
+            'image_class': 'wikilink',
         }
     }
     new_uri = str(md_link_path).replace(str(docs), str(url))
-    new_uri = new_uri.replace("\\", "/")
-    new_uri = new_uri.replace(".md", "/")
-    new_uri = new_uri.replace("//", "/")
-    new_uri = re.sub("https?:\/", "\g<0>/", new_uri)
-    input_file = codecs.open(str(md_link_path), mode="r", encoding="utf-8")
+    new_uri = new_uri.replace('\\', '/')
+    new_uri = new_uri.replace('.md', '/')
+    new_uri = new_uri.replace('//', '/')
+    new_uri = re.sub('https?:\/', '\g<0>/', new_uri)
+    input_file = codecs.open(str(md_link_path), mode='r', encoding='utf-8')
     text = input_file.read()

     contents = frontmatter.loads(text).content
@@ -115,35 +115,36 @@ def cite(md_link_path, link, soup, citation_part, config):
     html = markdown.markdown(
         quote,
         extensions=[
-            "nl2br",
-            "footnotes",
-            "attr_list",
-            "mdx_breakless_lists",
-            "smarty",
-            "sane_lists",
-            "tables",
-            "admonition",
-            WikiLinkPlusExtension(md_config["mdx_wikilink_plus"]),
+            'nl2br',
+            'footnotes',
+            'attr_list',
+            'mdx_breakless_lists',
+            'smarty',
+            'sane_lists',
+            'tables',
+            'admonition',
+            WikiLinkPlusExtension(md_config['mdx_wikilink_plus']),
         ],
     )
-    link_soup = BeautifulSoup(html, "html.parser")
+    link_soup = BeautifulSoup(html, 'html.parser')
     if link_soup:
         tooltip_template = (
-            "<a href='"
-            + str(new_uri)
-            + "' class='link_citation'><i class='fas fa-link'></i> </a> <div"
-            " class='citation'>"
-            + str(link_soup).replace(
-                '!<img class="wikilink', '<img class="wikilink'
-            )
-            + "</div>"
+            "<a href='"
+            + str(new_uri)
+            + "' class='link_citation'><i class='fas fa-link'></i> </a> <div"
+            " class='citation'>"
+            + str(link_soup).replace(
+                '!<img class="wikilink', '<img class="wikilink'
+            )
+            + '</div>'
         )
     else:
         tooltip_template = (
-            "<div class='not_found'>" + str(link["src"].replace("/", "")) + "</div>"
+            "<div class='not_found'>" +
+            str(link['src'].replace('/', '')) + '</div>'
         )
     new_soup = str(soup).replace(str(link), str(tooltip_template))
-    soup = BeautifulSoup(new_soup, "html.parser")
+    soup = BeautifulSoup(new_soup, 'html.parser')
     return soup


@@ -156,92 +157,94 @@ def search_doc(md_link_path, all_docs):
     Returns: Path to link found or 0 otherwise

     """
-    if os.path.basename(md_link_path) == ".md":
-        md_link_path = str(md_link_path).replace(f"{os.sep}.md", f"{os.sep}index.md")
+    if os.path.basename(md_link_path) == '.md':
+        md_link_path = str(md_link_path).replace(
+            f'{os.sep}.md', f'{os.sep}index.md')
     else:
-        md_link_path = str(md_link_path).replace(f"{os.sep}.md", "")
+        md_link_path = str(md_link_path).replace(f'{os.sep}.md', '')
     file = [x for x in all_docs if Path(x) == Path(md_link_path)]
     if len(file) > 0:
         return file[0]
     return 0


-def on_post_page(output_content, page, config):
-    soup = BeautifulSoup(output_content, "html.parser")
-    docs = Path(config["docs_dir"])
-    md_link_path = ""
-    all_docs = [
-        x
-        for x in iglob(str(docs) + os.sep + "**", recursive=True)
-        if x.endswith(".md")
-    ]
-
-    for link in soup.findAll(
-        "img",
-        src=lambda src: src is not None
-        and "favicon" not in src
-        and not src.endswith(("png", "jpg", "jpeg", "gif")),
-    ):
-        if len(link["src"]) > 0:
-
-            if link["src"][0] == ".":
-                md_src_path = link["src"][3:-1] + ".md"
-                md_src_path = md_src_path.replace(".m.md", ".md")
-                md_link_path = os.path.join(
-                    os.path.dirname(page.file.abs_src_path), md_src_path
-                )
-                md_link_path = Path(unquote(md_link_path)).resolve()
-
-            elif link["src"][0] == "/":
-                if link["src"].endswith("/"):
-                    md_src_path = link["src"][:-1] + ".md"
-                else:
-                    md_src_path = link["src"] + ".md"
-                md_link_path = os.path.join(config["docs_dir"], md_src_path)
-                md_link_path = Path(unquote(md_link_path)).resolve()
+class EmbedFile(BasePlugin):
+    config_scheme = (('param', config_options.Type(str, default='')),)

-            elif link["src"][0] != "#":
-                if link["src"].endswith("/"):
-                    md_src_path = link["src"][:-1] + ".md"
-                else:
-                    md_src_path = link["src"] + ".md"
+    def __init__(self):
+        self.enabled = True
+        self.total_time = 0

+    def on_post_page(self, output_content, page, config):
+        soup = BeautifulSoup(output_content, 'html.parser')
+        docs = Path(config['docs_dir'])
+        md_link_path = ''
+        all_docs = [
+            x
+            for x in iglob(str(docs) + os.sep + '**', recursive=True)
+            if x.endswith('.md')
+        ]
+
+        for link in soup.findAll(
+            'img',
+            src=lambda src: src is not None and 'favicon' not in src and not src.endswith(
+                ('png', 'jpg', 'jpeg', 'gif')),
+        ):
+            if len(link['src']) > 0:
+
+                if link['src'][0] == '.':
+                    md_src_path = link['src'][3:-1] + '.md'
+                    md_src_path = md_src_path.replace('.m.md', '.md')
+                    md_link_path = os.path.join(
+                        os.path.dirname(page.file.abs_src_path), md_src_path
+                    )
+                    md_link_path = Path(unquote(md_link_path)).resolve()
+
+                elif link['src'][0] == '/':
+                    if link['src'].endswith('/'):
+                        md_src_path = link['src'][:-1] + '.md'
+                    else:
+                        md_src_path = link['src'] + '.md'
+                    md_link_path = os.path.join(
+                        config['docs_dir'], md_src_path)
+                    md_link_path = Path(unquote(md_link_path)).resolve()
+
+                elif link['src'][0] != '#':
+                    if link['src'].endswith('/'):
+                        md_src_path = link['src'][:-1] + '.md'
+                    else:
+                        md_src_path = link['src'] + '.md'
+
+                    md_link_path = os.path.join(
+                        os.path.dirname(page.file.abs_src_path), md_src_path
+                    )
+                    md_link_path = Path(unquote(md_link_path)).resolve()
+                else:
+                    md_src_path = link['src'] + '.md'
                 md_link_path = os.path.join(
                     os.path.dirname(page.file.abs_src_path), md_src_path
                 )
                 md_link_path = Path(unquote(md_link_path)).resolve()
-            else:
-                md_src_path = link["src"] + ".md"
-                md_link_path = os.path.join(
-                    os.path.dirname(page.file.abs_src_path), md_src_path
-                )
-                md_link_path = Path(unquote(md_link_path)).resolve()
-
-            if md_link_path != "" and len(link["src"]) > 0:
-                if "#" in link.get("alt", ""):
-                    # heading
-                    citation_part = re.sub("^(.*)#", "#", link["alt"])
-                elif "#" in link.get("src", ""):
-                    citation_part = re.sub("^(.*)#", "#", link["src"])
-                else:
-                    citation_part = link.get("alt", False)
-                if citation_part:
-                    md_link_path = re.sub("#(.*)\.md", ".md", str(md_link_path))
-                    md_link_path = md_link_path.replace("\.md", ".md")
-                    md_link_path = Path(md_link_path)
-                    if os.path.isfile(md_link_path):
-                        soup = cite(md_link_path, link, soup, citation_part, config)
-                    else:
-                        link_found = search_doc(md_link_path, all_docs)
-                        if link_found != 0:
-                            soup = cite(link_found, link, soup, citation_part, config)
-    return soup.original_encoding
-

-class EmbedFile(BasePlugin):
-
-    config_scheme = (("param", config_options.Type(str, default="")),)
-
-    def __init__(self):
-        self.enabled = True
-        self.total_time = 0
+
+                if md_link_path != '' and len(link['src']) > 0:
+                    if '#' in link.get('alt', ''):
+                        # heading
+                        citation_part = re.sub('^(.*)#', '#', link['alt'])
+                    elif '#' in link.get('src', ''):
+                        citation_part = re.sub('^(.*)#', '#', link['src'])
+                    else:
+                        citation_part = link.get('alt', False)
+                    if citation_part:
+                        md_link_path = re.sub(
+                            '#(.*)\.md', '.md', str(md_link_path))
+                        md_link_path = md_link_path.replace('\.md', '.md')
+                        md_link_path = Path(md_link_path)
+                        if os.path.isfile(md_link_path):
+                            soup = cite(md_link_path, link, soup,
+                                        citation_part, config)
+                        else:
+                            link_found = search_doc(md_link_path, all_docs)
+                            if link_found != 0:
+                                soup = cite(link_found, link, soup,
+                                            citation_part, config)
+        return str(soup)
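The actual fix sits in this last hunk: the module-level on_post_page function moves into the EmbedFile(BasePlugin) class, and the hook now returns str(soup) instead of soup.original_encoding, so MkDocs receives the rewritten page HTML rather than an encoding name. A minimal sketch of that hook contract, reduced to the parts visible in the diff (the body comment stands in for the embed rewriting done by cite()):

# Minimal sketch, not the full plugin: an MkDocs plugin class whose
# on_post_page hook must hand the final page HTML back as a string.
from bs4 import BeautifulSoup
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin


class EmbedFile(BasePlugin):
    config_scheme = (('param', config_options.Type(str, default='')),)

    def on_post_page(self, output_content, page, config):
        soup = BeautifulSoup(output_content, 'html.parser')
        # ... rewrite embed <img> tags into citation blocks here (see cite()) ...
        return str(soup)  # returning soup.original_encoding here is what broke pages

For this to take effect the class still has to be registered as an MkDocs plugin entry point and enabled under plugins: in mkdocs.yml; neither of those files appears in this commit.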
