To champion the single-responsibility and open/closed principles, we have tried to make it relatively painless to extend marked. If you are looking to add custom functionality, this is the place to start.
`marked.use(options)` is the recommended way to extend marked. The `options` object can contain any option available in marked.

The `renderer` and `tokenizer` options can be an object with functions that will be merged into the `renderer` and `tokenizer`, respectively. The `renderer` and `tokenizer` functions can return `false` to fall back to the previous function.

All other options will overwrite previously set options.
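For example, regular options can be passed through the same call (a minimal sketch; `gfm` and `breaks` are standard marked options):

```js
const marked = require('marked');

// Plain options merge too; later use() calls overwrite earlier ones.
marked.use({ gfm: true, breaks: false });
```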
The renderer defines the output of the parser.
Example: Overriding the default `heading` token by adding an embedded anchor tag like on GitHub.
```js
// Create reference instance
const marked = require('marked');

// Override function
const renderer = {
  heading(text, level) {
    const escapedText = text.toLowerCase().replace(/[^\w]+/g, '-');

    return `
            <h${level}>
              <a name="${escapedText}" class="anchor" href="#${escapedText}">
                <span class="header-link"></span>
              </a>
              ${text}
            </h${level}>`;
  }
};

marked.use({ renderer });

// Run marked
console.log(marked('# heading+'));
```
Output:

```html
<h1>
  <a name="heading-" class="anchor" href="#heading-">
    <span class="header-link"></span>
  </a>
  heading+
</h1>
```
Block level renderer methods:

- code(string code, string infostring, boolean escaped)
- blockquote(string quote)
- html(string html)
- heading(string text, number level, string raw, Slugger slugger)
- hr()
- list(string body, boolean ordered, number start)
- listitem(string text, boolean task, boolean checked)
- checkbox(boolean checked)
- paragraph(string text)
- table(string header, string body)
- tablerow(string content)
- tablecell(string content, object flags)
`slugger` has the `slug` method to create a unique id from a value:

```js
slugger.slug('foo')   // foo
slugger.slug('foo')   // foo-1
slugger.slug('foo')   // foo-2
slugger.slug('foo 1') // foo-1-1
slugger.slug('foo-1') // foo-1-2
...
```
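A sketch of a `heading` override that delegates id generation to the slugger (the markup here is illustrative; the signature matches the list above):

```js
const renderer = {
  heading(text, level, raw, slugger) {
    // slugger guarantees a unique id even when heading text repeats
    const id = slugger.slug(raw);
    return `<h${level} id="${id}">${text}</h${level}>\n`;
  }
};

marked.use({ renderer });
```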
`flags` has the following properties:

```js
{
  header: true || false,
  align: 'center' || 'left' || 'right'
}
```
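As a sketch, a `tablecell` override might use both properties (the exact attribute handling is an assumption for illustration):

```js
const renderer = {
  tablecell(content, flags) {
    const tag = flags.header ? 'th' : 'td'; // header cells become <th>
    const align = flags.align ? ` align="${flags.align}"` : '';
    return `<${tag}${align}>${content}</${tag}>\n`;
  }
};

marked.use({ renderer });
```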
Inline level renderer methods:

- strong(string text)
- em(string text)
- codespan(string code)
- br()
- del(string text)
- link(string href, string title, string text)
- image(string href, string title, string text)
- text(string text)
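Inline methods merge the same way as block methods. For example, a hypothetical `link` override that opens every link in a new tab:

```js
const renderer = {
  link(href, title, text) {
    const titleAttr = title ? ` title="${title}"` : '';
    return `<a href="${href}"${titleAttr} target="_blank" rel="noopener">${text}</a>`;
  }
};

marked.use({ renderer });

console.log(marked('[marked](https://marked.js.org)'));
// <p><a href="https://marked.js.org" target="_blank" rel="noopener">marked</a></p>
```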
The tokenizer defines how to turn markdown text into tokens.
Example: Overriding the default `codespan` tokenizer to include LaTeX.
```js
// Create reference instance
const marked = require('marked');

// Override function
const tokenizer = {
  codespan(src) {
    const match = src.match(/^\$+([^\$\n]+?)\$+/);
    if (match) {
      return {
        type: 'codespan',
        raw: match[0],
        text: match[1].trim()
      };
    }

    // return false to use original codespan tokenizer
    return false;
  }
};

marked.use({ tokenizer });

// Run marked
console.log(marked('$ latex code $\n\n` other code `'));
```
Output:

```html
<p><code>latex code</code></p>
<p><code>other code</code></p>
```
Block level tokenizer methods:

- space(string src)
- code(string src, array tokens)
- fences(string src)
- heading(string src)
- nptable(string src)
- hr(string src)
- blockquote(string src)
- list(string src)
- html(string src)
- def(string src)
- table(string src)
- lheading(string src)
- paragraph(string src)
- text(string src)
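Block tokenizers work like the inline `codespan` example above: match at the start of `src` and return a token, or return `false` to defer to the default. A hypothetical sketch that turns lines wrapped in triple colons into level-6 headings:

```js
const tokenizer = {
  heading(src) {
    // hypothetical syntax: ":::text:::" on its own line
    const match = src.match(/^:::(.+?):::(?:\n+|$)/);
    if (match) {
      return {
        type: 'heading',
        raw: match[0],
        depth: 6,
        text: match[1].trim()
      };
    }

    // return false to use the original heading tokenizer
    return false;
  }
};

marked.use({ tokenizer });
```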
Inline level tokenizer methods:

- escape(string src)
- tag(string src, bool inLink, bool inRawBlock)
- link(string src)
- reflink(string src, object links)
- strong(string src)
- em(string src)
- codespan(string src)
- br(string src)
- del(string src)
- autolink(string src, function mangle)
- url(string src, function mangle)
- inlineText(string src, bool inRawBlock, function smartypants)
`mangle` is a method that changes text to HTML character references:

```js
mangle('test@example.com')
// e.g. "&#x74;&#101;&#x73;&#116;&#x40;..." (each character becomes a decimal or hex character reference)
```
`smartypants` is a method that translates plain ASCII punctuation characters into "smart" typographic punctuation HTML entities: https://daringfireball.net/projects/smartypants/

```js
smartypants('"this ... string"')
// "“this … string”"
```
The lexer takes a markdown string and calls the tokenizer functions.

The parser takes tokens as input and calls the renderer functions.

You also have direct access to the lexer and parser if you so desire.

```js
const tokens = marked.lexer(markdown, options);
console.log(marked.parser(tokens, options));
```

```js
const lexer = new marked.Lexer(options);
const tokens = lexer.lex(markdown);
console.log(tokens);
console.log(lexer.tokenizer.rules.block); // block level rules used
console.log(lexer.tokenizer.rules.inline); // inline level rules used
console.log(marked.Lexer.rules.block); // all block level rules
console.log(marked.Lexer.rules.inline); // all inline level rules
```
```bash
$ node
> require('marked').lexer('> I am using marked.')
[
  {
    type: "blockquote",
    raw: "> I am using marked.",
    tokens: [
      {
        type: "paragraph",
        raw: "I am using marked.",
        text: "I am using marked.",
        tokens: [
          {
            type: "text",
            raw: "I am using marked.",
            text: "I am using marked."
          }
        ]
      }
    ]
  },
  links: {}
]
```
The Lexer builds an array of tokens, which will be passed to the Parser. The Parser processes each token in the token array:
```js
const marked = require('marked');

const md = `
 # heading

 [link][1]

 [1]: #heading "heading"
`;

const tokens = marked.lexer(md);
console.log(tokens);

const html = marked.parser(tokens);
console.log(html);
```
```js
[
  {
    type: "heading",
    raw: " # heading\n\n",
    depth: 1,
    text: "heading",
    tokens: [
      {
        type: "text",
        raw: "heading",
        text: "heading"
      }
    ]
  },
  {
    type: "paragraph",
    raw: " [link][1]",
    text: " [link][1]",
    tokens: [
      {
        type: "text",
        raw: " ",
        text: " "
      },
      {
        type: "link",
        raw: "[link][1]",
        text: "link",
        href: "#heading",
        title: "heading",
        tokens: [
          {
            type: "text",
            raw: "link",
            text: "link"
          }
        ]
      }
    ]
  },
  {
    type: "space",
    raw: "\n\n"
  },
  links: {
    "1": {
      href: "#heading",
      title: "heading"
    }
  }
]
```
```html
<h1 id="heading">heading</h1>
<p> <a href="#heading" title="heading">link</a></p>
```
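The same flow can be written against the classes directly; a sketch reusing the `md` string from above:

```js
const lexer = new marked.Lexer();
const parser = new marked.Parser();

console.log(parser.parse(lexer.lex(md)));
```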