docsite: remove lexers which have been fixed in Pygments 2.4.0 (#57508)
* Remove lexers which have been fixed in Pygments 2.4.0. * Add Pygments >= 2.4.0 to test runner. * Fix pages that triggered lexer errors. Co-Authored-By: Sviatoslav Sydorenko <wk.cvs.github@sydorenko.org.ua>
This commit is contained in:
parent
cae6304356
commit
505c99265c
8 changed files with 19 additions and 533 deletions
|
@ -3,7 +3,6 @@
|
||||||
#
|
#
|
||||||
# Copyright 2006-2017 by the Pygments team, see AUTHORS at
|
# Copyright 2006-2017 by the Pygments team, see AUTHORS at
|
||||||
# https://bitbucket.org/birkenfeld/pygments-main/raw/7941677dc77d4f2bf0bbd6140ade85a9454b8b80/AUTHORS
|
# https://bitbucket.org/birkenfeld/pygments-main/raw/7941677dc77d4f2bf0bbd6140ade85a9454b8b80/AUTHORS
|
||||||
# Copyright by Kirill Simonov (original author of YAML lexer).
|
|
||||||
# Copyright by Norman Richards (original author of JSON lexer).
|
# Copyright by Norman Richards (original author of JSON lexer).
|
||||||
#
|
#
|
||||||
# Licensed under BSD license:
|
# Licensed under BSD license:
|
||||||
|
@ -43,525 +42,6 @@ from pygments import token
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
class AnsibleYamlLexerContext(LexerContext):
    """Indentation context for the YAML lexer.

    Extends the base ``LexerContext`` with the mutable indentation state the
    YAML lexer callbacks read and write while tokenizing.
    """

    def __init__(self, *args, **kwds):
        super(AnsibleYamlLexerContext, self).__init__(*args, **kwds)
        # Stack of enclosing indentation levels; ``indent`` is the current
        # level (-1 before any block is opened), ``next_indent`` the candidate
        # level for the next block.
        self.indent_stack = []
        self.indent = -1
        self.next_indent = 0
        # Explicit indentation of the current block scalar, or None when the
        # scalar's indentation must be detected from its first line.
        self.block_scalar_indent = None
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleYamlLexer(ExtendedRegexLexer):
    """
    Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
    language.

    .. versionadded:: 0.11
    """

    name = 'YAML'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']

    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                # At line start the whitespace fixes the candidate level; pop
                # levels that the new, shallower indentation has closed.
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                if context.next_indent > context.indent:
                    # Over-indented remainder is emitted as an error token.
                    extra = text[context.indent:]
                    text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start() + len(text), token_class.Error, extra
            context.pos = match.end()
        return callback

    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                # Explicit indicators (e.g. '-', '?', ':') consume width too.
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback

    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                # The indentation indicator is relative to the parent level.
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                # Whitespace past the scalar indent is part of the content.
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start() + context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback

    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    # Dedent ends the scalar: leave content + header states.
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                # Dedent ends the plain scalar: pop two states.
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
                context.pos = match.end()
        return callback

    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', token.Text),
            # line breaks
            (r'\n+', token.Text),
            # a comment
            (r'#[^\n]*', token.Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(token.Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(token.Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(token.Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?!\s|$)', save_indent(token.Text, start=True),
             ('block-line', 'indentation')),
        ],

        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', token.Text),
            # a comment
            (r'#[^\n]*', token.Comment.Single),
            # line break
            (r'\n', token.Text, '#pop:2'),
        ],

        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(token.Text, token.Number), 'ignored-line'),
        ],

        # the %TAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![\w-]*!)'
             r'([ ]+)(!|!?[\w;/?:@&=+$,.!~*\'()\[\]%-]+)',
             bygroups(token.Text, token.Keyword.Type, token.Text, token.Keyword.Type),
             'ignored-line'),
        ],

        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(token.Text), '#pop:2'),
            # whitespaces preceding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(token.Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(token.Punctuation.Indicator)),
            # the beginning of a block line
            (r'[ ]*', save_indent(token.Text), '#pop'),
        ],

        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(token.Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', token.Text),
            # key with colon
            (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
             bygroups(token.Name.Tag, set_indent(token.Punctuation, implicit=True))),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`-]|[?:-]\S)',
             something(token.Name.Variable),
             'plain-scalar-in-block-context'),
        ],

        # tags, anchors, aliases
        'descriptors': [
            # a full-form tag
            (r'!<[\w#;/?:@&=+$,.!~*\'()\[\]%-]+>', token.Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[\w-]+!)?'
             r'[\w#;/?:@&=+$,.!~*\'()\[\]%-]+', token.Keyword.Type),
            # an anchor
            (r'&[\w-]+', token.Name.Label),
            # an alias
            (r'\*[\w-]+', token.Name.Variable),
        ],

        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(token.Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', token.Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],

        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', token.Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', token.Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', token.String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', token.String, 'double-quoted-scalar'),
        ],

        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', token.Text),
            # line breaks
            (r'\n+', token.Text),
            # a comment
            (r'#[^\n]*', token.Comment.Single),
            # simple indicators
            (r'[?:,]', token.Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^\s?:,\[\]{}#&*!|>\'"%@`])',
             something(token.Name.Variable),
             'plain-scalar-in-flow-context'),
        ],

        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', token.Punctuation.Indicator, '#pop'),
        ],

        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # key with colon
            (r'''([^,:?\[\]{}"'\n]+)(:)(?=[ ]|$)''',
             bygroups(token.Name.Tag, token.Punctuation)),
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', token.Punctuation.Indicator, '#pop'),
        ],

        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', token.Text),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(token.Text, token.Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(token.Text)),
            # line content
            (r'[\S\t ]+', token.Name.Constant),
        ],

        # the header of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(token.Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(token.Punctuation.Indicator),
             'ignored-line'),
        ],

        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', token.Text),
            (r'[ ]+$', token.Text),
            # line breaks are ignored
            (r'\n+', token.Text),
            # other whitespaces are a part of the value
            (r'[ ]+', token.Name.Variable),
        ],

        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', token.String.Escape),
            # regular non-whitespace characters
            (r'[^\s\']+', token.String),
            # the closing quote
            (r'\'', token.String, '#pop'),
        ],

        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', token.String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             token.String.Escape),
            # regular non-whitespace characters
            (r'[^\s"\\]+', token.String),
            # the closing quote
            (r'"', token.String, '#pop'),
        ],

        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', token.Text),
            # line breaks
            (r'\n+', token.Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(token.Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(token.Text), '#pop'),
        ],

        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(token.Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', token.Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', token.Text),
            # line breaks are ignored
            (r'\n+', token.Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', token.Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?!\s)|[^\s:])+', token.Literal.Scalar.Plain),
        ],

        # a plain scalar in the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(token.Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', token.Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+', token.Text),
            (r'[ ]+$', token.Text),
            # line breaks are ignored
            (r'\n+', token.Text),
            # other whitespaces are a part of the value
            (r'[ ]+', token.Name.Variable),
            # regular non-whitespace characters
            (r'[^\s,:?\[\]{}]+', token.Name.Variable),
        ],

    }

    def get_tokens_unprocessed(self, text=None, context=None):
        # Ensure the indentation-aware context is used even when the caller
        # does not provide one.
        if context is None:
            context = AnsibleYamlLexerContext(text, 0)
        return super(AnsibleYamlLexer, self).get_tokens_unprocessed(text, context)
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleDjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives,
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    # MULTILINE so ^/$ match line boundaries; DOTALL so '.' spans newlines
    # inside {% comment %} / {% raw %} blocks.
    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', token.Other),
            (r'\{\{', token.Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', token.Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(token.Comment.Preproc, token.Text, token.Keyword, token.Text, token.Comment.Preproc,
                      token.Comment, token.Comment.Preproc, token.Text, token.Keyword, token.Text,
                      token.Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(token.Comment.Preproc, token.Text, token.Keyword, token.Text, token.Comment.Preproc,
                      token.Text, token.Comment.Preproc, token.Text, token.Keyword, token.Text,
                      token.Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
             bygroups(token.Comment.Preproc, token.Text, token.Keyword, token.Text, token.Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(token.Comment.Preproc, token.Text, token.Keyword), 'block'),
            (r'\{', token.Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_]\w*)',
             bygroups(token.Operator, token.Text, token.Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
             bygroups(token.Keyword, token.Text, token.Keyword, token.Text, token.Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', token.Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             token.Keyword),
            (r'(loop|block|super|forloop)\b', token.Name.Builtin),
            (r'[a-zA-Z_][\w-]*', token.Name.Variable),
            (r'\.\w+', token.Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', token.String.Double),
            (r":?'(\\\\|\\'|[^'])*'", token.String.Single),
            (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', token.Operator),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", token.Number),
        ],
        'var': [
            (r'\s+', token.Text),
            (r'(-?)(\}\})', bygroups(token.Text, token.Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', token.Text),
            (r'(-?)(%\})', bygroups(token.Text, token.Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', token.Punctuation)
        ]
    }

    def analyse_text(text):
        # Heuristic score in [0, 0.6] used by Pygments' lexer guessing:
        # template-block/variable markers raise confidence.
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleYamlJinjaLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `AnsibleYamlLexer`.

    Commonly used in Saltstack salt states.

    .. versionadded:: 2.0
    """

    name = 'YAML+Jinja'
    aliases = ['yaml+jinja']
    filenames = ['*.sls']
    mimetypes = ['text/x-yaml+jinja']

    def __init__(self, **options):
        # Delegate: Django/Jinja lexer runs first, remaining text is YAML.
        super(AnsibleYamlJinjaLexer, self).__init__(AnsibleYamlLexer, AnsibleDjangoLexer, **options)
|
|
||||||
|
|
||||||
|
|
||||||
class AnsibleOutputPrimaryLexer(RegexLexer):
|
class AnsibleOutputPrimaryLexer(RegexLexer):
|
||||||
name = 'Ansible-output-primary'
|
name = 'Ansible-output-primary'
|
||||||
|
|
||||||
|
@ -698,9 +178,6 @@ def setup(app):
|
||||||
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
|
See http://www.sphinx-doc.org/en/stable/extdev/index.html#dev-extensions.
|
||||||
"""
|
"""
|
||||||
for lexer in [
|
for lexer in [
|
||||||
AnsibleDjangoLexer(startinline=True),
|
|
||||||
AnsibleYamlLexer(startinline=True),
|
|
||||||
AnsibleYamlJinjaLexer(startinline=True),
|
|
||||||
AnsibleOutputLexer(startinline=True)
|
AnsibleOutputLexer(startinline=True)
|
||||||
]:
|
]:
|
||||||
app.add_lexer(lexer.name, lexer)
|
app.add_lexer(lexer.name, lexer)
|
||||||
|
|
|
@ -4,3 +4,4 @@ PyYAML
|
||||||
rstcheck
|
rstcheck
|
||||||
sphinx
|
sphinx
|
||||||
sphinx-notfound-page
|
sphinx-notfound-page
|
||||||
|
Pygments >= 2.4.0
|
||||||
|
|
|
@ -33,7 +33,7 @@ Comparing ``loop`` and ``with_*``
|
||||||
|
|
||||||
you would need::
|
you would need::
|
||||||
|
|
||||||
loop: [1, [2,3] ,4] | flatten(1)
|
loop: "{{ [1, [2,3] ,4] | flatten(1) }}"
|
||||||
|
|
||||||
* Any ``with_*`` statement that requires using ``lookup`` within a loop should not be converted to use the ``loop`` keyword. For example, instead of doing:
|
* Any ``with_*`` statement that requires using ``lookup`` within a loop should not be converted to use the ``loop`` keyword. For example, instead of doing:
|
||||||
|
|
||||||
|
|
|
@ -105,7 +105,9 @@ fail if the option name is incorrect, a mandatory option is not set, or the
|
||||||
value is not a valid choice. When running Ansible with a verbosity level of 3
|
value is not a valid choice. When running Ansible with a verbosity level of 3
|
||||||
or more (``-vvv``), the return value will contain the possible invocation
|
or more (``-vvv``), the return value will contain the possible invocation
|
||||||
options based on the ``resource_name`` specified. Here is an example of the
|
options based on the ``resource_name`` specified. Here is an example of the
|
||||||
invocation output for the above ``Registry`` task::
|
invocation output for the above ``Registry`` task:
|
||||||
|
|
||||||
|
.. code-block:: ansible-output
|
||||||
|
|
||||||
changed: [2016] => {
|
changed: [2016] => {
|
||||||
"changed": true,
|
"changed": true,
|
||||||
|
|
|
@ -402,9 +402,7 @@ The YAML specification considers the following `escape sequences <http://yaml.or
|
||||||
|
|
||||||
* ``\U........`` -- 8-digit hex escape
|
* ``\U........`` -- 8-digit hex escape
|
||||||
|
|
||||||
Here are some examples on how to write Windows paths:
|
Here are some examples on how to write Windows paths::
|
||||||
|
|
||||||
.. code-block:: yaml+jinja
|
|
||||||
|
|
||||||
# GOOD
|
# GOOD
|
||||||
tempdir: C:\Windows\Temp
|
tempdir: C:\Windows\Temp
|
||||||
|
@ -418,11 +416,16 @@ Here are some examples on how to write Windows paths:
|
||||||
tempdir: 'C:\\Windows\\Temp'
|
tempdir: 'C:\\Windows\\Temp'
|
||||||
tempdir: C:/Windows/Temp
|
tempdir: C:/Windows/Temp
|
||||||
|
|
||||||
|
This is an example which will fail:
|
||||||
|
|
||||||
|
.. code-block:: text
|
||||||
|
|
||||||
# FAILS
|
# FAILS
|
||||||
tempdir: "C:\Windows\Temp"
|
tempdir: "C:\Windows\Temp"
|
||||||
|
|
||||||
|
This example shows the use of single quotes when they are required::
|
||||||
|
|
||||||
---
|
---
|
||||||
# Example of single quotes when they are required
|
|
||||||
- name: Copy tomcat config
|
- name: Copy tomcat config
|
||||||
win_copy:
|
win_copy:
|
||||||
src: log4j.xml
|
src: log4j.xml
|
||||||
|
|
|
@ -78,7 +78,7 @@ EXAMPLES = """
|
||||||
msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}"
|
msg: "{{ lookup('passwordstore', 'example/test create=true overwrite=true')}}"
|
||||||
|
|
||||||
- name: Create an alphanumeric password
|
- name: Create an alphanumeric password
|
||||||
debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true) }}"
|
debug: msg="{{ lookup('passwordstore', 'example/test create=true nosymbols=true') }}"
|
||||||
|
|
||||||
- name: Return the value for user in the KV pair user, username
|
- name: Return the value for user in the KV pair user, username
|
||||||
debug:
|
debug:
|
||||||
|
|
|
@ -39,12 +39,14 @@ options:
|
||||||
EXAMPLES = r"""
|
EXAMPLES = r"""
|
||||||
- name: return skydive metdata if present based on Name
|
- name: return skydive metdata if present based on Name
|
||||||
set_fact:
|
set_fact:
|
||||||
skydive_meta: "{{ lookup('skydive', filter={'query': \"G.V().Has('Name', 'sumit-VirtualBox')\"}) }}"
|
skydive_meta: >-
|
||||||
|
{{ lookup('skydive', filter={'query': "G.V().Has('Name', 'sumit-VirtualBox')"}) }}
|
||||||
|
|
||||||
- name: return all the skydive metdata having parameter Name
|
- name: return all the skydive metdata having parameter Name
|
||||||
set_fact:
|
set_fact:
|
||||||
skydive: "{{ lookup('skydive', filter={'query': \"G.V().Has('Name')\"},
|
skydive: >-
|
||||||
provider={'endpoint': 'localhost:8082', 'username': 'admin', 'password': 'password'}) }}"
|
{{ lookup('skydive', filter={'query': "G.V().Has('Name')"},
|
||||||
|
provider={'endpoint': 'localhost:8082', 'username': 'admin', 'password': 'password'}) }}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
RETURN = """
|
RETURN = """
|
||||||
|
|
|
@ -5,6 +5,7 @@ urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python
|
||||||
pywinrm >= 0.3.0 # message encryption support
|
pywinrm >= 0.3.0 # message encryption support
|
||||||
sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
|
sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
|
||||||
sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
|
sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
|
||||||
|
pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers
|
||||||
wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
|
wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
|
||||||
yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
|
yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
|
||||||
pycrypto >= 2.6 # Need features found in 2.6 and greater
|
pycrypto >= 2.6 # Need features found in 2.6 and greater
|
||||||
|
|
Loading…
Reference in a new issue