Coverage for pygments.lexer : 49%

# -*- coding: utf-8 -*-
"""
    pygments.lexer
    ~~~~~~~~~~~~~~

    Base lexer classes.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from pygments.util import make_analysator, text_type, add_metaclass, \
    iteritems, Future, guess_decode
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
           'default', 'words']
_encoding_map = [(b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]
""" This metaclass automagically converts ``analyse_text`` methods into static methods which always return float values. """
""" Lexer for a specific language.
Basic options recognized:
``stripnl``
    Strip leading and trailing newlines from the input (default: True).
``stripall``
    Strip all leading and trailing whitespace from the input
    (default: False).
``ensurenl``
    Make sure that the input ends with a newline (default: True).  This
    is required for some lexers that consume input linewise.
.. versionadded:: 1.3
``tabsize``
    If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
    If given, must be an encoding name. This encoding will be used to
    convert the input string to Unicode, if it is not already a Unicode
    string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
    Latin1 detection).  Can also be ``'chardet'`` to use the chardet
    library, if it is installed.
``inencoding``
    Overrides the ``encoding`` if given.
"""
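# Example (sketch, not part of this module): these options are simply passed
# as keyword arguments when instantiating a concrete lexer.
from pygments.lexers import PythonLexer

lexer = PythonLexer(stripall=True, tabsize=4, encoding='utf-8')
for tokentype, value in lexer.get_tokens("if x:\n\tprint(x)\n"):
    print(tokentype, repr(value))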
#: Name of the lexer
name = None

#: Shortcuts for the lexer
aliases = []

#: File name globs
filenames = []

#: Secondary file name globs
alias_filenames = []

#: MIME types
mimetypes = []

#: Priority, should multiple lexers match and no content is provided
priority = 0
self.add_filter(filter_)
if self.options:
    return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                             self.options)
else:
    return '<pygments.lexers.%s>' % self.__class__.__name__
""" Add a new stream filter to this lexer. """ if not isinstance(filter_, Filter): filter_ = get_filter_by_name(filter_, **options) self.filters.append(filter_)
""" Has to return a float between ``0`` and ``1`` that indicates if a lexer wants to highlight this text. Used by ``guess_lexer``. If this method returns ``0`` it won't highlight it in any case, if it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return value was ``0.0``.
"""
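# Example (sketch, not part of this module): a hypothetical subclass
# overriding analyse_text.  The metaclass wraps it, so no ``self`` is taken.
from pygments.lexer import RegexLexer
from pygments.token import Text

class HypotheticalConfigLexer(RegexLexer):
    name = 'HypotheticalConfig'
    tokens = {'root': [(r'.+\n?', Text)]}

    def analyse_text(text):
        return 0.8 if text.startswith('#!config') else 0.0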
""" Return an iterable of (tokentype, value) pairs generated from `text`. If `unfiltered` is set to `True`, the filtering mechanism is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if wanted and applies registered filters. """ elif self.encoding == 'chardet': try: import chardet except ImportError: raise ImportError('To enable chardet encoding guessing, ' 'please install the chardet library ' 'from http://chardet.feedparser.org/') # check for BOM first decoded = None for bom, encoding in _encoding_map: if text.startswith(bom): decoded = text[len(bom):].decode(encoding, 'replace') break # no BOM found, so use chardet if decoded is None: enc = chardet.detect(text[:1024]) # Guess using first 1KB decoded = text.decode(enc.get('encoding') or 'utf-8', 'replace') text = decoded else: text = text.decode(self.encoding) if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):] else: if text.startswith(u'\ufeff'): text = text[len(u'\ufeff'):]
# text now *is* a unicode string text = text.strip() text = text.expandtabs(self.tabsize)
""" Return an iterable of (index, tokentype, value) pairs where "index" is the starting position of the token within the input text.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
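# Example (sketch, not part of this module): unlike get_tokens(), the
# unprocessed variant also reports each token's starting index.
from pygments.lexers import PythonLexer

for index, tokentype, value in PythonLexer().get_tokens_unprocessed('x = 1\n'):
    print(index, tokentype, repr(value))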
""" This lexer takes two lexer as arguments. A root lexer and a language lexer. First everything is scanned using the language lexer, afterwards all ``Other`` tokens are lexed using the root lexer.
The lexers from the ``template`` lexer package use this base lexer. """
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
    if t is self.needle:
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
            lng_buffer = []
        buffered += v
    else:
        lng_buffer.append((i, t, v))
if lng_buffer:
    insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
                     self.root_lexer.get_tokens_unprocessed(buffered))
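# Example (sketch, not part of this module): a hypothetical template lexer
# built on DelegatingLexer.  In practice the language lexer must emit
# ``Other`` tokens for the parts the root (here HTML) lexer should handle.
from pygments.lexer import DelegatingLexer
from pygments.lexers import HtmlLexer, PythonLexer

class HypotheticalPythonTemplateLexer(DelegatingLexer):
    name = 'HypotheticalPythonTemplate'

    def __init__(self, **options):
        DelegatingLexer.__init__(self, HtmlLexer, PythonLexer, **options)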
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
""" Indicates that a state should include rules from another state. """
""" Indicates the a state should inherit from its superclass. """
""" Indicates a state combined from multiple states. """
# tuple.__init__ doesn't do anything
""" A pseudo match object constructed from a string. """
self._text = text
self._start = start
return self._start
return self._start + len(self._text)
if arg:
    raise IndexError('No such group')
return self._text
return (self._text,)
return {}
""" Callback that yields multiple actions for each group in the match. """ continue else: data = match.group(i + 1) if data is not None: if ctx: ctx.pos = match.start(i + 1) for item in action(lexer, _PseudoMatch(match.start(i + 1), data), ctx): if item: yield item ctx.pos = match.end()
""" Special singleton used for indicating the caller class. Used by ``using``. """
""" Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which is handled separately.
`state` specifies the state that the new lexer will start in, and can be an enumerable such as ('root', 'inline', 'string') or a simple string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
    gt_kwargs['stack'] = s
else:
    gt_kwargs['stack'] = ('root', s)
def callback(lexer, match, ctx=None):
    # if keyword arguments are given the callback
    # function has to create a new lexer instance
    if kwargs:
        # XXX: cache that somehow
        kwargs.update(lexer.options)
        lx = lexer.__class__(**kwargs)
    else:
        lx = lexer
    s = match.start()
    for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
        yield i + s, t, v
    if ctx:
        ctx.pos = match.end()
else:
    def callback(lexer, match, ctx=None):
        # XXX: cache that somehow
        kwargs.update(lexer.options)
        lx = _other(**kwargs)

        s = match.start()
        for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
            yield i + s, t, v
        if ctx:
            ctx.pos = match.end()
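# Example (sketch, not part of this module): using() hands a matched span to
# another lexer; using(this) would re-enter the current lexer instead.
from pygments.lexer import RegexLexer, using
from pygments.lexers import PythonLexer
from pygments.token import Text

class HypotheticalShellDocLexer(RegexLexer):
    name = 'HypotheticalShellDoc'
    tokens = {
        'root': [
            # lines starting with '>>>' are re-lexed as Python code
            (r'>>>.*\n', using(PythonLexer)),
            (r'.*\n', Text),
        ],
    }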
""" Indicates a state or state action (e.g. #pop) to apply. For example default('#pop') is equivalent to ('', Token, '#pop') Note that state tuples may be used as well.
.. versionadded:: 2.0 """
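# Example (sketch, not part of this module): default() as a fallback
# transition when no other rule in the state matches.
from pygments.lexer import RegexLexer, default
from pygments.token import Keyword, Text

class HypotheticalBlockLexer(RegexLexer):
    name = 'HypotheticalBlock'
    tokens = {
        'root': [
            (r'begin\b', Keyword, 'block'),
            (r'\s+', Text),
        ],
        'block': [
            (r'end\b', Keyword, '#pop'),
            (r'\s+', Text),
            default('#pop'),  # leave 'block' without consuming anything
        ],
    }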
""" Indicates a list of literal words that is transformed into an optimized regex that matches any of the words.
.. versionadded:: 2.0 """
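# Example (sketch, not part of this module): words() builds an optimized
# regex from a fixed vocabulary; suffix=r'\b' keeps whole-word matching.
from pygments.lexer import RegexLexer, words
from pygments.token import Keyword, Name, Text

class HypotheticalKeywordLexer(RegexLexer):
    name = 'HypotheticalKeyword'
    tokens = {
        'root': [
            (words(('if', 'else', 'while', 'return'), suffix=r'\b'), Keyword),
            (r'\w+', Name),
            (r'\s+', Text),
        ],
    }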
""" Metaclass for RegexLexer, creates the self._tokens attribute from self.tokens on the first instantiation. """
"""Preprocess the regular expression component of a token definition."""
"""Preprocess the token component of a token definition.""" 'token type must be simple type or callable, not %r' % (token,)
"""Preprocess the state transition action of a token definition.""" # an existing state elif new_state == '#push': return new_state elif new_state[:5] == '#pop:': return -int(new_state[5:]) else: assert False, 'unknown new state %r' % new_state # combine a new state from existing ones processed, istate)) elif isinstance(new_state, tuple): # push more than one state for istate in new_state: assert (istate in unprocessed or istate in ('#pop', '#push')), \ 'unknown new state ' + istate return new_state else: assert False, 'unknown new state def %r' % new_state
"""Preprocess a single state definition.""" # it's a state reference str(tdef))) # should be processed already, but may not in the case of: # 1. the state has no counterpart in any parent # 2. the state includes more than one 'inherit' continue
except Exception as err:
    raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                     (tdef[0], state, cls, err))
else:
    new_state = cls._process_new_state(tdef[2],
                                       unprocessed, processed)
"""Preprocess a dictionary of token definitions."""
""" Merge tokens from superclasses in MRO order, returning a single tokendef dictionary.
Any state that is not defined by a subclass will be inherited automatically. States that *are* defined by subclasses will, by default, override that state in the superclass. If a subclass wishes to inherit definitions from a superclass, it can use the special value "inherit", which will cause the superclass' state definition to be included at that point in the state. """
# N.b. because this is assigned by reference, sufficiently
# deep hierarchies are processed incrementally (e.g. for
# A(B), B(C), C(RegexLexer), B will be premodified so X(B)
# will not see any inherits in B).
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
    continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
    # N.b. this is the index in items (that is, the superclass
    # copy), so offset required when storing below.
    new_inh_ndx = items.index(inherit)
except ValueError:
    pass
else:
    inheritable[state] = inherit_ndx + new_inh_ndx
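# Example (sketch, not part of this module): a hypothetical subclass splicing
# its own rules into an inherited state via ``inherit``.
from pygments.lexer import RegexLexer, inherit
from pygments.token import Comment, Keyword, Text

class HypotheticalBaseLexer(RegexLexer):
    name = 'HypotheticalBase'
    tokens = {
        'root': [
            (r'\bkeyword\b', Keyword),
            (r'\s+', Text),
            (r'.', Text),
        ],
    }

class HypotheticalExtendedLexer(HypotheticalBaseLexer):
    name = 'HypotheticalExtended'
    tokens = {
        'root': [
            (r'#.*\n', Comment),  # new rule, tried first
            inherit,              # then everything from the parent 'root'
        ],
    }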
"""Instantiate cls after preprocessing its token definitions.""" # don't process yet pass else:
""" Base for simple stateful regular expression-based lexers. Simplifies the lexing process so that you need only provide a list of states and regular expressions. """
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
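# Example (sketch, not part of this module): a complete minimal RegexLexer
# with a state transition.  The opening quote pushes 'string'; '#pop' returns.
from pygments.lexer import RegexLexer
from pygments.token import Keyword, Name, String, Text

class HypotheticalMiniLexer(RegexLexer):
    name = 'HypotheticalMini'
    aliases = ['hypomini']
    filenames = ['*.mini']

    tokens = {
        'root': [
            (r'\b(let|in)\b', Keyword),
            (r'"', String, 'string'),
            (r'\w+', Name),
            (r'\s+', Text),
        ],
        'string': [
            (r'[^"\\]+', String),
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
        ],
    }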
""" Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
else:
    # state transition
    statestack.pop()
    statestack.append(statestack[-1])
else:
    # pop
elif new_state == '#push':
    statestack.append(statestack[-1])
else:
    assert False, "wrong state def: %r" % new_state
else:
    # at EOL, reset state to "root"
    statestack = ['root']
    statetokens = tokendefs['root']
    yield pos, Text, u'\n'
    pos += 1
    continue
yield pos, Error, text[pos]
pos += 1
""" A helper object that holds lexer position data. """
self.text = text
self.pos = pos
self.end = end or len(text)  # end=0 not supported ;-)
self.stack = stack or ['root']
return 'LexerContext(%r, %r, %r)' % (
    self.text, self.pos, self.stack)
""" A RegexLexer that uses a context object to store its state. """
""" Split ``text`` into (tokentype, text) pairs. If ``context`` is given, use this lexer context instead. """ tokendefs = self._tokens if not context: ctx = LexerContext(text, 0) statetokens = tokendefs['root'] else: ctx = context statetokens = tokendefs[ctx.stack[-1]] text = ctx.text while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] # CAUTION: callback must set ctx.pos! if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to "root" ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, u'\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break
""" Helper for lexers which must combine the results of several sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs. Each ``itokens`` iterable should be inserted at position ``index`` into the token stream given by the ``tokens`` argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
    index, itokens = next(insertions)
except StopIteration:
    # no insertions
    for item in tokens:
        yield item
    return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
    # first iteration. store the position of first item
    if realpos is None:
        realpos = i
    oldi = 0
    while insleft and i + len(v) >= index:
        tmpval = v[oldi:index - i]
        yield realpos, t, tmpval
        realpos += len(tmpval)
        for it_index, it_token, it_value in itokens:
            yield realpos, it_token, it_value
            realpos += len(it_value)
        oldi = index - i
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
    yield realpos, t, v[oldi:]
    realpos += len(v) - oldi
# leftover tokens
while insleft:
    # no normal tokens, set realpos to zero
    realpos = realpos or 0
    for p, t, v in itokens:
        yield realpos, t, v
        realpos += len(v)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        insleft = False
        break  # not strictly necessary
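# Example (sketch, not part of this module): merging prompt tokens into the
# stream of another lexer, the way console-session lexers typically do.
from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

code = 'print("hi")\n'
insertions = [(0, [(0, Generic.Prompt, '>>> ')])]  # insert a prompt at index 0
for index, tokentype, value in do_insertions(
        insertions, PythonLexer().get_tokens_unprocessed(code)):
    print(index, tokentype, repr(value))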
"""Metaclass for ProfilingRegexLexer, collects regex timing info."""
if isinstance(regex, words):
    rex = regex_opt(regex.words, prefix=regex.prefix,
                    suffix=regex.suffix)
else:
    rex = regex
compiled = re.compile(rex, rflags)
def match_func(text, pos, endpos=sys.maxsize):
    info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
    t0 = time.time()
    res = compiled.match(text, pos, endpos)
    t1 = time.time()
    info[0] += 1
    info[1] += t1 - t0
    return res
return match_func
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
# this needs to be a stack, since using(this) will produce nested calls
self.__class__._prof_data.append({})
for tok in RegexLexer.get_tokens_unprocessed(self, text, stack):
    yield tok
rawdata = self.__class__._prof_data.pop()
data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                n, 1000 * t, 1000 * t / n)
               for ((s, r), (n, t)) in rawdata.items()),
              key=lambda x: x[self._prof_sort_index],
              reverse=True)
sum_total = sum(x[3] for x in data)
print()
print('Profiling result for %s lexing %d chars in %.3f ms' %
      (self.__class__.__name__, len(text), sum_total))
print('=' * 110)
print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
print('-' * 110)
for d in data:
    print('%-20s %-65s %5d %8.4f %8.4f' % d)
print('=' * 110)
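# Example (sketch, not part of this module): subclassing ProfilingRegexLexer
# prints a timing table for every regex once lexing finishes.
from pygments.lexer import ProfilingRegexLexer
from pygments.token import Name, Text

class HypotheticalProfiledLexer(ProfilingRegexLexer):
    name = 'HypotheticalProfiled'
    tokens = {
        'root': [
            (r'\w+', Name),
            (r'\s+', Text),
        ],
    }

for _ in HypotheticalProfiledLexer().get_tokens('alpha beta gamma\n'):
    pass  # the profiling table is printed when the stream is exhausted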