Skip to content

Commit

Permalink
trim Python2 <-> 3 hybridization (#153)
Browse files Browse the repository at this point in the history
  • Loading branch information
a-detiste authored Oct 6, 2024
1 parent d025140 commit 7a8f3a8
Show file tree
Hide file tree
Showing 10 changed files with 8 additions and 28 deletions.
4 changes: 2 additions & 2 deletions asttokens/asttokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ def get_text(self, node, padded=True):
return self._text[start: end]


class ASTTokens(ASTTextBase, object):
class ASTTokens(ASTTextBase):
"""
ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
as tokens, and is used to mark and access token and position information.
Expand Down Expand Up @@ -284,7 +284,7 @@ def get_text_positions(self, node, padded):
return start, end


class ASTText(ASTTextBase, object):
class ASTText(ASTTextBase):
"""
Supports the same ``get_text*`` methods as ``ASTTokens``,
but uses the AST to determine the text positions instead of tokens.
Expand Down
2 changes: 1 addition & 1 deletion asttokens/line_numbers.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@

_line_start_re = re.compile(r'^', re.M)

class LineNumbers(object):
class LineNumbers:
"""
Class to convert between character offsets in a text string, and pairs (line, column) of 1-based
line and 0-based column numbers, as used by tokens and AST nodes.
Expand Down
2 changes: 1 addition & 1 deletion asttokens/mark_tokens.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@
}


class MarkTokens(object):
class MarkTokens:
"""
Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes
to each of them. This is the heart of the token-marking logic.
Expand Down
18 changes: 4 additions & 14 deletions asttokens/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,7 @@ class EnhancedAST(AST):

AstNode = Union[EnhancedAST, NodeNG]

if sys.version_info[0] == 2:
TokenInfo = Tuple[int, str, Tuple[int, int], Tuple[int, int], str]
else:
TokenInfo = tokenize.TokenInfo
TokenInfo = tokenize.TokenInfo


def token_repr(tok_type, string):
Expand Down Expand Up @@ -341,7 +338,7 @@ def replace(text, replacements):
return ''.join(parts)


class NodeMethods(object):
class NodeMethods:
"""
Helper to get `visit_{node_type}` methods given a node's class and cache the results.
"""
Expand All @@ -363,14 +360,7 @@ def get(self, obj, cls):
return method


if sys.version_info[0] == 2:
# Python 2 doesn't support non-ASCII identifiers, and making the real patched_generate_tokens support Python 2
# means working with raw tuples instead of tokenize.TokenInfo namedtuples.
def patched_generate_tokens(original_tokens):
# type: (Iterable[TokenInfo]) -> Iterator[TokenInfo]
return iter(original_tokens)
else:
def patched_generate_tokens(original_tokens):
def patched_generate_tokens(original_tokens):
# type: (Iterable[TokenInfo]) -> Iterator[TokenInfo]
"""
Fixes tokens yielded by `tokenize.generate_tokens` to handle more non-ASCII characters in identifiers.
Expand All @@ -395,7 +385,7 @@ def patched_generate_tokens(original_tokens):
for combined_token in combine_tokens(group):
yield combined_token

def combine_tokens(group):
def combine_tokens(group):
# type: (List[tokenize.TokenInfo]) -> List[tokenize.TokenInfo]
if not any(tok.type == tokenize.ERRORTOKEN for tok in group) or len({tok.line for tok in group}) != 1:
return group
Expand Down
2 changes: 0 additions & 2 deletions tests/test_astroid.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function

import astroid

from asttokens import ASTTokens
Expand Down
1 change: 0 additions & 1 deletion tests/test_asttokens.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import ast
import six
import token
Expand Down
1 change: 0 additions & 1 deletion tests/test_line_numbers.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from .context import asttokens

Expand Down
2 changes: 0 additions & 2 deletions tests/test_mark_tokens.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function

import ast
import inspect
import io
Expand Down
2 changes: 0 additions & 2 deletions tests/test_util.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function

import ast
import io
import sys
Expand Down
2 changes: 0 additions & 2 deletions tests/tools.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
from __future__ import unicode_literals, print_function

import ast
import io
import os
Expand Down

0 comments on commit 7a8f3a8

Please sign in to comment.