Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)

This commit is contained in:
2025-04-24 00:42:19 -06:00
parent 785265d3e7
commit 9c68cdea84
7786 changed files with 2386458 additions and 217 deletions

View File

@ -0,0 +1,43 @@
"""Support functions for testing scripts in the Tools directory."""
import contextlib
import importlib
import os.path
import unittest
from test import support
from test.support import import_helper
if not support.has_subprocess_support:
raise unittest.SkipTest("test module requires subprocess")
basepath = os.path.normpath(
os.path.dirname( # <src/install dir>
os.path.dirname( # Lib
os.path.dirname( # test
os.path.dirname(__file__))))) # test_tools
toolsdir = os.path.join(basepath, 'Tools')
scriptsdir = os.path.join(toolsdir, 'scripts')
def skip_if_missing(tool=None):
    """Skip the calling test module when the Tools subdirectory is absent.

    With no argument (or a falsy one), checks for Tools/scripts; otherwise
    checks for Tools/<tool>.
    """
    if not tool:
        tool, tooldir = 'scripts', scriptsdir
    else:
        tooldir = os.path.join(toolsdir, tool)
    if not os.path.isdir(tooldir):
        raise unittest.SkipTest(f'{tool} directory could not be found')
@contextlib.contextmanager
def imports_under_tool(name, *subdirs):
    """Context manager: temporarily add Tools/<name>/<subdirs...> to sys.path."""
    path = os.path.join(toolsdir, name, *subdirs)
    with import_helper.DirsOnSysPath(path) as cm:
        yield cm
def import_tool(toolname):
    """Import *toolname* with Tools/scripts temporarily on sys.path."""
    with import_helper.DirsOnSysPath(scriptsdir):
        module = importlib.import_module(toolname)
    return module
def load_tests(*args):
    # unittest load_tests protocol hook: discover the tests in this package.
    return support.load_package_tests(os.path.dirname(__file__), *args)

View File

@ -0,0 +1,4 @@
# Entry point so the package can be run as "python -m test.test_tools";
# unittest picks up the package's load_tests hook.
from test.test_tools import load_tests
import unittest

unittest.main()

View File

@ -0,0 +1,45 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2000-01-01 00:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.5\n"
#: escapes.py:5
msgid ""
"\"\t\n"
"\r\\"
msgstr ""
#: escapes.py:8
msgid ""
"\000\001\002\003\004\005\006\007\010\t\n"
"\013\014\r\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
msgstr ""
#: escapes.py:13
msgid " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
msgstr ""
#: escapes.py:17
msgid "\177"
msgstr ""
#: escapes.py:20
msgid "€   ÿ"
msgstr ""
#: escapes.py:23
msgid "α ㄱ 𓂀"
msgstr ""

View File

@ -0,0 +1,40 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2000-01-01 00:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.5\n"
#: docstrings.py:7
#, docstring
msgid ""
msgstr ""
#: docstrings.py:18
#, docstring
msgid ""
"multiline\n"
" docstring\n"
" "
msgstr ""
#: docstrings.py:25
#, docstring
msgid "docstring1"
msgstr ""
#: docstrings.py:30
#, docstring
msgid "Hello, {}!"
msgstr ""

View File

@ -0,0 +1,41 @@
# Test docstring extraction
from gettext import gettext as _
# Empty docstring
def test(x):
""""""
# Leading empty line
def test2(x):
"""docstring""" # XXX This should be extracted but isn't.
# XXX Multiline docstrings should be cleaned with `inspect.cleandoc`.
def test3(x):
"""multiline
docstring
"""
# Multiple docstrings - only the first should be extracted
def test4(x):
"""docstring1"""
"""docstring2"""
def test5(x):
"""Hello, {}!""".format("world!") # XXX This should not be extracted.
# Nested docstrings
def test6(x):
def inner(y):
"""nested docstring""" # XXX This should be extracted but isn't.
class Outer:
class Inner:
"nested class docstring" # XXX This should be extracted but isn't.

View File

@ -0,0 +1,45 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2000-01-01 00:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.5\n"
#: escapes.py:5
msgid ""
"\"\t\n"
"\r\\"
msgstr ""
#: escapes.py:8
msgid ""
"\000\001\002\003\004\005\006\007\010\t\n"
"\013\014\r\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
msgstr ""
#: escapes.py:13
msgid " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
msgstr ""
#: escapes.py:17
msgid "\177"
msgstr ""
#: escapes.py:20
msgid "\302\200 \302\240 \303\277"
msgstr ""
#: escapes.py:23
msgid "\316\261 \343\204\261 \360\223\202\200"
msgstr ""

View File

@ -0,0 +1,23 @@
import gettext as _
# Special characters that are always escaped in the POT file
_('"\t\n\r\\')
# All ascii characters 0-31
_('\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n'
'\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15'
'\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f')
# All ascii characters 32-126
_(' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
# ascii char 127
_('\x7f')
# some characters in the 128-255 range
_('\x80 \xa0 ÿ')
# some characters >= 256 encoded as 2, 3 and 4 bytes, respectively
_('α ㄱ 𓂀')

View File

@ -0,0 +1,35 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2000-01-01 00:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.5\n"
#: fileloc.py:5 fileloc.py:6
msgid "foo"
msgstr ""
#: fileloc.py:9
msgid "bar"
msgstr ""
#: fileloc.py:14 fileloc.py:18
#, docstring
msgid "docstring"
msgstr ""
#: fileloc.py:22 fileloc.py:26
#, docstring
msgid "baz"
msgstr ""

View File

@ -0,0 +1,26 @@
# Test file locations
from gettext import gettext as _
# Duplicate strings
_('foo')
_('foo')
# Duplicate strings on the same line should only add one location to the output
_('bar'), _('bar')
# Duplicate docstrings
class A:
"""docstring"""
def f():
"""docstring"""
# Duplicate message and docstring
_('baz')
def g():
"""baz"""

View File

@ -0,0 +1,67 @@
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR ORGANIZATION
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2000-01-01 00:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.5\n"
#: messages.py:5
msgid ""
msgstr ""
#: messages.py:8 messages.py:9
msgid "parentheses"
msgstr ""
#: messages.py:12
msgid "Hello, world!"
msgstr ""
#: messages.py:15
msgid ""
"Hello,\n"
" multiline!\n"
msgstr ""
#: messages.py:29
msgid "Hello, {}!"
msgstr ""
#: messages.py:33
msgid "1"
msgstr ""
#: messages.py:33
msgid "2"
msgstr ""
#: messages.py:34 messages.py:35
msgid "A"
msgstr ""
#: messages.py:34 messages.py:35
msgid "B"
msgstr ""
#: messages.py:36
msgid "set"
msgstr ""
#: messages.py:42
msgid "nested string"
msgstr ""
#: messages.py:47
msgid "baz"
msgstr ""

View File

@ -0,0 +1,64 @@
# Test message extraction
from gettext import gettext as _
# Empty string
_("")
# Extra parentheses
(_("parentheses"))
((_("parentheses")))
# Multiline strings
_("Hello, "
"world!")
_("""Hello,
multiline!
""")
# Invalid arguments
_()
_(None)
_(1)
_(False)
_(x="kwargs are not allowed")
_("foo", "bar")
_("something", x="something else")
# .format()
_("Hello, {}!").format("world") # valid
_("Hello, {}!".format("world")) # invalid
# Nested structures
_("1"), _("2")
arr = [_("A"), _("B")]
obj = {'a': _("A"), 'b': _("B")}
{{{_('set')}}}
# Nested functions and classes
def test():
_("nested string") # XXX This should be extracted but isn't.
[_("nested string")]
class Foo:
def bar(self):
return _("baz")
def bar(x=_('default value')): # XXX This should be extracted but isn't.
pass
def baz(x=[_('default value')]): # XXX This should be extracted but isn't.
pass
# Shadowing _()
def _(x):
pass
def _(x="don't extract me"):
pass

View File

@ -0,0 +1 @@
[]

Binary file not shown.

View File

@ -0,0 +1,23 @@
# Fuzzy translations are not written to the .mo file.
#, fuzzy
msgid "foo"
msgstr "bar"
# comment
#, fuzzy
msgctxt "abc"
msgid "foo"
msgstr "bar"
#, fuzzy
# comment
msgctxt "xyz"
msgid "foo"
msgstr "bar"
#, fuzzy
msgctxt "abc"
msgid "One email sent."
msgid_plural "%d emails sent."
msgstr[0] "One email sent."
msgstr[1] "%d emails sent."

View File

@ -0,0 +1,58 @@
[
[
"",
"Project-Id-Version: PACKAGE VERSION\nPO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\nLast-Translator: FULL NAME <EMAIL@ADDRESS>\nLanguage-Team: LANGUAGE <LL@li.org>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=UTF-8\nContent-Transfer-Encoding: 8bit\n"
],
[
"\n newlines \n",
"\n translated \n"
],
[
"\"escapes\"",
"\"translated\""
],
[
"Multilinestring",
"Multilinetranslation"
],
[
"abc\u0004foo",
"bar"
],
[
"bar",
"baz"
],
[
"xyz\u0004foo",
"bar"
],
[
[
"One email sent.",
0
],
"One email sent."
],
[
[
"One email sent.",
1
],
"%d emails sent."
],
[
[
"abc\u0004One email sent.",
0
],
"One email sent."
],
[
[
"abc\u0004One email sent.",
1
],
"%d emails sent."
]
]

Binary file not shown.

View File

@ -0,0 +1,47 @@
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"POT-Creation-Date: 2024-10-26 18:06+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
msgid "foo"
msgstr ""
msgid "bar"
msgstr "baz"
msgctxt "abc"
msgid "foo"
msgstr "bar"
# comment
msgctxt "xyz"
msgid "foo"
msgstr "bar"
msgid "Multiline"
"string"
msgstr "Multiline"
"translation"
msgid "\"escapes\""
msgstr "\"translated\""
msgid "\n newlines \n"
msgstr "\n translated \n"
msgid "One email sent."
msgid_plural "%d emails sent."
msgstr[0] "One email sent."
msgstr[1] "%d emails sent."
msgctxt "abc"
msgid "One email sent."
msgid_plural "%d emails sent."
msgstr[0] "One email sent."
msgstr[1] "%d emails sent."

View File

@ -0,0 +1,37 @@
"""Sanity-check tests for the "freeze" tool."""
import sys
import textwrap
import unittest
from test import support
from test.support import os_helper
from test.test_tools import imports_under_tool, skip_if_missing
skip_if_missing('freeze')
with imports_under_tool('freeze', 'test'):
import freeze as helper
# Building and freezing a Python interpreter is expensive; the decorator
# stack restricts this test to platforms/builds where it is supported and
# affordable.
@support.requires_zlib()
@unittest.skipIf(sys.platform.startswith('win'), 'not supported on Windows')
@unittest.skipIf(sys.platform == 'darwin' and sys._framework,
                 'not supported for frameworks builds on macOS')
@support.skip_if_buildbot('not all buildbots have enough space')
# gh-103053: Skip test if Python is built with Profile Guided Optimization
# (PGO), since the test is just too slow in this case.
@unittest.skipIf(support.check_cflags_pgo(),
                 'test is too slow with PGO')
class TestFreeze(unittest.TestCase):
    """End-to-end sanity check: freeze a trivial script and run it."""

    @support.requires_resource('cpu') # Building Python is slow
    def test_freeze_simple_script(self):
        source = textwrap.dedent("""
            import sys
            print('running...')
            sys.exit(0)
            """)
        with os_helper.temp_dir() as workdir:
            # prepare() lays out a build tree and returns the actual paths.
            workdir, script_path, python_exe = helper.prepare(source, workdir)
            frozen_exe = helper.freeze(python_exe, script_path, workdir)
            output = helper.run(frozen_exe)
            self.assertEqual(output, 'running...')

View File

@ -0,0 +1,444 @@
"""Tests to cover the Tools/i18n package"""
import os
import re
import sys
import unittest
from textwrap import dedent
from pathlib import Path
from test.support.script_helper import assert_python_ok
from test.test_tools import skip_if_missing, toolsdir
from test.support.os_helper import temp_cwd, temp_dir
skip_if_missing()
DATA_DIR = Path(__file__).resolve().parent / 'i18n_data'
def normalize_POT_file(pot):
    """Normalize the POT creation timestamp, charset and
    file locations to make the POT file easier to compare.
    """
    # Replace the real creation date with a fixed placeholder.
    pot = re.sub(
        r'"POT-Creation-Date: .+?\\n"',
        r'"POT-Creation-Date: 2000-01-01 00:00+0000\\n"',
        pot,
    )
    # Force the charset header to UTF-8 (currently there's no way to
    # specify the output charset).
    pot = re.sub(
        r'"Content-Type: text/plain; charset=.+?\\n"',
        r'"Content-Type: text/plain; charset=UTF-8\\n"',
        pot,
    )
    # Rewrite '#:' file-location comments to use '/' so runs on Windows
    # (which uses '\') compare equal to POSIX runs.
    pot = re.sub(r'#:.+', lambda match: match[0].replace(os.sep, '/'), pot)
    return pot
class Test_pygettext(unittest.TestCase):
    """Tests for the pygettext.py tool"""

    # The script under test: Tools/i18n/pygettext.py.
    script = Path(toolsdir, 'i18n', 'pygettext.py')

    def get_header(self, data):
        """ utility: return the header of a .po file as a dictionary """
        headers = {}
        for line in data.split('\n'):
            if not line or line.startswith(('#', 'msgid', 'msgstr')):
                continue
            line = line.strip('"')
            key, val = line.split(':', 1)
            headers[key] = val.strip()
        return headers

    def get_msgids(self, data):
        """ utility: return all msgids in .po file as a list of strings """
        msgids = []
        reading_msgid = False
        cur_msgid = []
        for line in data.split('\n'):
            if reading_msgid:
                if line.startswith('"'):
                    # Continuation line of a multi-line msgid.
                    cur_msgid.append(line.strip('"'))
                else:
                    # A non-quoted line terminates the current msgid.
                    msgids.append('\n'.join(cur_msgid))
                    cur_msgid = []
                    reading_msgid = False
                    continue
            if line.startswith('msgid '):
                line = line[len('msgid '):]
                cur_msgid.append(line.strip('"'))
                reading_msgid = True
        else:
            # for/else: flush a msgid still open at end of input.
            if reading_msgid:
                msgids.append('\n'.join(cur_msgid))
        return msgids

    def assert_POT_equal(self, expected, actual):
        """Check if two POT files are equal"""
        self.maxDiff = None
        self.assertEqual(normalize_POT_file(expected), normalize_POT_file(actual))

    def extract_from_str(self, module_content, *, args=(), strict=True):
        """Return all msgids extracted from module_content."""
        filename = 'test.py'
        # Run pygettext on the content in a temporary working directory.
        with temp_cwd(None):
            with open(filename, 'w', encoding='utf-8') as fp:
                fp.write(module_content)
            res = assert_python_ok('-Xutf8', self.script, *args, filename)
            if strict:
                # With strict=True, pygettext must not emit warnings.
                self.assertEqual(res.err, b'')
            with open('messages.pot', encoding='utf-8') as fp:
                data = fp.read()
        return self.get_msgids(data)

    def extract_docstrings_from_str(self, module_content):
        """Return all docstrings extracted from module_content."""
        return self.extract_from_str(module_content, args=('--docstrings',), strict=False)

    def test_header(self):
        """Make sure the required fields are in the header, according to:
           http://www.gnu.org/software/gettext/manual/gettext.html#Header-Entry
        """
        with temp_cwd(None) as cwd:
            assert_python_ok('-Xutf8', self.script)
            with open('messages.pot', encoding='utf-8') as fp:
                data = fp.read()
            header = self.get_header(data)

            self.assertIn("Project-Id-Version", header)
            self.assertIn("POT-Creation-Date", header)
            self.assertIn("PO-Revision-Date", header)
            self.assertIn("Last-Translator", header)
            self.assertIn("Language-Team", header)
            self.assertIn("MIME-Version", header)
            self.assertIn("Content-Type", header)
            self.assertIn("Content-Transfer-Encoding", header)
            self.assertIn("Generated-By", header)

            # not clear if these should be required in POT (template) files
            #self.assertIn("Report-Msgid-Bugs-To", header)
            #self.assertIn("Language", header)

            #"Plural-Forms" is optional

    @unittest.skipIf(sys.platform.startswith('aix'),
                     'bpo-29972: broken test on AIX')
    def test_POT_Creation_Date(self):
        """ Match the date format from xgettext for POT-Creation-Date """
        from datetime import datetime
        with temp_cwd(None) as cwd:
            assert_python_ok('-Xutf8', self.script)
            with open('messages.pot', encoding='utf-8') as fp:
                data = fp.read()
            header = self.get_header(data)
            creationDate = header['POT-Creation-Date']

            # peel off the escaped newline at the end of string
            if creationDate.endswith('\\n'):
                creationDate = creationDate[:-len('\\n')]

            # This will raise if the date format does not exactly match.
            datetime.strptime(creationDate, '%Y-%m-%d %H:%M%z')

    def test_funcdocstring(self):
        # Raw/unicode string prefixes must still be treated as docstrings.
        for doc in ('"""doc"""', "r'''doc'''", "R'doc'", 'u"doc"'):
            with self.subTest(doc):
                msgids = self.extract_docstrings_from_str(dedent('''\
                def foo(bar):
                    %s
                ''' % doc))
                self.assertIn('doc', msgids)

    def test_funcdocstring_bytes(self):
        # A bytes literal is not a docstring and must not be extracted.
        msgids = self.extract_docstrings_from_str(dedent('''\
        def foo(bar):
            b"""doc"""
        '''))
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_funcdocstring_fstring(self):
        # An f-string is not a docstring and must not be extracted.
        msgids = self.extract_docstrings_from_str(dedent('''\
        def foo(bar):
            f"""doc"""
        '''))
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_classdocstring(self):
        for doc in ('"""doc"""', "r'''doc'''", "R'doc'", 'u"doc"'):
            with self.subTest(doc):
                msgids = self.extract_docstrings_from_str(dedent('''\
                class C:
                    %s
                ''' % doc))
                self.assertIn('doc', msgids)

    def test_classdocstring_bytes(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        class C:
            b"""doc"""
        '''))
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_classdocstring_fstring(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        class C:
            f"""doc"""
        '''))
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_moduledocstring(self):
        for doc in ('"""doc"""', "r'''doc'''", "R'doc'", 'u"doc"'):
            with self.subTest(doc):
                msgids = self.extract_docstrings_from_str(dedent('''\
                %s
                ''' % doc))
                self.assertIn('doc', msgids)

    def test_moduledocstring_bytes(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        b"""doc"""
        '''))
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_moduledocstring_fstring(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"""doc"""
        '''))
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_msgid(self):
        # Implicit string concatenation must form a single msgid.
        msgids = self.extract_docstrings_from_str(
            '''_("""doc""" r'str' u"ing")''')
        self.assertIn('docstring', msgids)

    def test_msgid_bytes(self):
        msgids = self.extract_docstrings_from_str('_(b"""doc""")')
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_msgid_fstring(self):
        msgids = self.extract_docstrings_from_str('_(f"""doc""")')
        self.assertFalse([msgid for msgid in msgids if 'doc' in msgid])

    def test_funcdocstring_annotated_args(self):
        """ Test docstrings for functions with annotated args """
        msgids = self.extract_docstrings_from_str(dedent('''\
        def foo(bar: str):
            """doc"""
        '''))
        self.assertIn('doc', msgids)

    def test_funcdocstring_annotated_return(self):
        """ Test docstrings for functions with annotated return type """
        msgids = self.extract_docstrings_from_str(dedent('''\
        def foo(bar) -> str:
            """doc"""
        '''))
        self.assertIn('doc', msgids)

    def test_funcdocstring_defvalue_args(self):
        """ Test docstring for functions with default arg values """
        msgids = self.extract_docstrings_from_str(dedent('''\
        def foo(bar=()):
            """doc"""
        '''))
        self.assertIn('doc', msgids)

    def test_funcdocstring_multiple_funcs(self):
        """ Test docstring extraction for multiple functions combining
            annotated args, annotated return types and default arg values
        """
        msgids = self.extract_docstrings_from_str(dedent('''\
        def foo1(bar: tuple=()) -> str:
            """doc1"""

        def foo2(bar: List[1:2]) -> (lambda x: x):
            """doc2"""

        def foo3(bar: 'func'=lambda x: x) -> {1: 2}:
            """doc3"""
        '''))
        self.assertIn('doc1', msgids)
        self.assertIn('doc2', msgids)
        self.assertIn('doc3', msgids)

    def test_classdocstring_early_colon(self):
        """ Test docstring extraction for a class with colons occurring within
        the parentheses.
        """
        msgids = self.extract_docstrings_from_str(dedent('''\
        class D(L[1:2], F({1: 2}), metaclass=M(lambda x: x)):
            """doc"""
        '''))
        self.assertIn('doc', msgids)

    def test_calls_in_fstrings(self):
        # _() calls embedded in f-string replacement fields are extracted.
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_('foo bar')}"
        '''))
        self.assertIn('foo bar', msgids)

    def test_calls_in_fstrings_raw(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        rf"{_('foo bar')}"
        '''))
        self.assertIn('foo bar', msgids)

    def test_calls_in_fstrings_nested(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"""{f'{_("foo bar")}'}"""
        '''))
        self.assertIn('foo bar', msgids)

    def test_calls_in_fstrings_attribute(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{obj._('foo bar')}"
        '''))
        self.assertIn('foo bar', msgids)

    def test_calls_in_fstrings_with_call_on_call(self):
        # The callee here is a call expression, not a keyword name.
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{type(str)('foo bar')}"
        '''))
        self.assertNotIn('foo bar', msgids)

    def test_calls_in_fstrings_with_format(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_('foo {bar}').format(bar='baz')}"
        '''))
        self.assertIn('foo {bar}', msgids)

    def test_calls_in_fstrings_with_wrong_input_1(self):
        # An f-string argument to _() is not a constant and is rejected.
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_(f'foo {bar}')}"
        '''))
        self.assertFalse([msgid for msgid in msgids if 'foo {bar}' in msgid])

    def test_calls_in_fstrings_with_wrong_input_2(self):
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_(1)}"
        '''))
        self.assertNotIn(1, msgids)

    def test_calls_in_fstring_with_multiple_args(self):
        # _() with more than one positional argument is not extracted.
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_('foo', 'bar')}"
        '''))
        self.assertNotIn('foo', msgids)
        self.assertNotIn('bar', msgids)

    def test_calls_in_fstring_with_keyword_args(self):
        # _() with keyword arguments is not extracted.
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_('foo', bar='baz')}"
        '''))
        self.assertNotIn('foo', msgids)
        self.assertNotIn('bar', msgids)
        self.assertNotIn('baz', msgids)

    def test_calls_in_fstring_with_partially_wrong_expression(self):
        # Only the valid sub-expression's argument is extracted.
        msgids = self.extract_docstrings_from_str(dedent('''\
        f"{_(f'foo') + _('bar')}"
        '''))
        self.assertNotIn('foo', msgids)
        self.assertIn('bar', msgids)

    def test_function_and_class_names(self):
        """Test that function and class names are not mistakenly extracted."""
        msgids = self.extract_from_str(dedent('''\
        def _(x):
            pass

        def _(x="foo"):
            pass

        async def _(x):
            pass

        class _(object):
            pass
        '''))
        self.assertEqual(msgids, [''])

    def test_pygettext_output(self):
        """Test that the pygettext output exactly matches snapshots."""
        for input_file, output_file, output in extract_from_snapshots():
            with self.subTest(input_file=input_file):
                expected = output_file.read_text(encoding='utf-8')
                self.assert_POT_equal(expected, output)

    def test_files_list(self):
        """Make sure the directories are inspected for source files
           bpo-31920
        """
        text1 = 'Text to translate1'
        text2 = 'Text to translate2'
        text3 = 'Text to ignore'
        with temp_cwd(None), temp_dir(None) as sdir:
            # Regular package directory: must be scanned.
            pymod = Path(sdir, 'pypkg', 'pymod.py')
            pymod.parent.mkdir()
            pymod.write_text(f'_({text1!r})', encoding='utf-8')

            # Directory whose name looks like a .py file: still scanned.
            pymod2 = Path(sdir, 'pkg.py', 'pymod2.py')
            pymod2.parent.mkdir()
            pymod2.write_text(f'_({text2!r})', encoding='utf-8')

            # CVS directories are excluded from the scan.
            pymod3 = Path(sdir, 'CVS', 'pymod3.py')
            pymod3.parent.mkdir()
            pymod3.write_text(f'_({text3!r})', encoding='utf-8')

            assert_python_ok('-Xutf8', self.script, sdir)
            data = Path('messages.pot').read_text(encoding='utf-8')
            self.assertIn(f'msgid "{text1}"', data)
            self.assertIn(f'msgid "{text2}"', data)
            self.assertNotIn(text3, data)
def extract_from_snapshots():
    """Yield (input_file, snapshot_file, pygettext_output) triples.

    Runs pygettext over each snapshot input in DATA_DIR with the arguments
    recorded below and yields the resulting .pot text for comparison.
    """
    # Maps each input file to the pygettext arguments that produced its
    # snapshot; a (input, output) tuple key overrides the default
    # '<input>.pot' snapshot file name.
    snapshots = {
        'messages.py': ('--docstrings',),
        'fileloc.py': ('--docstrings',),
        'docstrings.py': ('--docstrings',),
        # == Test character escaping
        # Escape ascii and unicode:
        'escapes.py': ('--escape',),
        # Escape only ascii and let unicode pass through:
        ('escapes.py', 'ascii-escapes.pot'): (),
    }

    for filename, args in snapshots.items():
        if isinstance(filename, tuple):
            filename, output_file = filename
            output_file = DATA_DIR / output_file
            input_file = DATA_DIR / filename
        else:
            input_file = DATA_DIR / filename
            output_file = input_file.with_suffix('.pot')
        contents = input_file.read_bytes()
        # Copy the input into a temp cwd so messages.pot is written there.
        with temp_cwd(None):
            Path(input_file.name).write_bytes(contents)
            assert_python_ok('-Xutf8', Test_pygettext.script, *args,
                             input_file.name)
            yield (input_file, output_file,
                   Path('messages.pot').read_text(encoding='utf-8'))
def update_POT_snapshots():
    # Regenerate the stored .pot snapshot files from the current pygettext
    # output (invoked via the --snapshot-update command-line flag below).
    for _, output_file, output in extract_from_snapshots():
        output = normalize_POT_file(output)
        output_file.write_text(output, encoding='utf-8')
if __name__ == '__main__':
    # To regenerate POT files
    if len(sys.argv) > 1 and sys.argv[1] == '--snapshot-update':
        update_POT_snapshots()
        sys.exit(0)
    unittest.main()

View File

@ -0,0 +1,81 @@
"""
Tests for `Makefile`.
"""
import os
import unittest
from test import support
import sysconfig
MAKEFILE = sysconfig.get_makefile_filename()
if not support.check_impl_detail(cpython=True):
raise unittest.SkipTest('cpython only')
if not os.path.exists(MAKEFILE) or not os.path.isfile(MAKEFILE):
raise unittest.SkipTest('Makefile could not be found')
class TestMakefile(unittest.TestCase):
    """Check that the Makefile's TESTSUBDIRS list matches the test tree."""

    def list_test_dirs(self):
        # Parse the TESTSUBDIRS variable from the Makefile: the first entry
        # follows 'TESTSUBDIRS=' and continuation lines are tab-indented,
        # with trailing backslashes that must be stripped.
        result = []
        found_testsubdirs = False
        with open(MAKEFILE, 'r', encoding='utf-8') as f:
            for line in f:
                if line.startswith('TESTSUBDIRS='):
                    found_testsubdirs = True
                    result.append(
                        line.removeprefix('TESTSUBDIRS=').replace(
                            '\\', '',
                        ).strip(),
                    )
                    continue
                if found_testsubdirs:
                    # A non-tab-indented line ends the variable definition.
                    if '\t' not in line:
                        break
                    result.append(line.replace('\\', '').strip())
        return result

    @unittest.skipUnless(support.TEST_MODULES_ENABLED, "requires test modules")
    def test_makefile_test_folders(self):
        test_dirs = self.list_test_dirs()
        idle_test = 'idlelib/idle_test'
        self.assertIn(idle_test, test_dirs)

        used = set([idle_test])
        for dirpath, dirs, files in os.walk(support.TEST_HOME_DIR):
            dirname = os.path.basename(dirpath)
            # Skip temporary dirs:
            if dirname == '__pycache__' or dirname.startswith('.'):
                dirs.clear()  # do not process subfolders
                continue
            # Skip empty dirs:
            if not dirs and not files:
                continue
            # Skip dirs with hidden-only files:
            if files and all(
                filename.startswith('.') or filename == '__pycache__'
                for filename in files
            ):
                continue

            relpath = os.path.relpath(dirpath, support.STDLIB_DIR)
            with self.subTest(relpath=relpath):
                self.assertIn(
                    relpath,
                    test_dirs,
                    msg=(
                        f"{relpath!r} is not included in the Makefile's list "
                        "of test directories to install"
                    )
                )
                used.add(relpath)

        # Don't check the wheel dir when Python is built --with-wheel-pkg-dir
        if sysconfig.get_config_var('WHEEL_PKG_DIR'):
            test_dirs.remove('test/wheeldata')
            used.discard('test/wheeldata')

        # Check that there are no extra entries:
        unique_test_dirs = set(test_dirs)
        self.assertSetEqual(unique_test_dirs, used)
        self.assertEqual(len(test_dirs), len(unique_test_dirs))

View File

@ -0,0 +1,122 @@
import unittest
from test.test_tools import skip_if_missing, imports_under_tool
from test import support
from test.support.hypothesis_helper import hypothesis

# Shorthands for the hypothesis API (hypothesis_helper falls back to a stub
# when hypothesis is not installed).
st = hypothesis.strategies
given = hypothesis.given
example = hypothesis.example

# Skip the module unless Tools/unicode is available, then import the DAWG
# implementation from there.
skip_if_missing("unicode")
with imports_under_tool("unicode"):
    from dawg import Dawg, build_compression_dawg, lookup, inverse_lookup
@st.composite
def char_name_db(draw, min_length=1, max_length=30):
    # Hypothesis strategy: generate a random "name database" as a list of
    # (name, character) pairs with m unique names and m unique characters.
    m = draw(st.integers(min_value=min_length, max_value=max_length))
    names = draw(
        st.sets(st.text("abcd", min_size=1, max_size=10), min_size=m, max_size=m)
    )
    characters = draw(st.sets(st.characters(), min_size=m, max_size=m))
    return list(zip(names, characters))
class TestDawg(unittest.TestCase):
    """Tests for the directed acyclic word graph data structure that is used
    to store the unicode character names in unicodedata. Tests ported from PyPy
    """

    def test_dawg_direct_simple(self):
        # Insert a few key/value pairs, pack the DAWG, and verify both the
        # forward lookup and the inverse (value -> key) lookup, including
        # misses raising KeyError.
        dawg = Dawg()
        dawg.insert("a", -4)
        dawg.insert("c", -2)
        dawg.insert("cat", -1)
        dawg.insert("catarr", 0)
        dawg.insert("catnip", 1)
        dawg.insert("zcatnip", 5)
        packed, data, inverse = dawg.finish()

        self.assertEqual(lookup(packed, data, b"a"), -4)
        self.assertEqual(lookup(packed, data, b"c"), -2)
        self.assertEqual(lookup(packed, data, b"cat"), -1)
        self.assertEqual(lookup(packed, data, b"catarr"), 0)
        self.assertEqual(lookup(packed, data, b"catnip"), 1)
        self.assertEqual(lookup(packed, data, b"zcatnip"), 5)
        self.assertRaises(KeyError, lookup, packed, data, b"b")
        self.assertRaises(KeyError, lookup, packed, data, b"catni")
        self.assertRaises(KeyError, lookup, packed, data, b"catnipp")

        self.assertEqual(inverse_lookup(packed, inverse, -4), b"a")
        self.assertEqual(inverse_lookup(packed, inverse, -2), b"c")
        self.assertEqual(inverse_lookup(packed, inverse, -1), b"cat")
        self.assertEqual(inverse_lookup(packed, inverse, 0), b"catarr")
        self.assertEqual(inverse_lookup(packed, inverse, 1), b"catnip")
        self.assertEqual(inverse_lookup(packed, inverse, 5), b"zcatnip")
        self.assertRaises(KeyError, inverse_lookup, packed, inverse, 12)

    def test_forbid_empty_dawg(self):
        # Packing an empty DAWG is an error.
        dawg = Dawg()
        self.assertRaises(ValueError, dawg.finish)

    # The @example cases below are regression seeds found by hypothesis.
    @given(char_name_db())
    @example([("abc", "a"), ("abd", "b")])
    @example(
        [
            ("bab", "1"),
            ("a", ":"),
            ("ad", "@"),
            ("b", "<"),
            ("aacc", "?"),
            ("dab", "D"),
            ("aa", "0"),
            ("ab", "F"),
            ("aaa", "7"),
            ("cbd", "="),
            ("abad", ";"),
            ("ac", "B"),
            ("abb", "4"),
            ("bb", "2"),
            ("aab", "9"),
            ("caaaaba", "E"),
            ("ca", ">"),
            ("bbaaa", "5"),
            ("d", "3"),
            ("baac", "8"),
            ("c", "6"),
            ("ba", "A"),
        ]
    )
    @example(
        [
            ("bcdac", "9"),
            ("acc", "g"),
            ("d", "d"),
            ("daabdda", "0"),
            ("aba", ";"),
            ("c", "6"),
            ("aa", "7"),
            ("abbd", "c"),
            ("badbd", "?"),
            ("bbd", "f"),
            ("cc", "@"),
            ("bb", "8"),
            ("daca", ">"),
            ("ba", ":"),
            ("baac", "3"),
            ("dbdddac", "a"),
            ("a", "2"),
            ("cabd", "b"),
            ("b", "="),
            ("abd", "4"),
            ("adcbd", "5"),
            ("abc", "e"),
            ("ab", "1"),
        ]
    )
    def test_dawg(self, data):
        # suppress debug prints
        with support.captured_stdout() as output:
            # it's enough to build it, building will also check the result
            build_compression_dawg(data)

View File

@ -0,0 +1,159 @@
"""Tests for the Tools/i18n/msgfmt.py tool."""
import json
import sys
import unittest
from gettext import GNUTranslations
from pathlib import Path
from test.support.os_helper import temp_cwd
from test.support.script_helper import assert_python_failure, assert_python_ok
from test.test_tools import skip_if_missing, toolsdir
skip_if_missing('i18n')
data_dir = (Path(__file__).parent / 'msgfmt_data').resolve()
script_dir = Path(toolsdir) / 'i18n'
msgfmt = script_dir / 'msgfmt.py'
def compile_messages(po_file, mo_file):
    # Compile a .po file into a .mo file using the msgfmt.py script
    # in a subprocess, asserting that it exits successfully.
    assert_python_ok(msgfmt, '-o', mo_file, po_file)
class CompilationTest(unittest.TestCase):
    """Tests for msgfmt.py catalog compilation and its error reporting."""

    def test_compilation(self):
        # Recompile every .po fixture and compare the resulting catalog
        # against the stored .mo snapshot.
        self.maxDiff = None
        with temp_cwd():
            for po_file in data_dir.glob('*.po'):
                with self.subTest(po_file=po_file):
                    mo_file = po_file.with_suffix('.mo')
                    with open(mo_file, 'rb') as f:
                        expected = GNUTranslations(f)

                    tmp_mo_file = mo_file.name
                    compile_messages(po_file, tmp_mo_file)
                    with open(tmp_mo_file, 'rb') as f:
                        actual = GNUTranslations(f)

                    self.assertDictEqual(actual._catalog, expected._catalog)

    def test_translations(self):
        # Spot-check the compiled general.mo snapshot: plain, contextual,
        # multiline, escaped and plural translations.
        with open(data_dir / 'general.mo', 'rb') as f:
            t = GNUTranslations(f)

        self.assertEqual(t.gettext('foo'), 'foo')
        self.assertEqual(t.gettext('bar'), 'baz')
        self.assertEqual(t.pgettext('abc', 'foo'), 'bar')
        self.assertEqual(t.pgettext('xyz', 'foo'), 'bar')
        self.assertEqual(t.gettext('Multilinestring'), 'Multilinetranslation')
        self.assertEqual(t.gettext('"escapes"'), '"translated"')
        self.assertEqual(t.gettext('\n newlines \n'), '\n translated \n')
        self.assertEqual(t.ngettext('One email sent.', '%d emails sent.', 1),
                         'One email sent.')
        self.assertEqual(t.ngettext('One email sent.', '%d emails sent.', 2),
                         '%d emails sent.')
        self.assertEqual(t.npgettext('abc', 'One email sent.',
                                     '%d emails sent.', 1),
                         'One email sent.')
        self.assertEqual(t.npgettext('abc', 'One email sent.',
                                     '%d emails sent.', 2),
                         '%d emails sent.')

    def test_invalid_msgid_plural(self):
        with temp_cwd():
            Path('invalid.po').write_text('''\
msgid_plural "plural"
msgstr[0] "singular"
''')

            res = assert_python_failure(msgfmt, 'invalid.po')
            err = res.err.decode('utf-8')
            self.assertIn('msgid_plural not preceded by msgid', err)

    def test_plural_without_msgid_plural(self):
        # Plural translations require msgid_plural.
        with temp_cwd():
            Path('invalid.po').write_text('''\
msgid "foo"
msgstr[0] "bar"
''')

            res = assert_python_failure(msgfmt, 'invalid.po')
            err = res.err.decode('utf-8')
            self.assertIn('plural without msgid_plural', err)

    def test_indexed_msgstr_without_msgid_plural(self):
        # Non-indexed msgstr cannot follow msgid_plural.
        with temp_cwd():
            Path('invalid.po').write_text('''\
msgid "foo"
msgid_plural "foos"
msgstr "bar"
''')

            res = assert_python_failure(msgfmt, 'invalid.po')
            err = res.err.decode('utf-8')
            self.assertIn('indexed msgstr required for plural', err)

    def test_generic_syntax_error(self):
        with temp_cwd():
            Path('invalid.po').write_text('''\
"foo"
''')

            res = assert_python_failure(msgfmt, 'invalid.po')
            err = res.err.decode('utf-8')
            self.assertIn('Syntax error', err)
class CLITest(unittest.TestCase):
    """Exercise the command-line behaviour of msgfmt.py."""

    def test_help(self):
        # Both spellings of the help option print the usage text to stderr.
        for flag in ('--help', '-h'):
            result = assert_python_ok(msgfmt, flag)
            stderr = result.err.decode('utf-8')
            self.assertIn('Generate binary message catalog from textual translation description.', stderr)

    def test_version(self):
        # Both spellings of the version option print the version string.
        for flag in ('--version', '-V'):
            result = assert_python_ok(msgfmt, flag)
            stdout = result.out.decode('utf-8').strip()
            self.assertEqual('msgfmt.py 1.2', stdout)

    def test_invalid_option(self):
        # An unknown option fails and reports both the usage text and a
        # specific diagnostic for the bad option.
        result = assert_python_failure(msgfmt, '--invalid-option')
        stderr = result.err.decode('utf-8')
        self.assertIn('Generate binary message catalog from textual translation description.', stderr)
        self.assertIn('option --invalid-option not recognized', stderr)

    def test_no_input_file(self):
        # Normalize Windows line endings before matching the message.
        result = assert_python_ok(msgfmt)
        stderr = result.err.decode('utf-8').replace('\r\n', '\n')
        self.assertIn('No input file given\n'
                      "Try `msgfmt --help' for more information.", stderr)

    def test_nonexistent_file(self):
        # A missing input file must produce a nonzero exit status.
        assert_python_failure(msgfmt, 'nonexistent.po')
def update_catalog_snapshots():
    """Recompile every .po fixture and refresh its snapshot files."""
    for po_path in data_dir.glob('*.po'):
        mo_path = po_path.with_suffix('.mo')
        compile_messages(po_path, mo_path)
        # Also keep a human-readable JSON dump next to each snapshot;
        # it is easier to review than the binary .mo file.
        with open(mo_path, 'rb') as mo_stream:
            catalog = GNUTranslations(mo_stream)._catalog
        # Sort plain msgids before (msgid, index) plural tuples so the
        # JSON output is stable across runs.
        entries = sorted(catalog.items(),
                         key=lambda item: (isinstance(item[0], tuple), item[0]))
        with open(po_path.with_suffix('.json'), 'w') as json_stream:
            json.dump(entries, json_stream, indent=4)
            json_stream.write('\n')
if __name__ == '__main__':
    # Running with --snapshot-update regenerates the checked-in fixture
    # files instead of executing the test suite.
    args = sys.argv[1:]
    if args and args[0] == '--snapshot-update':
        update_catalog_snapshots()
        sys.exit(0)
    unittest.main()

View File

@ -0,0 +1,35 @@
"""Tests for scripts in the Tools directory.
This file contains regression tests for some of the scripts found in the
Tools directory of a Python checkout or tarball, such as reindent.py.
"""
import os
import unittest
from test.support.script_helper import assert_python_ok
from test.support import findfile
from test.test_tools import toolsdir, skip_if_missing
# Skip this whole test module when the Tools/scripts directory is absent
# (e.g. an installed Python without a full source checkout).
skip_if_missing()
class ReindentTests(unittest.TestCase):
    """Smoke tests for Tools/patchcheck/reindent.py."""
    # Path of the script under test.
    script = os.path.join(toolsdir, 'patchcheck', 'reindent.py')

    def test_noargs(self):
        # Running with no arguments must exit successfully.
        assert_python_ok(self.script)

    def test_help(self):
        rc, out, err = assert_python_ok(self.script, '-h')
        self.assertEqual(out, b'')
        # The usage text goes to stderr, so stderr must be non-empty.
        # assertNotEqual states that intent directly (the previous
        # assertGreater(err, b'') relied on byte-string ordering) and
        # matches the style of test_reindent_file_with_bad_encoding.
        self.assertNotEqual(err, b'')

    def test_reindent_file_with_bad_encoding(self):
        bad_coding_path = findfile('bad_coding.py', subdir='tokenizedata')
        rc, out, err = assert_python_ok(self.script, '-r', bad_coding_path)
        self.assertEqual(out, b'')
        # The bad encoding is reported on stderr while the script exits 0.
        self.assertNotEqual(err, b'')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()

View File

@ -0,0 +1,30 @@
"""Tests for scripts in the Tools/scripts directory.
This file contains extremely basic regression tests for the scripts found in
the Tools directory of a Python checkout or tarball which don't have separate
tests of their own.
"""
import os
import unittest
from test.support import import_helper
from test.test_tools import scriptsdir, import_tool, skip_if_missing
# Skip this whole test module when the Tools/scripts directory is absent
# (e.g. an installed Python without a full source checkout).
skip_if_missing()
class TestSundryScripts(unittest.TestCase):
    # import logging registers "atfork" functions which keep indirectly the
    # logging module dictionary alive. Mock the function to be able to unload
    # cleanly the logging module.
    @import_helper.mock_register_at_fork
    def test_sundry(self, mock_os):
        # Importing each script is a minimal smoke test: it catches syntax
        # errors and import-time failures without running the script.
        for entry in os.listdir(scriptsdir):
            if entry.endswith('.py'):
                import_tool(entry[:-3])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()