Mirror of https://github.com/AdaCore/cpython.git
Patch #445762: Support --disable-unicode
- Do not compile unicodeobject, unicodectype, and unicodedata if Unicode is disabled.
- Check for Py_USING_UNICODE in all places that use Unicode functions.
- Disable Unicode literals and the Unicode-related builtin functions.
- Add the types.StringTypes list.
- Remove Unicode literals from most tests.
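The change relies on one defensive idiom throughout: derive the available string types at import time instead of assuming the unicode builtin exists (compare the Lib/types.py and Lib/ConfigParser.py hunks below). A minimal sketch of that idiom, assuming a Python 2.x interpreter; the is_string helper is illustrative only and not part of the patch:

    StringType = str
    try:
        # On a --disable-unicode build the unicode builtin does not exist,
        # so this assignment raises NameError.
        UnicodeType = unicode
        StringTypes = [StringType, UnicodeType]
    except NameError:
        StringTypes = [StringType]

    def is_string(obj):
        # Membership in StringTypes replaces checks such as
        # type(obj) in [type(''), type(u'')], which would need a u'' literal.
        return type(obj) in StringTypes

    print is_string('abc'), is_string(123)    # prints: 1 0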
@@ -30,7 +30,9 @@ extern DL_IMPORT(PyTypeObject) PyInt_Type;
#define PyInt_Check(op) ((op)->ob_type == &PyInt_Type)

extern DL_IMPORT(PyObject *) PyInt_FromString(char*, char**, int);
#ifdef Py_USING_UNICODE
extern DL_IMPORT(PyObject *) PyInt_FromUnicode(Py_UNICODE*, int, int);
#endif
extern DL_IMPORT(PyObject *) PyInt_FromLong(long);
extern DL_IMPORT(long) PyInt_AsLong(PyObject *);
extern DL_IMPORT(long) PyInt_GetMax(void);
@@ -42,7 +42,9 @@ extern DL_IMPORT(unsigned LONG_LONG) PyLong_AsUnsignedLongLong(PyObject *);
#endif /* HAVE_LONG_LONG */

DL_IMPORT(PyObject *) PyLong_FromString(char *, char **, int);
#ifdef Py_USING_UNICODE
DL_IMPORT(PyObject *) PyLong_FromUnicode(Py_UNICODE*, int, int);
#endif

/* _PyLong_FromByteArray: View the n unsigned bytes as a binary integer in
base 256, and return a Python long with the same numeric value.
@@ -320,7 +320,9 @@ extern DL_IMPORT(int) PyObject_Print(PyObject *, FILE *, int);
extern DL_IMPORT(void) _PyObject_Dump(PyObject *);
extern DL_IMPORT(PyObject *) PyObject_Repr(PyObject *);
extern DL_IMPORT(PyObject *) PyObject_Str(PyObject *);
#ifdef Py_USING_UNICODE
extern DL_IMPORT(PyObject *) PyObject_Unicode(PyObject *);
#endif
extern DL_IMPORT(int) PyObject_Compare(PyObject *, PyObject *);
extern DL_IMPORT(PyObject *) PyObject_RichCompare(PyObject *, PyObject *, int);
extern DL_IMPORT(int) PyObject_RichCompareBool(PyObject *, PyObject *, int);
@@ -58,6 +58,12 @@ Copyright (c) Corporation for National Research Initiatives.

/* --- Internal Unicode Format -------------------------------------------- */

#ifndef Py_USING_UNICODE

#define PyUnicode_Check(op) 0

#else

/* FIXME: MvL's new implementation assumes that Py_UNICODE_SIZE is
properly set, but the default rules below doesn't set it. I'll
sort this out some other day -- fredrik@pythonware.com */
@@ -1087,4 +1093,5 @@ extern DL_IMPORT(int) _PyUnicode_IsAlpha(
#ifdef __cplusplus
}
#endif
#endif /* Py_USING_UNICODE */
#endif /* !Py_UNICODEOBJECT_H */
@@ -82,7 +82,7 @@ ConfigParser -- responsible for for parsing a list of
write the configuration state in .ini format
"""

import string
import string, types
import re

__all__ = ["NoSectionError","DuplicateSectionError","NoOptionError",
@@ -222,7 +222,7 @@ class ConfigParser:
configuration files in the list will be read. A single
filename may also be given.
"""
if type(filenames) in [type(''), type(u'')]:
if type(filenames) in types.StringTypes:
filenames = [filenames]
for filename in filenames:
try:
Lib/copy.py
@@ -91,7 +91,10 @@ d[types.IntType] = _copy_atomic
d[types.LongType] = _copy_atomic
d[types.FloatType] = _copy_atomic
d[types.StringType] = _copy_atomic
d[types.UnicodeType] = _copy_atomic
try:
d[types.UnicodeType] = _copy_atomic
except AttributeError:
pass
try:
d[types.CodeType] = _copy_atomic
except AttributeError:
@@ -170,7 +173,10 @@ d[types.IntType] = _deepcopy_atomic
d[types.LongType] = _deepcopy_atomic
d[types.FloatType] = _deepcopy_atomic
d[types.StringType] = _deepcopy_atomic
d[types.UnicodeType] = _deepcopy_atomic
try:
d[types.UnicodeType] = _deepcopy_atomic
except AttributeError:
pass
d[types.CodeType] = _deepcopy_atomic
d[types.TypeType] = _deepcopy_atomic
d[types.XRangeType] = _deepcopy_atomic
@@ -305,7 +305,8 @@ if 0:
encoding = "undefined"

if encoding != "ascii":
sys.setdefaultencoding(encoding)
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !

#
# Run custom site specific code, if available.
@@ -1,6 +1,6 @@
# test_pickle and test_cpickle both use this.

from test_support import TestFailed
from test_support import TestFailed, have_unicode
import sys

# break into multiple strings to please font-lock-mode
@@ -191,7 +191,11 @@ def dotest(pickle):
print "accepted insecure string: %s" % repr(buf)

# Test some Unicode end cases
endcases = [u'', u'<\\u>', u'<\\\u1234>', u'<\n>', u'<\\>']
if have_unicode:
endcases = [unicode(''), unicode('<\\u>'), unicode('<\\\u1234>'),
unicode('<\n>'), unicode('<\\>')]
else:
endcases = []
for u in endcases:
try:
u2 = pickle.loads(pickle.dumps(u))
@@ -1,7 +1,7 @@
"""Common tests shared by test_string and test_userstring"""

import string
from test_support import verify, verbose, TestFailed
from test_support import verify, verbose, TestFailed, have_unicode

transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'

@@ -125,11 +125,12 @@ def run_method_tests(test):
test('join', ' ', 'w x y z', Sequence())
test('join', 'a', 'abc', ('abc',))
test('join', 'a', 'z', UserList(['z']))
test('join', u'.', u'a.b.c', ['a', 'b', 'c'])
test('join', '.', u'a.b.c', [u'a', 'b', 'c'])
test('join', '.', u'a.b.c', ['a', u'b', 'c'])
test('join', '.', u'a.b.c', ['a', 'b', u'c'])
test('join', '.', TypeError, ['a', u'b', 3])
if have_unicode:
test('join', unicode('.'), unicode('a.b.c'), ['a', 'b', 'c'])
test('join', '.', unicode('a.b.c'), [unicode('a'), 'b', 'c'])
test('join', '.', unicode('a.b.c'), ['a', unicode('b'), 'c'])
test('join', '.', unicode('a.b.c'), ['a', 'b', unicode('c')])
test('join', '.', TypeError, ['a', unicode('b'), 3])
for i in [5, 25, 125]:
test('join', '-', ((('a' * i) + '-') * i)[:-1],
['a' * i] * i)
@@ -119,7 +119,9 @@ if complex(0.0, 3.14j) != -3.14+0j: raise TestFailed, 'complex(0.0, 3.14j)'
if complex(0j, 3.14) != 3.14j: raise TestFailed, 'complex(0j, 3.14)'
if complex(0.0, 3.14) != 3.14j: raise TestFailed, 'complex(0.0, 3.14)'
if complex(" 3.14+J ") != 3.14+1j: raise TestFailed, 'complex(" 3.14+J )"'
if complex(u" 3.14+J ") != 3.14+1j: raise TestFailed, 'complex(u" 3.14+J )"'
if have_unicode:
if complex(unicode(" 3.14+J ")) != 3.14+1j:
raise TestFailed, 'complex(u" 3.14+J )"'
class Z:
def __complex__(self): return 3.14j
z = Z()
@@ -174,18 +176,20 @@ if eval('b', globals, locals) != 200:
raise TestFailed, "eval(3)"
if eval('c', globals, locals) != 300:
raise TestFailed, "eval(4)"
if eval(u'1+1') != 2: raise TestFailed, 'eval(u\'1+1\')'
if eval(u' 1+1\n') != 2: raise TestFailed, 'eval(u\' 1+1\\n\')'
if have_unicode:
if eval(unicode('1+1')) != 2: raise TestFailed, 'eval(u\'1+1\')'
if eval(unicode(' 1+1\n')) != 2: raise TestFailed, 'eval(u\' 1+1\\n\')'
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
if eval(u'a', globals) != 1:
raise TestFailed, "eval(1) == %s" % eval(u'a', globals)
if eval(u'a', globals, locals) != 1:
raise TestFailed, "eval(2)"
if eval(u'b', globals, locals) != 200:
raise TestFailed, "eval(3)"
if eval(u'c', globals, locals) != 300:
raise TestFailed, "eval(4)"
if have_unicode:
if eval(unicode('a'), globals) != 1:
raise TestFailed, "eval(1) == %s" % eval(unicode('a'), globals)
if eval(unicode('a'), globals, locals) != 1:
raise TestFailed, "eval(2)"
if eval(unicode('b'), globals, locals) != 200:
raise TestFailed, "eval(3)"
if eval(unicode('c'), globals, locals) != 300:
raise TestFailed, "eval(4)"

print 'execfile'
z = 0
@@ -249,9 +253,11 @@ if float(3.14) != 3.14: raise TestFailed, 'float(3.14)'
if float(314) != 314.0: raise TestFailed, 'float(314)'
if float(314L) != 314.0: raise TestFailed, 'float(314L)'
if float(" 3.14 ") != 3.14: raise TestFailed, 'float(" 3.14 ")'
if float(u" 3.14 ") != 3.14: raise TestFailed, 'float(u" 3.14 ")'
if float(u" \u0663.\u0661\u0664 ") != 3.14:
raise TestFailed, 'float(u" \u0663.\u0661\u0664 ")'
if have_unicode:
if float(unicode(" 3.14 ")) != 3.14:
raise TestFailed, 'float(u" 3.14 ")'
if float(unicode(" \u0663.\u0661\u0664 ")) != 3.14:
raise TestFailed, 'float(u" \u0663.\u0661\u0664 ")'

print 'getattr'
import sys
@@ -324,7 +330,9 @@ if int(3.5) != 3: raise TestFailed, 'int(3.5)'
if int(-3.5) != -3: raise TestFailed, 'int(-3.5)'
# Different base:
if int("10",16) != 16L: raise TestFailed, 'int("10",16)'
if int(u"10",16) != 16L: raise TestFailed, 'int(u"10",16)'
if have_unicode:
if int(unicode("10"),16) != 16L:
raise TestFailed, 'int(u"10",16)'
# Test conversion from strings and various anomalies
L = [
('0', 0),
@@ -343,23 +351,26 @@ L = [
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
(u'0', 0),
(u'1', 1),
(u'9', 9),
(u'10', 10),
(u'99', 99),
(u'100', 100),
(u'314', 314),
(u' 314', 314),
(u'\u0663\u0661\u0664 ', 314),
(u' \t\t 314 \t\t ', 314),
(u' 1x', ValueError),
(u' 1 ', 1),
(u' 1\02 ', ValueError),
(u'', ValueError),
(u' ', ValueError),
(u' \t\t ', ValueError),
(' \t\t ', ValueError)
]
if have_unicode:
L += [
(unicode('0'), 0),
(unicode('1'), 1),
(unicode('9'), 9),
(unicode('10'), 10),
(unicode('99'), 99),
(unicode('100'), 100),
(unicode('314'), 314),
(unicode(' 314'), 314),
(unicode('\u0663\u0661\u0664 '), 314),
(unicode(' \t\t 314 \t\t '), 314),
(unicode(' 1x'), ValueError),
(unicode(' 1 '), 1),
(unicode(' 1\02 '), ValueError),
(unicode(''), ValueError),
(unicode(' '), ValueError),
(unicode(' \t\t '), ValueError),
]
for s, v in L:
for sign in "", "+", "-":
@@ -460,16 +471,23 @@ if long(-3.9) != -3L: raise TestFailed, 'long(-3.9)'
if long(3.5) != 3L: raise TestFailed, 'long(3.5)'
if long(-3.5) != -3L: raise TestFailed, 'long(-3.5)'
if long("-3") != -3L: raise TestFailed, 'long("-3")'
if long(u"-3") != -3L: raise TestFailed, 'long(u"-3")'
if have_unicode:
if long(unicode("-3")) != -3L:
raise TestFailed, 'long(u"-3")'
# Different base:
if long("10",16) != 16L: raise TestFailed, 'long("10",16)'
if long(u"10",16) != 16L: raise TestFailed, 'long(u"10",16)'
if have_unicode:
if long(unicode("10"),16) != 16L:
raise TestFailed, 'long(u"10",16)'
# Check conversions from string (same test set as for int(), and then some)
LL = [
('1' + '0'*20, 10L**20),
('1' + '0'*100, 10L**100),
(u'1' + u'0'*20, 10L**20),
(u'1' + u'0'*100, 10L**100),
('1' + '0'*100, 10L**100)
]
if have_unicode:
L+=[
(unicode('1') + unicode('0')*20, 10L**20),
(unicode('1') + unicode('0')*100, 10L**100),
]
for s, v in L + LL:
for sign in "", "+", "-":
@@ -1,4 +1,4 @@
from test_support import TestFailed
from test_support import TestFailed, have_unicode

class base_set:

@@ -63,62 +63,65 @@ try:
except TypeError:
pass

# Test char in Unicode

check('c' in u'abc', "'c' not in u'abc'")
check('d' not in u'abc', "'d' in u'abc'")
if have_unicode:

try:
'' in u'abc'
check(0, "'' in u'abc' did not raise error")
except TypeError:
pass
# Test char in Unicode

try:
'ab' in u'abc'
check(0, "'ab' in u'abc' did not raise error")
except TypeError:
pass
check('c' in unicode('abc'), "'c' not in u'abc'")
check('d' not in unicode('abc'), "'d' in u'abc'")

try:
None in u'abc'
check(0, "None in u'abc' did not raise error")
except TypeError:
pass
try:
'' in unicode('abc')
check(0, "'' in u'abc' did not raise error")
except TypeError:
pass

# Test Unicode char in Unicode
try:
'ab' in unicode('abc')
check(0, "'ab' in u'abc' did not raise error")
except TypeError:
pass

check(u'c' in u'abc', "u'c' not in u'abc'")
check(u'd' not in u'abc', "u'd' in u'abc'")
try:
None in unicode('abc')
check(0, "None in u'abc' did not raise error")
except TypeError:
pass

try:
u'' in u'abc'
check(0, "u'' in u'abc' did not raise error")
except TypeError:
pass
# Test Unicode char in Unicode

try:
u'ab' in u'abc'
check(0, "u'ab' in u'abc' did not raise error")
except TypeError:
pass
check(unicode('c') in unicode('abc'), "u'c' not in u'abc'")
check(unicode('d') not in unicode('abc'), "u'd' in u'abc'")

# Test Unicode char in string
try:
unicode('') in unicode('abc')
check(0, "u'' in u'abc' did not raise error")
except TypeError:
pass

check(u'c' in 'abc', "u'c' not in 'abc'")
check(u'd' not in 'abc', "u'd' in 'abc'")
try:
unicode('ab') in unicode('abc')
check(0, "u'ab' in u'abc' did not raise error")
except TypeError:
pass

try:
u'' in 'abc'
check(0, "u'' in 'abc' did not raise error")
except TypeError:
pass
# Test Unicode char in string

try:
u'ab' in 'abc'
check(0, "u'ab' in 'abc' did not raise error")
except TypeError:
pass
check(unicode('c') in 'abc', "u'c' not in 'abc'")
check(unicode('d') not in 'abc', "u'd' in 'abc'")

try:
unicode('') in 'abc'
check(0, "u'' in 'abc' did not raise error")
except TypeError:
pass

try:
unicode('ab') in 'abc'
check(0, "u'ab' in 'abc' did not raise error")
except TypeError:
pass

# A collection of tests on builtin sequence types
a = range(10)
@@ -1,4 +1,4 @@
from test_support import verbose
from test_support import verbose, have_unicode
import sys

# test string formatting operator (I am not sure if this is being tested
@@ -34,7 +34,8 @@ def testformat(formatstr, args, output=None):

def testboth(formatstr, *args):
testformat(formatstr, *args)
testformat(unicode(formatstr), *args)
if have_unicode:
testformat(unicode(formatstr), *args)


testboth("%.1d", (1,), "1")
@@ -212,5 +213,6 @@ def test_exc(formatstr, args, exception, excmsg):

test_exc('abc %a', 1, ValueError,
"unsupported format character 'a' (0x61) at index 5")
test_exc(u'abc %\u3000', 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
if have_unicode:
test_exc(unicode('abc %\u3000'), 1, ValueError,
"unsupported format character '?' (0x3000) at index 5")
@@ -1,7 +1,7 @@
# Test iterators.

import unittest
from test_support import run_unittest, TESTFN, unlink
from test_support import run_unittest, TESTFN, unlink, have_unicode

# Test result of triple loop (too big to inline)
TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2),
@@ -214,8 +214,11 @@ class TestCase(unittest.TestCase):
self.check_for_loop(iter("abcde"), ["a", "b", "c", "d", "e"])

# Test a Unicode string
def test_iter_unicode(self):
self.check_for_loop(iter(u"abcde"), [u"a", u"b", u"c", u"d", u"e"])
if have_unicode:
def test_iter_unicode(self):
self.check_for_loop(iter(unicode("abcde")),
[unicode("a"), unicode("b"), unicode("c"),
unicode("d"), unicode("e")])

# Test a directory
def test_iter_dict(self):
@@ -477,6 +480,7 @@ class TestCase(unittest.TestCase):
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(reduce(add, d), "".join(d.keys()))

# This test case will be removed if we don't have Unicode
def test_unicode_join_endcase(self):

# This class inserts a Unicode object into its argument's natural
@@ -493,7 +497,7 @@ class TestCase(unittest.TestCase):
i = self.i
self.i = i+1
if i == 2:
return u"fooled you!"
return unicode("fooled you!")
return self.it.next()

f = open(TESTFN, "w")
@@ -510,13 +514,15 @@ class TestCase(unittest.TestCase):
# and pass that on to unicode.join().
try:
got = " - ".join(OhPhooey(f))
self.assertEqual(got, u"a\n - b\n - fooled you! - c\n")
self.assertEqual(got, unicode("a\n - b\n - fooled you! - c\n"))
finally:
f.close()
try:
unlink(TESTFN)
except OSError:
pass
if not have_unicode:
def test_unicode_join_endcase(self): pass

# Test iterators with 'x in y' and 'x not in y'.
def test_in_and_not_in(self):
@@ -3,6 +3,11 @@ import unittest

import test_support

try:
uni = unicode
except NameError:
def uni(x):return x


class QueryTestCase(unittest.TestCase):

@@ -14,7 +19,7 @@ class QueryTestCase(unittest.TestCase):
def test_basic(self):
"""Verify .isrecursive() and .isreadable() w/o recursion."""
verify = self.assert_
for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, u"yaddayadda",
for safe in (2, 2.0, 2j, "abc", [3], (2,2), {3: 3}, uni("yaddayadda"),
self.a, self.b):
verify(not pprint.isrecursive(safe),
"expected not isrecursive for " + `safe`)
@@ -58,8 +63,8 @@ class QueryTestCase(unittest.TestCase):
def test_same_as_repr(self):
"Simple objects and small containers that should be same as repr()."
verify = self.assert_
for simple in (0, 0L, 0+0j, 0.0, "", u"", (), [], {}, verify, pprint,
-6, -6L, -6-6j, -1.5, "x", u"x", (3,), [3], {3: 6},
for simple in (0, 0L, 0+0j, 0.0, "", uni(""), (), [], {}, verify, pprint,
-6, -6L, -6-6j, -1.5, "x", uni("x"), (3,), [3], {3: 6},
(1,2), [3,4], {5: 6, 7: 8},
{"xy\tab\n": (3,), 5: [[]], (): {}},
range(10, -11, -1)
@@ -6,7 +6,7 @@
import sys
sys.path=['.']+sys.path

from test_support import verbose, TestFailed
from test_support import verbose, TestFailed, have_unicode
import sre
import sys, os, string, traceback

@@ -378,7 +378,8 @@ for t in tests:

# Try the match with UNICODE locale enabled, and check
# that it still succeeds.
obj=sre.compile(pattern, sre.UNICODE)
result=obj.search(s)
if result==None:
print '=== Fails on unicode-sensitive match', t
if have_unicode:
obj=sre.compile(pattern, sre.UNICODE)
result=obj.search(s)
if result==None:
print '=== Fails on unicode-sensitive match', t
@@ -56,6 +56,12 @@ def fcmp(x, y): # fuzzy comparison function
return cmp(len(x), len(y))
return cmp(x, y)

try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0

import os
# Filename used for testing
if os.name == 'java':
@@ -64,9 +70,10 @@ if os.name == 'java':
elif os.name != 'riscos':
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
TESTFN_UNICODE=u"@test-\xe0\xf2" # 2 latin characters.
if os.name=="nt":
TESTFN_ENCODING="mbcs"
if have_unicode:
TESTFN_UNICODE=unicode("@test-\xe0\xf2", "latin-1") # 2 latin characters.
if os.name=="nt":
TESTFN_ENCODING="mbcs"
else:
TESTFN = 'test'
del os
@@ -4,21 +4,24 @@
from _winreg import *
import os, sys

from test_support import verify
from test_support import verify, have_unicode

test_key_name = "SOFTWARE\\Python Registry Test Key - Delete Me"

test_data = [
("Int Value", 45, REG_DWORD),
("String Val", "A string value", REG_SZ,),
(u"Unicode Val", u"A Unicode value", REG_SZ,),
("StringExpand", "The path is %path%", REG_EXPAND_SZ),
("UnicodeExpand", u"The path is %path%", REG_EXPAND_SZ),
("Multi-string", ["Lots", "of", "string", "values"], REG_MULTI_SZ),
("Multi-unicode", [u"Lots", u"of", u"unicode", u"values"], REG_MULTI_SZ),
("Multi-mixed", [u"Unicode", u"and", "string", "values"],REG_MULTI_SZ),
("Raw Data", ("binary"+chr(0)+"data"), REG_BINARY),
]
if have_unicode:
test_data+=[
(unicode("Unicode Val"), unicode("A Unicode value"), REG_SZ,),
("UnicodeExpand", unicode("The path is %path%"), REG_EXPAND_SZ),
("Multi-unicode", [unicode("Lots"), unicode("of"), unicode("unicode"), unicode("values")], REG_MULTI_SZ),
("Multi-mixed", [unicode("Unicode"), unicode("and"), "string", "values"],REG_MULTI_SZ),
]

def WriteTestData(root_key):
# Set the default value for this key.
@@ -19,7 +19,12 @@ except NameError:
pass

StringType = str
UnicodeType = unicode
try:
UnicodeType = unicode
StringTypes = [StringType, UnicodeType]
except NameError:
StringTypes = [StringType]

BufferType = type(buffer(''))

TupleType = tuple
@@ -147,6 +147,7 @@ LIBOBJS= @LIBOBJS@
DLINCLDIR= @DLINCLDIR@
DYNLOADFILE= @DYNLOADFILE@
MACHDEP_OBJS= @MACHDEP_OBJS@
UNICODE_OBJS= @UNICODE_OBJS@

PYTHON= python$(EXE)

@@ -263,8 +264,7 @@ OBJECT_OBJS= \
Objects/stringobject.o \
Objects/tupleobject.o \
Objects/typeobject.o \
Objects/unicodeobject.o \
Objects/unicodectype.o
$(UNICODE_OBJS)


##########################################################################
Misc/NEWS
@@ -1,6 +1,12 @@
What's New in Python 2.2a2?
===========================

Build

- configure supports a new option --enable-unicode, with the values
ucs2 and ucs4 (new in 2.2a1). With --disable-unicode, the Unicode
type and supporting code is completely removed from the interpreter.

Tools

- The new Tools/scripts/cleanfuture.py can be used to automatically
@@ -57,6 +63,12 @@ C API
sure to check the Unicode width compatibility in their extensions by
using at least one of the mangled Unicode APIs in the extension.

- Two new flags METH_NOARGS and METH_O are available in method definition
tables to simplify implementation of methods with no arguments and a
single untyped argument. Calling such methods is more efficient than
calling corresponding METH_VARARGS methods. METH_OLDARGS is now
deprecated.

Windows

- "import module" now compiles module.pyw if it exists and nothing else
@@ -90,12 +102,6 @@ Core
(These warnings currently don't conform to the warnings framework of
PEP 230; we intend to fix this in 2.2a2.)

- Two new flags METH_NOARGS and METH_O are available in method definition
tables to simplify implementation of methods with no arguments and a
single untyped argument. Calling such methods is more efficient than
calling corresponding METH_VARARGS methods. METH_OLDARGS is now
deprecated.

- The UTF-16 codec was modified to be more RFC compliant. It will now
only remove BOM characters at the start of the string and then
only if running in native mode (UTF-16-LE and -BE won't remove a
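The Build entry in the Misc/NEWS hunk above documents the new configure switches (--enable-unicode=ucs2|ucs4 and --disable-unicode). A minimal sketch, not part of the patch, of how a script can probe the resulting interpreter at run time with the same try/except NameError trick added to Lib/test/test_support.py; sys.maxunicode is assumed rather than guaranteed to exist, so it is looked up defensively:

    import sys

    try:
        unicode                  # the name exists only on Unicode-enabled builds
        have_unicode = 1
    except NameError:
        have_unicode = 0

    if have_unicode:
        # 0xFFFF on a ucs2 build, 0x10FFFF on a ucs4 build.
        print 'Unicode enabled, sys.maxunicode =', getattr(sys, 'maxunicode', 'unknown')
    else:
        print 'interpreter built with --disable-unicode'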
Some files were not shown because too many files have changed in this diff.