aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--extra_tests/ctypes_tests/test_buffers.py9
-rw-r--r--extra_tests/test_os.py4
-rw-r--r--lib-python/2.7/HTMLParser.py5
-rw-r--r--lib-python/2.7/SocketServer.py3
-rw-r--r--lib-python/2.7/_pyio.py1
-rw-r--r--lib-python/2.7/_strptime.py4
-rw-r--r--lib-python/2.7/_threading_local.py6
-rw-r--r--lib-python/2.7/aifc.py19
-rw-r--r--lib-python/2.7/argparse.py6
-rw-r--r--lib-python/2.7/asynchat.py2
-rw-r--r--lib-python/2.7/asyncore.py6
-rw-r--r--lib-python/2.7/bsddb/test/test_associate.py23
-rw-r--r--lib-python/2.7/bsddb/test/test_basics.py16
-rw-r--r--lib-python/2.7/bsddb/test/test_dbenv.py88
-rw-r--r--lib-python/2.7/bsddb/test/test_dbshelve.py2
-rw-r--r--lib-python/2.7/bsddb/test/test_dbtables.py4
-rw-r--r--lib-python/2.7/bsddb/test/test_distributed_transactions.py4
-rw-r--r--lib-python/2.7/bsddb/test/test_lock.py13
-rw-r--r--lib-python/2.7/bsddb/test/test_misc.py2
-rw-r--r--lib-python/2.7/bsddb/test/test_recno.py2
-rw-r--r--lib-python/2.7/bsddb/test/test_replication.py20
-rw-r--r--lib-python/2.7/bsddb/test/test_sequence.py2
-rw-r--r--lib-python/2.7/bsddb/test/test_thread.py6
-rwxr-xr-xlib-python/2.7/cProfile.py9
-rwxr-xr-xlib-python/2.7/cgi.py48
-rw-r--r--lib-python/2.7/cgitb.py4
-rw-r--r--lib-python/2.7/codecs.py8
-rw-r--r--lib-python/2.7/compiler/pyassem.py2
-rw-r--r--lib-python/2.7/compiler/transformer.py2
-rw-r--r--lib-python/2.7/cookielib.py47
-rw-r--r--lib-python/2.7/copy_reg.py6
-rw-r--r--lib-python/2.7/csv.py2
-rw-r--r--lib-python/2.7/ctypes/__init__.py4
-rw-r--r--lib-python/2.7/ctypes/test/test_anon.py13
-rw-r--r--lib-python/2.7/ctypes/test/test_arrays.py28
-rw-r--r--lib-python/2.7/ctypes/test/test_as_parameter.py2
-rw-r--r--lib-python/2.7/ctypes/test/test_callbacks.py11
-rw-r--r--lib-python/2.7/ctypes/test/test_frombuffer.py9
-rw-r--r--lib-python/2.7/ctypes/test/test_funcptr.py5
-rw-r--r--lib-python/2.7/ctypes/test/test_loading.py7
-rw-r--r--lib-python/2.7/ctypes/test/test_parameters.py31
-rw-r--r--lib-python/2.7/ctypes/test/test_pep3118.py85
-rw-r--r--lib-python/2.7/ctypes/test/test_pointers.py5
-rw-r--r--lib-python/2.7/ctypes/test/test_strings.py7
-rw-r--r--lib-python/2.7/ctypes/test/test_struct_fields.py24
-rw-r--r--lib-python/2.7/ctypes/test/test_structures.py23
-rw-r--r--lib-python/2.7/ctypes/test/test_unicode.py2
-rw-r--r--lib-python/2.7/ctypes/test/test_win32.py18
-rw-r--r--lib-python/2.7/curses/ascii.py18
-rw-r--r--lib-python/2.7/curses/has_key.py2
-rw-r--r--lib-python/2.7/curses/textpad.py31
-rw-r--r--lib-python/2.7/decimal.py2
-rw-r--r--lib-python/2.7/difflib.py2
-rw-r--r--lib-python/2.7/distutils/archive_util.py8
-rw-r--r--lib-python/2.7/distutils/ccompiler.py5
-rw-r--r--lib-python/2.7/distutils/command/bdist_dumb.py2
-rw-r--r--lib-python/2.7/distutils/command/bdist_msi.py4
-rw-r--r--lib-python/2.7/distutils/command/bdist_rpm.py2
-rw-r--r--lib-python/2.7/distutils/command/bdist_wininst.py4
-rw-r--r--lib-python/2.7/distutils/command/build.py2
-rw-r--r--lib-python/2.7/distutils/command/build_ext.py2
-rw-r--r--lib-python/2.7/distutils/command/check.py3
-rw-r--r--lib-python/2.7/distutils/command/upload.py6
-rw-r--r--lib-python/2.7/distutils/spawn.py3
-rw-r--r--lib-python/2.7/distutils/sysconfig_cpython.py20
-rw-r--r--lib-python/2.7/distutils/tests/includetest.rst1
-rw-r--r--lib-python/2.7/distutils/tests/test_archive_util.py2
-rw-r--r--lib-python/2.7/distutils/tests/test_bdist_dumb.py2
-rw-r--r--lib-python/2.7/distutils/tests/test_bdist_rpm.py2
-rw-r--r--lib-python/2.7/distutils/tests/test_build_ext.py1
-rw-r--r--lib-python/2.7/distutils/tests/test_ccompiler.py24
-rw-r--r--lib-python/2.7/distutils/tests/test_check.py32
-rw-r--r--lib-python/2.7/distutils/tests/test_install.py1
-rw-r--r--lib-python/2.7/distutils/tests/test_sdist.py12
-rw-r--r--lib-python/2.7/distutils/tests/test_spawn.py49
-rw-r--r--lib-python/2.7/distutils/tests/test_sysconfig.py99
-rw-r--r--lib-python/2.7/distutils/tests/test_upload.py26
-rw-r--r--lib-python/2.7/distutils/tests/test_util.py45
-rw-r--r--lib-python/2.7/distutils/util.py9
-rw-r--r--lib-python/2.7/doctest.py2
-rw-r--r--lib-python/2.7/email/_parseaddr.py11
-rw-r--r--lib-python/2.7/email/feedparser.py20
-rw-r--r--lib-python/2.7/email/test/test_email.py14
-rw-r--r--lib-python/2.7/email/test/test_email_renamed.py5
-rw-r--r--lib-python/2.7/email/utils.py6
-rw-r--r--lib-python/2.7/encodings/uu_codec.py4
-rw-r--r--lib-python/2.7/ensurepip/__init__.py4
-rw-r--r--lib-python/2.7/ensurepip/__main__.py3
-rw-r--r--lib-python/2.7/ensurepip/_uninstall.py5
-rw-r--r--lib-python/2.7/fpformat.py3
-rw-r--r--lib-python/2.7/ftplib.py88
-rw-r--r--lib-python/2.7/functools.py9
-rw-r--r--lib-python/2.7/gzip.py5
-rw-r--r--lib-python/2.7/httplib.py8
-rw-r--r--lib-python/2.7/idlelib/CallTipWindow.py1
-rw-r--r--lib-python/2.7/idlelib/FileList.py2
-rw-r--r--lib-python/2.7/idlelib/HyperParser.py2
-rw-r--r--lib-python/2.7/idlelib/IOBinding.py2
-rw-r--r--lib-python/2.7/idlelib/NEWS.txt27
-rwxr-xr-xlib-python/2.7/idlelib/PyShell.py10
-rw-r--r--lib-python/2.7/idlelib/SearchDialogBase.py2
-rw-r--r--lib-python/2.7/idlelib/aboutDialog.py1
-rw-r--r--lib-python/2.7/idlelib/configDialog.py2
-rw-r--r--lib-python/2.7/idlelib/configHelpSourceEdit.py2
-rw-r--r--lib-python/2.7/idlelib/configSectionNameDialog.py3
-rw-r--r--lib-python/2.7/idlelib/help.html2
-rw-r--r--lib-python/2.7/idlelib/idle_test/htest.py8
-rw-r--r--lib-python/2.7/idlelib/idle_test/mock_tk.py2
-rw-r--r--lib-python/2.7/idlelib/idle_test/test_config_name.py2
-rw-r--r--lib-python/2.7/idlelib/idle_test/test_searchdialogbase.py13
-rw-r--r--lib-python/2.7/idlelib/keybindingDialog.py4
-rw-r--r--lib-python/2.7/idlelib/run.py14
-rw-r--r--lib-python/2.7/idlelib/textView.py5
-rw-r--r--lib-python/2.7/imaplib.py19
-rw-r--r--lib-python/2.7/inspect.py18
-rw-r--r--lib-python/2.7/json/__init__.py2
-rw-r--r--lib-python/2.7/json/tests/test_speedups.py28
-rw-r--r--lib-python/2.7/lib-tk/Tix.py4
-rw-r--r--lib-python/2.7/lib-tk/Tkinter.py43
-rw-r--r--lib-python/2.7/lib-tk/test/test_tkinter/test_font.py12
-rw-r--r--lib-python/2.7/lib-tk/test/test_tkinter/test_misc.py122
-rw-r--r--lib-python/2.7/lib-tk/test/test_tkinter/test_widgets.py38
-rw-r--r--lib-python/2.7/lib-tk/test/test_ttk/support.py14
-rw-r--r--lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py25
-rw-r--r--lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py20
-rw-r--r--lib-python/2.7/lib-tk/tkFont.py4
-rw-r--r--lib-python/2.7/lib-tk/ttk.py14
-rw-r--r--lib-python/2.7/lib-tk/turtle.py12
-rw-r--r--lib-python/2.7/lib2to3/fixer_base.py2
-rw-r--r--lib-python/2.7/lib2to3/fixes/fix_execfile.py3
-rw-r--r--lib-python/2.7/lib2to3/patcomp.py15
-rw-r--r--lib-python/2.7/lib2to3/pgen2/driver.py21
-rw-r--r--lib-python/2.7/lib2to3/pgen2/grammar.py4
-rw-r--r--lib-python/2.7/lib2to3/pgen2/pgen.py2
-rw-r--r--lib-python/2.7/lib2to3/pygram.py4
-rw-r--r--lib-python/2.7/lib2to3/refactor.py8
-rw-r--r--lib-python/2.7/lib2to3/tests/test_fixers.py16
-rw-r--r--lib-python/2.7/lib2to3/tests/test_parser.py15
-rw-r--r--lib-python/2.7/locale.py75
-rw-r--r--lib-python/2.7/logging/__init__.py19
-rw-r--r--lib-python/2.7/logging/handlers.py27
-rw-r--r--lib-python/2.7/mimetypes.py2
-rw-r--r--lib-python/2.7/msilib/__init__.py2
-rw-r--r--lib-python/2.7/multiprocessing/forking.py3
-rw-r--r--lib-python/2.7/multiprocessing/managers.py5
-rw-r--r--lib-python/2.7/multiprocessing/pool.py9
-rw-r--r--lib-python/2.7/multiprocessing/process.py19
-rw-r--r--lib-python/2.7/multiprocessing/queues.py20
-rw-r--r--lib-python/2.7/multiprocessing/util.py5
-rw-r--r--lib-python/2.7/netrc.py12
-rw-r--r--lib-python/2.7/pickletools.py12
-rw-r--r--lib-python/2.7/pkgutil.py7
-rwxr-xr-xlib-python/2.7/platform.py95
-rw-r--r--lib-python/2.7/poplib.py2
-rw-r--r--lib-python/2.7/posixpath.py9
-rwxr-xr-xlib-python/2.7/pydoc.py10
-rw-r--r--lib-python/2.7/pydoc_data/topics.py13657
-rw-r--r--lib-python/2.7/random.py6
-rw-r--r--lib-python/2.7/robotparser.py5
-rw-r--r--lib-python/2.7/shutil.py67
-rwxr-xr-xlib-python/2.7/smtplib.py1
-rw-r--r--lib-python/2.7/sqlite3/test/factory.py19
-rw-r--r--lib-python/2.7/sqlite3/test/regression.py132
-rw-r--r--lib-python/2.7/sqlite3/test/types.py3
-rw-r--r--lib-python/2.7/sre_compile.py2
-rw-r--r--lib-python/2.7/sre_parse.py64
-rw-r--r--lib-python/2.7/ssl.py42
-rw-r--r--lib-python/2.7/subprocess.py67
-rw-r--r--lib-python/2.7/sysconfig.py5
-rwxr-xr-xlib-python/2.7/tabnanny.py2
-rw-r--r--lib-python/2.7/telnetlib.py4
-rw-r--r--lib-python/2.7/test/__main__.py3
-rw-r--r--lib-python/2.7/test/allsans.pem106
-rwxr-xr-xlib-python/2.7/test/bisect_cmd.py181
-rw-r--r--lib-python/2.7/test/crashers/warnings_del_crasher.py29
-rw-r--r--lib-python/2.7/test/dh1024.pem7
-rw-r--r--lib-python/2.7/test/ffdh3072.pem41
-rw-r--r--lib-python/2.7/test/fork_wait.py23
-rw-r--r--lib-python/2.7/test/inspect_fodder.py6
-rw-r--r--lib-python/2.7/test/keycert.passwd.pem85
-rw-r--r--lib-python/2.7/test/list_tests.py7
-rw-r--r--lib-python/2.7/test/lock_tests.py26
-rw-r--r--lib-python/2.7/test/make_ssl_certs.py8
-rw-r--r--lib-python/2.7/test/mapping_tests.py9
-rw-r--r--lib-python/2.7/test/multibytecodec_support.py (renamed from lib-python/2.7/test/test_multibytecodec_support.py)2
-rw-r--r--lib-python/2.7/test/pickletester.py43
-rw-r--r--lib-python/2.7/test/pycacert.pem139
-rw-r--r--lib-python/2.7/test/pythoninfo.py757
-rwxr-xr-xlib-python/2.7/test/regrtest.py936
-rw-r--r--lib-python/2.7/test/revocation.crl19
-rw-r--r--lib-python/2.7/test/script_helper.py171
-rw-r--r--lib-python/2.7/test/sha256.pem128
-rw-r--r--lib-python/2.7/test/ssl_cert.pem37
-rw-r--r--lib-python/2.7/test/ssl_key.passwd.pem52
-rw-r--r--lib-python/2.7/test/ssl_key.pem52
-rw-r--r--lib-python/2.7/test/ssl_servers.py5
-rw-r--r--lib-python/2.7/test/support/__init__.py2222
-rw-r--r--lib-python/2.7/test/support/script_helper.py170
-rw-r--r--lib-python/2.7/test/talos-2019-0758.pem22
-rw-r--r--lib-python/2.7/test/test_abc.py1
-rw-r--r--lib-python/2.7/test/test_aifc.py20
-rw-r--r--lib-python/2.7/test/test_argparse.py42
-rw-r--r--lib-python/2.7/test/test_ast.py9
-rw-r--r--lib-python/2.7/test/test_asyncore.py43
-rw-r--r--lib-python/2.7/test/test_atexit.py16
-rw-r--r--lib-python/2.7/test/test_audioop.py4
-rw-r--r--lib-python/2.7/test/test_bdb.py1034
-rw-r--r--lib-python/2.7/test/test_bisect.py1
-rw-r--r--lib-python/2.7/test/test_bsddb.py8
-rw-r--r--lib-python/2.7/test/test_bsddb3.py4
-rw-r--r--lib-python/2.7/test/test_buffer.py19
-rw-r--r--lib-python/2.7/test/test_builtin.py24
-rw-r--r--lib-python/2.7/test/test_bytes.py108
-rw-r--r--lib-python/2.7/test/test_capi.py36
-rw-r--r--lib-python/2.7/test/test_cgi.py55
-rw-r--r--lib-python/2.7/test/test_class.py43
-rw-r--r--lib-python/2.7/test/test_codecencodings_cn.py28
-rw-r--r--lib-python/2.7/test/test_codecencodings_hk.py6
-rw-r--r--lib-python/2.7/test/test_codecencodings_iso2022.py14
-rw-r--r--lib-python/2.7/test/test_codecencodings_jp.py22
-rw-r--r--lib-python/2.7/test/test_codecencodings_kr.py14
-rw-r--r--lib-python/2.7/test/test_codecencodings_tw.py6
-rw-r--r--lib-python/2.7/test/test_codecmaps_cn.py8
-rw-r--r--lib-python/2.7/test/test_codecmaps_hk.py4
-rw-r--r--lib-python/2.7/test/test_codecmaps_jp.py12
-rw-r--r--lib-python/2.7/test/test_codecmaps_kr.py8
-rw-r--r--lib-python/2.7/test/test_codecmaps_tw.py6
-rw-r--r--lib-python/2.7/test/test_codecs.py18
-rw-r--r--lib-python/2.7/test/test_compile.py2
-rw-r--r--lib-python/2.7/test/test_compiler.py2
-rw-r--r--lib-python/2.7/test/test_complex.py29
-rw-r--r--lib-python/2.7/test/test_cookielib.py74
-rw-r--r--lib-python/2.7/test/test_copy_reg.py10
-rw-r--r--lib-python/2.7/test/test_cprofile.py10
-rw-r--r--lib-python/2.7/test/test_crypt.py12
-rw-r--r--lib-python/2.7/test/test_csv.py9
-rw-r--r--lib-python/2.7/test/test_curses.py106
-rw-r--r--lib-python/2.7/test/test_datetime.py63
-rw-r--r--lib-python/2.7/test/test_decimal.py4
-rw-r--r--lib-python/2.7/test/test_deque.py15
-rw-r--r--lib-python/2.7/test/test_descr.py158
-rw-r--r--lib-python/2.7/test/test_dict.py100
-rw-r--r--lib-python/2.7/test/test_difflib.py22
-rw-r--r--lib-python/2.7/test/test_doctest.py49
-rw-r--r--lib-python/2.7/test/test_ensurepip.py19
-rw-r--r--lib-python/2.7/test/test_file.py168
-rw-r--r--lib-python/2.7/test/test_file2k.py27
-rw-r--r--lib-python/2.7/test/test_fileio.py8
-rw-r--r--lib-python/2.7/test/test_fnmatch.py115
-rw-r--r--lib-python/2.7/test/test_fpformat.py10
-rw-r--r--lib-python/2.7/test/test_ftplib.py32
-rw-r--r--lib-python/2.7/test/test_functools.py61
-rw-r--r--lib-python/2.7/test/test_gc.py4
-rw-r--r--lib-python/2.7/test/test_gdb.py106
-rw-r--r--lib-python/2.7/test/test_gdbm.py11
-rw-r--r--lib-python/2.7/test/test_generators.py11
-rw-r--r--lib-python/2.7/test/test_getargs2.py146
-rw-r--r--lib-python/2.7/test/test_glob.py6
-rw-r--r--lib-python/2.7/test/test_grammar.py341
-rw-r--r--lib-python/2.7/test/test_gzip.py35
-rw-r--r--lib-python/2.7/test/test_hashlib.py20
-rw-r--r--lib-python/2.7/test/test_httpservers.py9
-rw-r--r--lib-python/2.7/test/test_imaplib.py15
-rw-r--r--lib-python/2.7/test/test_import.py29
-rw-r--r--lib-python/2.7/test/test_import_magic.py60
-rw-r--r--lib-python/2.7/test/test_inspect.py62
-rw-r--r--lib-python/2.7/test/test_io.py99
-rw-r--r--lib-python/2.7/test/test_itertools.py143
-rw-r--r--lib-python/2.7/test/test_kqueue.py33
-rw-r--r--lib-python/2.7/test/test_linecache.py4
-rw-r--r--lib-python/2.7/test/test_locale.py2
-rw-r--r--lib-python/2.7/test/test_mailbox.py2
-rw-r--r--lib-python/2.7/test/test_marshal.py18
-rw-r--r--lib-python/2.7/test/test_memoryio.py1
-rw-r--r--lib-python/2.7/test/test_memoryview.py20
-rw-r--r--lib-python/2.7/test/test_minidom.py57
-rw-r--r--lib-python/2.7/test/test_mmap.py7
-rw-r--r--lib-python/2.7/test/test_msilib.py56
-rw-r--r--lib-python/2.7/test/test_multiprocessing.py224
-rw-r--r--lib-python/2.7/test/test_netrc.py10
-rw-r--r--lib-python/2.7/test/test_ordered_dict.py13
-rw-r--r--lib-python/2.7/test/test_os.py142
-rw-r--r--lib-python/2.7/test/test_parser.py104
-rw-r--r--lib-python/2.7/test/test_pkg.py2
-rw-r--r--lib-python/2.7/test/test_platform.py16
-rw-r--r--lib-python/2.7/test/test_poll.py22
-rw-r--r--lib-python/2.7/test/test_poplib.py10
-rw-r--r--lib-python/2.7/test/test_posix.py20
-rw-r--r--lib-python/2.7/test/test_posixpath.py72
-rw-r--r--lib-python/2.7/test/test_pty.py30
-rw-r--r--lib-python/2.7/test/test_py_compile.py2
-rw-r--r--lib-python/2.7/test/test_pydoc.py2
-rw-r--r--lib-python/2.7/test/test_random.py21
-rw-r--r--lib-python/2.7/test/test_re.py94
-rw-r--r--lib-python/2.7/test/test_regrtest.py833
-rw-r--r--lib-python/2.7/test/test_robotparser.py432
-rw-r--r--lib-python/2.7/test/test_sax.py18
-rw-r--r--lib-python/2.7/test/test_shutil.py7
-rw-r--r--lib-python/2.7/test/test_signal.py43
-rw-r--r--lib-python/2.7/test/test_site.py77
-rw-r--r--lib-python/2.7/test/test_smtplib.py4
-rw-r--r--lib-python/2.7/test/test_socket.py88
-rw-r--r--lib-python/2.7/test/test_socketserver.py24
-rw-r--r--lib-python/2.7/test/test_ssl.py289
-rw-r--r--lib-python/2.7/test/test_startfile.py27
-rw-r--r--lib-python/2.7/test/test_str.py110
-rw-r--r--lib-python/2.7/test/test_strftime.py6
-rw-r--r--lib-python/2.7/test/test_strop.py3
-rw-r--r--lib-python/2.7/test/test_strptime.py6
-rw-r--r--lib-python/2.7/test/test_subprocess.py88
-rw-r--r--lib-python/2.7/test/test_sundry.py1
-rw-r--r--lib-python/2.7/test/test_support.py1763
-rw-r--r--lib-python/2.7/test/test_syntax.py344
-rw-r--r--lib-python/2.7/test/test_sys.py16
-rw-r--r--lib-python/2.7/test/test_sys_settrace.py734
-rw-r--r--lib-python/2.7/test/test_sysconfig.py2
-rw-r--r--lib-python/2.7/test/test_tcl.py77
-rw-r--r--lib-python/2.7/test/test_tempfile.py13
-rw-r--r--lib-python/2.7/test/test_test_support.py465
-rw-r--r--lib-python/2.7/test/test_thread.py109
-rw-r--r--lib-python/2.7/test/test_threadsignals.py8
-rw-r--r--lib-python/2.7/test/test_time.py85
-rw-r--r--lib-python/2.7/test/test_timeout.py1
-rw-r--r--lib-python/2.7/test/test_tools.py34
-rw-r--r--lib-python/2.7/test/test_unicode.py8
-rw-r--r--lib-python/2.7/test/test_unicodedata.py13
-rw-r--r--lib-python/2.7/test/test_urllib.py59
-rw-r--r--lib-python/2.7/test/test_urllib2.py25
-rw-r--r--lib-python/2.7/test/test_urllib2_localnet.py149
-rw-r--r--lib-python/2.7/test/test_urllib2net.py31
-rw-r--r--lib-python/2.7/test/test_urllibnet.py67
-rw-r--r--lib-python/2.7/test/test_urlparse.py41
-rw-r--r--lib-python/2.7/test/test_uu.py125
-rw-r--r--lib-python/2.7/test/test_uuid.py57
-rw-r--r--lib-python/2.7/test/test_warnings.py44
-rw-r--r--lib-python/2.7/test/test_weakref.py57
-rw-r--r--lib-python/2.7/test/test_wsgiref.py82
-rw-r--r--lib-python/2.7/test/test_xml_etree.py4247
-rw-r--r--lib-python/2.7/test/test_xml_etree_c.py59
-rw-r--r--lib-python/2.7/test/test_zipfile.py14
-rw-r--r--lib-python/2.7/test/wrongcert.pem32
-rw-r--r--lib-python/2.7/test/xmltestdata/expat224_utf8_bug.xml2
-rw-r--r--lib-python/2.7/textwrap.py2
-rw-r--r--lib-python/2.7/threading.py3
-rwxr-xr-xlib-python/2.7/trace.py2
-rw-r--r--lib-python/2.7/unittest/case.py5
-rw-r--r--lib-python/2.7/unittest/loader.py8
-rw-r--r--lib-python/2.7/unittest/signals.py2
-rw-r--r--lib-python/2.7/unittest/test/test_loader.py8
-rw-r--r--lib-python/2.7/urlparse.py42
-rwxr-xr-xlib-python/2.7/uu.py7
-rw-r--r--lib-python/2.7/uuid.py34
-rw-r--r--lib-python/2.7/warnings.py8
-rw-r--r--lib-python/2.7/weakref.py35
-rwxr-xr-xlib-python/2.7/webbrowser.py7
-rw-r--r--lib-python/2.7/xml/dom/domreg.py4
-rw-r--r--lib-python/2.7/xml/dom/minidom.py4
-rw-r--r--lib-python/2.7/xml/etree/ElementTree.py20
-rw-r--r--lib-python/2.7/xml/sax/__init__.py2
-rw-r--r--lib-python/2.7/xml/sax/expatreader.py25
-rw-r--r--lib-python/2.7/zipfile.py58
-rw-r--r--lib-python/conftest.py4
-rw-r--r--lib-python/stdlib-upgrade.txt12
-rw-r--r--lib-python/stdlib-version.txt7
-rw-r--r--lib_pypy/_cffi_ssl/_stdssl/__init__.py4
-rw-r--r--lib_pypy/_cffi_ssl/_stdssl/certificate.py3
-rw-r--r--lib_pypy/_ctypes/array.py29
-rw-r--r--lib_pypy/_ctypes/basics.py7
-rw-r--r--lib_pypy/_ctypes/pointer.py22
-rw-r--r--lib_pypy/_ctypes/primitive.py25
-rw-r--r--lib_pypy/_ctypes/structure.py11
-rw-r--r--lib_pypy/_ctypes_test.c207
-rw-r--r--lib_pypy/_sqlite3.py3
-rw-r--r--lib_pypy/_testcapimodule.c264
-rw-r--r--lib_pypy/_tkinter/tclobj.py2
-rw-r--r--lib_pypy/datetime.py7
-rw-r--r--pypy/doc/whatsnew-head.rst6
-rw-r--r--pypy/interpreter/astcompiler/astbuilder.py2
-rw-r--r--pypy/interpreter/astcompiler/test/test_astbuilder.py6
-rw-r--r--pypy/interpreter/executioncontext.py4
-rw-r--r--pypy/interpreter/pyframe.py27
-rw-r--r--pypy/interpreter/test/test_typedef.py2
-rw-r--r--pypy/module/_io/interp_iobase.py19
-rw-r--r--pypy/module/_io/interp_textio.py9
-rw-r--r--pypy/module/_io/test/apptest_io.py14
-rw-r--r--pypy/module/_io/test/apptest_textio.py36
-rw-r--r--pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c28
-rw-r--r--pypy/module/_multibytecodec/test/test_c_codecs.py13
-rw-r--r--pypy/module/_random/interp_random.py4
-rw-r--r--pypy/module/_random/test/test_random.py12
-rw-r--r--pypy/module/_socket/test/test_sock_app.py12
-rw-r--r--pypy/module/cpyext/longobject.py2
-rw-r--r--pypy/module/cpyext/sliceobject.py7
-rw-r--r--pypy/module/cpyext/test/test_longobject.py10
-rw-r--r--pypy/module/cpyext/test/test_sliceobject.py4
-rw-r--r--pypy/module/itertools/interp_itertools.py10
-rw-r--r--pypy/module/itertools/test/test_itertools.py28
-rw-r--r--pypy/module/posix/interp_posix.py9
-rw-r--r--pypy/module/posix/test/test_posix2.py10
-rw-r--r--pypy/module/select/interp_kqueue.py10
-rw-r--r--pypy/module/select/test/test_kqueue.py26
-rw-r--r--pypy/module/signal/interp_signal.py9
-rw-r--r--pypy/module/test_lib_pypy/test_os_wait.py5
-rw-r--r--pypy/objspace/descroperation.py2
-rw-r--r--pypy/objspace/std/bytearrayobject.py5
-rw-r--r--pypy/objspace/std/test/test_bytearrayobject.py12
-rw-r--r--pypy/objspace/std/test/test_newformat.py14
-rw-r--r--pypy/objspace/std/typeobject.py2
-rw-r--r--pypy/objspace/test/apptest_descriptor.py9
-rw-r--r--rpython/rlib/rsre/rpy/sre_compile.py2
-rw-r--r--rpython/rlib/rsre/rpy/sre_parse.py64
411 files changed, 31633 insertions, 6823 deletions
diff --git a/extra_tests/ctypes_tests/test_buffers.py b/extra_tests/ctypes_tests/test_buffers.py
index 1e360a62b3..b862cb317c 100644
--- a/extra_tests/ctypes_tests/test_buffers.py
+++ b/extra_tests/ctypes_tests/test_buffers.py
@@ -45,6 +45,9 @@ def normalize(fmt):
else:
return fmt
+s_long = {4: 'l', 8: 'q'}[sizeof(c_long)]
+s_ulong = {4: 'L', 8: 'Q'}[sizeof(c_long)]
+
@pytest.mark.parametrize("tp, fmt", [
## simple types
(c_char, "<c"),
@@ -52,15 +55,15 @@ def normalize(fmt):
(c_ubyte, "<B"),
(c_short, "<h"),
(c_ushort, "<H"),
- (c_long, "<l"),
- (c_ulong, "<L"),
+ (c_long, "<" + s_long),
+ (c_ulong, "<" + s_ulong),
(c_float, "<f"),
(c_double, "<d"),
(c_bool, "<?"),
(py_object, "<O"),
## pointers
(POINTER(c_byte), "&<b"),
- (POINTER(POINTER(c_long)), "&&<l"),
+ (POINTER(POINTER(c_long)), "&&<" + s_long),
## arrays and pointers
(c_double * 4, "<d"),
(c_float * 4 * 3 * 2, "<f"),
diff --git a/extra_tests/test_os.py b/extra_tests/test_os.py
index 0f60f370b8..334590df21 100644
--- a/extra_tests/test_os.py
+++ b/extra_tests/test_os.py
@@ -62,11 +62,11 @@ if hasattr(os, "execv"):
pid = os.fork()
if pid == 0:
os.execve("/bin/sh",
- ["sh", "-c", "echo -n $ddd > /tmp/onefile2"],
+ ["sh", "-c", "echo $ddd > /tmp/onefile2"],
{'ddd': 'xxx'},
)
os.waitpid(pid, 0)
- assert open("/tmp/onefile2").read() == "xxx"
+ assert open("/tmp/onefile2").read().rstrip() == "xxx"
os.unlink("/tmp/onefile2")
def test_execve_unicode():
diff --git a/lib-python/2.7/HTMLParser.py b/lib-python/2.7/HTMLParser.py
index 3f97830a9a..fb9380e128 100644
--- a/lib-python/2.7/HTMLParser.py
+++ b/lib-python/2.7/HTMLParser.py
@@ -462,11 +462,12 @@ class HTMLParser(markupbase.ParserBase):
else:
# Cannot use name2codepoint directly, because HTMLParser supports apos,
# which is not part of HTML 4
- import htmlentitydefs
if HTMLParser.entitydefs is None:
- entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
+ import htmlentitydefs
+ entitydefs = {'apos':u"'"}
for k, v in htmlentitydefs.name2codepoint.iteritems():
entitydefs[k] = unichr(v)
+ HTMLParser.entitydefs = entitydefs
try:
return self.entitydefs[s]
except KeyError:
diff --git a/lib-python/2.7/SocketServer.py b/lib-python/2.7/SocketServer.py
index 122430e362..df56830f05 100644
--- a/lib-python/2.7/SocketServer.py
+++ b/lib-python/2.7/SocketServer.py
@@ -229,6 +229,9 @@ class BaseServer:
# shutdown request and wastes cpu at all other times.
r, w, e = _eintr_retry(select.select, [self], [], [],
poll_interval)
+ # bpo-35017: shutdown() called during select(), exit immediately.
+ if self.__shutdown_request:
+ break
if self in r:
self._handle_request_noblock()
finally:
diff --git a/lib-python/2.7/_pyio.py b/lib-python/2.7/_pyio.py
index f022a4e88b..98c2d58d0d 100644
--- a/lib-python/2.7/_pyio.py
+++ b/lib-python/2.7/_pyio.py
@@ -1619,6 +1619,7 @@ class TextIOWrapper(TextIOBase):
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
+ self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
diff --git a/lib-python/2.7/_strptime.py b/lib-python/2.7/_strptime.py
index feac05a001..8eb2718d5c 100644
--- a/lib-python/2.7/_strptime.py
+++ b/lib-python/2.7/_strptime.py
@@ -254,8 +254,8 @@ class TimeRE(dict):
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
- whitespace_replacement = re_compile('\s+')
- format = whitespace_replacement.sub('\s+', format)
+ whitespace_replacement = re_compile(r'\s+')
+ format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
diff --git a/lib-python/2.7/_threading_local.py b/lib-python/2.7/_threading_local.py
index 223987c55f..fc092e6a71 100644
--- a/lib-python/2.7/_threading_local.py
+++ b/lib-python/2.7/_threading_local.py
@@ -57,11 +57,7 @@ You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
- ... initialized = False
... def __init__(self, **kw):
- ... if self.initialized:
- ... raise SystemError('__init__ called too many times')
- ... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
@@ -98,7 +94,7 @@ As before, we can access the data in a separate thread:
>>> thread.start()
>>> thread.join()
>>> log
- [[('color', 'red'), ('initialized', True)], 11]
+ [[('color', 'red')], 11]
without affecting this thread's data:
diff --git a/lib-python/2.7/aifc.py b/lib-python/2.7/aifc.py
index c9a021ee9d..981f801069 100644
--- a/lib-python/2.7/aifc.py
+++ b/lib-python/2.7/aifc.py
@@ -288,6 +288,8 @@ class Aifc_read:
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
+ _file = None # Set here since __del__ checks it
+
def initfp(self, file):
self._version = 0
self._decomp = None
@@ -306,6 +308,7 @@ class Aifc_read:
else:
raise Error, 'not an AIFF or AIFF-C file'
self._comm_chunk_read = 0
+ self._ssnd_chunk = None
while 1:
self._ssnd_seek_needed = 1
try:
@@ -341,10 +344,16 @@ class Aifc_read:
self._decomp.SetParams(params)
def __init__(self, f):
- if type(f) == type(''):
+ if isinstance(f, basestring):
f = __builtin__.open(f, 'rb')
- # else, assume it is an open file object already
- self.initfp(f)
+ try:
+ self.initfp(f)
+ except:
+ f.close()
+ raise
+ else:
+ # assume it is an open file object already
+ self.initfp(f)
#
# User visible methods.
@@ -562,8 +571,10 @@ class Aifc_write:
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
+ _file = None # Set here since __del__ checks it
+
def __init__(self, f):
- if type(f) == type(''):
+ if isinstance(f, basestring):
filename = f
f = __builtin__.open(f, 'wb')
else:
diff --git a/lib-python/2.7/argparse.py b/lib-python/2.7/argparse.py
index d9f59e3d26..e0b86dc502 100644
--- a/lib-python/2.7/argparse.py
+++ b/lib-python/2.7/argparse.py
@@ -324,7 +324,11 @@ class HelpFormatter(object):
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
- part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+ part_regexp = (
+ r'\(.*?\)+(?=\s|$)|'
+ r'\[.*?\]+(?=\s|$)|'
+ r'\S+'
+ )
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
diff --git a/lib-python/2.7/asynchat.py b/lib-python/2.7/asynchat.py
index 57459a0821..392ee61a45 100644
--- a/lib-python/2.7/asynchat.py
+++ b/lib-python/2.7/asynchat.py
@@ -133,7 +133,7 @@ class async_chat (asyncore.dispatcher):
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
- elif isinstance(terminator, int) or isinstance(terminator, long):
+ elif isinstance(terminator, (int, long)):
# numeric terminator
n = terminator
if lb < n:
diff --git a/lib-python/2.7/asyncore.py b/lib-python/2.7/asyncore.py
index 29099bdf5c..105982f790 100644
--- a/lib-python/2.7/asyncore.py
+++ b/lib-python/2.7/asyncore.py
@@ -633,7 +633,11 @@ if os.name == 'posix':
write = send
def close(self):
- os.close(self.fd)
+ if self.fd < 0:
+ return
+ fd = self.fd
+ self.fd = -1
+ os.close(fd)
def fileno(self):
return self.fd
diff --git a/lib-python/2.7/bsddb/test/test_associate.py b/lib-python/2.7/bsddb/test/test_associate.py
index 7a49e11ef7..4a8d2aed99 100644
--- a/lib-python/2.7/bsddb/test/test_associate.py
+++ b/lib-python/2.7/bsddb/test/test_associate.py
@@ -114,6 +114,22 @@ class AssociateErrorTestCase(unittest.TestCase):
dupDB.close()
self.fail("DBError exception was expected")
+ @unittest.skipUnless(db.version() >= (4, 6), 'Needs 4.6+')
+ def test_associateListError(self):
+ db1 = db.DB(self.env)
+ db1.open('bad.db', "a.db", db.DB_BTREE, db.DB_CREATE)
+ db2 = db.DB(self.env)
+ db2.open('bad.db', "b.db", db.DB_BTREE, db.DB_CREATE)
+
+ db1.associate(db2, lambda a, b: [0])
+
+ msg = "TypeError: The list returned by DB->associate callback" \
+ " should be a list of strings."
+ with test_support.captured_output("stderr") as s:
+ db1.put("0", "1")
+ db1.close()
+ db2.close()
+ self.assertEquals(s.getvalue().strip(), msg)
#----------------------------------------------------------------------
@@ -233,7 +249,7 @@ class AssociateTestCase(unittest.TestCase):
self.assertEqual(vals, None, vals)
vals = secDB.pget('Unknown', txn=txn)
- self.assertTrue(vals[0] == 99 or vals[0] == '99', vals)
+ self.assertIn(vals[0], (99, '99'), vals)
vals[1].index('Unknown')
vals[1].index('Unnamed')
vals[1].index('unknown')
@@ -247,7 +263,8 @@ class AssociateTestCase(unittest.TestCase):
if type(self.keytype) == type(''):
self.assertTrue(int(rec[0])) # for primary db, key is a number
else:
- self.assertTrue(rec[0] and type(rec[0]) == type(0))
+ self.assertTrue(rec[0])
+ self.assertIs(type(rec[0]), int)
count = count + 1
if verbose:
print rec
@@ -262,7 +279,7 @@ class AssociateTestCase(unittest.TestCase):
# test cursor pget
vals = self.cur.pget('Unknown', flags=db.DB_LAST)
- self.assertTrue(vals[1] == 99 or vals[1] == '99', vals)
+ self.assertIn(vals[1], (99, '99'), vals)
self.assertEqual(vals[0], 'Unknown')
vals[2].index('Unknown')
vals[2].index('Unnamed')
diff --git a/lib-python/2.7/bsddb/test/test_basics.py b/lib-python/2.7/bsddb/test/test_basics.py
index 1459d3636c..6b8cf7d110 100644
--- a/lib-python/2.7/bsddb/test/test_basics.py
+++ b/lib-python/2.7/bsddb/test/test_basics.py
@@ -597,7 +597,7 @@ class BasicTestCase(unittest.TestCase):
d.put("abcde", "ABCDE");
num = d.truncate()
- self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database")
+ self.assertGreaterEqual(num, 1, "truncate returned <= 0 on non-empty database")
num = d.truncate()
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
@@ -616,9 +616,9 @@ class BasicTestCase(unittest.TestCase):
if db.version() >= (4, 6):
def test08_exists(self) :
self.d.put("abcde", "ABCDE")
- self.assertTrue(self.d.exists("abcde") == True,
+ self.assertEqual(self.d.exists("abcde"), True,
"DB->exists() returns wrong value")
- self.assertTrue(self.d.exists("x") == False,
+ self.assertEqual(self.d.exists("x"), False,
"DB->exists() returns wrong value")
#----------------------------------------
@@ -773,7 +773,7 @@ class BasicTransactionTestCase(BasicTestCase):
if verbose:
print 'log file: ' + log
logs = self.env.log_archive(db.DB_ARCH_REMOVE)
- self.assertTrue(not logs)
+ self.assertFalse(logs)
self.txn = self.env.txn_begin()
@@ -785,9 +785,9 @@ class BasicTransactionTestCase(BasicTestCase):
self.d.put("abcde", "ABCDE", txn=txn)
txn.commit()
txn = self.env.txn_begin()
- self.assertTrue(self.d.exists("abcde", txn=txn) == True,
+ self.assertEqual(self.d.exists("abcde", txn=txn), True,
"DB->exists() returns wrong value")
- self.assertTrue(self.d.exists("x", txn=txn) == False,
+ self.assertEqual(self.d.exists("x", txn=txn), False,
"DB->exists() returns wrong value")
txn.abort()
@@ -802,7 +802,7 @@ class BasicTransactionTestCase(BasicTestCase):
d.put("abcde", "ABCDE");
txn = self.env.txn_begin()
num = d.truncate(txn)
- self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database")
+ self.assertGreaterEqual(num, 1, "truncate returned <= 0 on non-empty database")
num = d.truncate(txn)
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
@@ -1086,7 +1086,7 @@ class PrivateObject(unittest.TestCase) :
a = "example of private object"
self.obj.set_private(a)
b = self.obj.get_private()
- self.assertTrue(a is b) # Object identity
+ self.assertIs(a, b) # Object identity
def test03_leak_assignment(self) :
a = "example of private object"
diff --git a/lib-python/2.7/bsddb/test/test_dbenv.py b/lib-python/2.7/bsddb/test/test_dbenv.py
index 76ef7db69e..5429ca5809 100644
--- a/lib-python/2.7/bsddb/test/test_dbenv.py
+++ b/lib-python/2.7/bsddb/test/test_dbenv.py
@@ -54,15 +54,15 @@ class DBEnv_general(DBEnv) :
self.env.set_cache_max(0, size)
size2 = self.env.get_cache_max()
self.assertEqual(0, size2[0])
- self.assertTrue(size <= size2[1])
- self.assertTrue(2*size > size2[1])
+ self.assertLessEqual(size, size2[1])
+ self.assertGreater(2*size, size2[1])
if db.version() >= (4, 4) :
def test_mutex_stat(self) :
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK)
stat = self.env.mutex_stat()
- self.assertTrue("mutex_inuse_max" in stat)
+ self.assertIn("mutex_inuse_max", stat)
def test_lg_filemode(self) :
for i in [0600, 0660, 0666] :
@@ -128,8 +128,8 @@ class DBEnv_general(DBEnv) :
i = i*1024*1024
self.env.set_lg_regionmax(i)
j = self.env.get_lg_regionmax()
- self.assertTrue(i <= j)
- self.assertTrue(2*i > j)
+ self.assertLessEqual(i, j)
+ self.assertGreater(2*i, j)
def test_lk_detect(self) :
flags= [db.DB_LOCK_DEFAULT, db.DB_LOCK_EXPIRE, db.DB_LOCK_MAXLOCKS,
@@ -150,10 +150,10 @@ class DBEnv_general(DBEnv) :
def test_lg_bsize(self) :
log_size = 70*1024
self.env.set_lg_bsize(log_size)
- self.assertTrue(self.env.get_lg_bsize() >= log_size)
- self.assertTrue(self.env.get_lg_bsize() < 4*log_size)
+ self.assertGreaterEqual(self.env.get_lg_bsize(), log_size)
+ self.assertLess(self.env.get_lg_bsize(), 4*log_size)
self.env.set_lg_bsize(4*log_size)
- self.assertTrue(self.env.get_lg_bsize() >= 4*log_size)
+ self.assertGreaterEqual(self.env.get_lg_bsize(), 4*log_size)
def test_setget_data_dirs(self) :
dirs = ("a", "b", "c", "d")
@@ -185,7 +185,7 @@ class DBEnv_general(DBEnv) :
self.assertEqual(cachesize2[0], cachesize3[0])
self.assertEqual(cachesize2[2], cachesize3[2])
# In Berkeley DB 5.1, the cachesize can change when opening the Env
- self.assertTrue(cachesize2[1] <= cachesize3[1])
+ self.assertLessEqual(cachesize2[1], cachesize3[1])
def test_set_cachesize_dbenv_db(self) :
# You can not configure the cachesize using
@@ -299,7 +299,7 @@ class DBEnv_log(DBEnv) :
msg = "This is a test..."
self.env.log_printf(msg)
logc = self.env.log_cursor()
- self.assertTrue(msg in (logc.last()[1]))
+ self.assertIn(msg, logc.last()[1])
if db.version() >= (4, 7) :
def test_log_config(self) :
@@ -341,21 +341,21 @@ class DBEnv_log_txn(DBEnv) :
txn.commit()
logc = self.env.log_cursor()
logc.last() # Skip the commit
- self.assertTrue(msg in (logc.prev()[1]))
+ self.assertIn(msg, logc.prev()[1])
msg = "This is another test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
txn.abort() # Do not store the new message
logc.last() # Skip the abort
- self.assertTrue(msg not in (logc.prev()[1]))
+ self.assertNotIn(msg, logc.prev()[1])
msg = "This is a third test..."
txn = self.env.txn_begin()
self.env.log_printf(msg, txn=txn)
txn.commit() # Do not store the new message
logc.last() # Skip the commit
- self.assertTrue(msg in (logc.prev()[1]))
+ self.assertIn(msg, logc.prev()[1])
class DBEnv_memp(DBEnv):
@@ -372,39 +372,39 @@ class DBEnv_memp(DBEnv):
def test_memp_1_trickle(self) :
self.db.put("hi", "bye")
- self.assertTrue(self.env.memp_trickle(100) > 0)
+ self.assertGreater(self.env.memp_trickle(100), 0)
# Preserve the order, do "memp_trickle" test first
def test_memp_2_sync(self) :
self.db.put("hi", "bye")
self.env.memp_sync() # Full flush
# Nothing to do...
- self.assertTrue(self.env.memp_trickle(100) == 0)
+ self.assertEqual(self.env.memp_trickle(100), 0)
self.db.put("hi", "bye2")
self.env.memp_sync((1, 0)) # NOP, probably
# Something to do... or not
- self.assertTrue(self.env.memp_trickle(100) >= 0)
+ self.assertGreaterEqual(self.env.memp_trickle(100), 0)
self.db.put("hi", "bye3")
self.env.memp_sync((123, 99)) # Full flush
# Nothing to do...
- self.assertTrue(self.env.memp_trickle(100) == 0)
+ self.assertEqual(self.env.memp_trickle(100), 0)
def test_memp_stat_1(self) :
stats = self.env.memp_stat() # No param
- self.assertTrue(len(stats)==2)
- self.assertTrue("cache_miss" in stats[0])
+ self.assertEqual(len(stats), 2)
+ self.assertIn("cache_miss", stats[0])
stats = self.env.memp_stat(db.DB_STAT_CLEAR) # Positional param
- self.assertTrue("cache_miss" in stats[0])
+ self.assertIn("cache_miss", stats[0])
stats = self.env.memp_stat(flags=0) # Keyword param
- self.assertTrue("cache_miss" in stats[0])
+ self.assertIn("cache_miss", stats[0])
def test_memp_stat_2(self) :
stats=self.env.memp_stat()[1]
- self.assertTrue(len(stats))==1
- self.assertTrue("test" in stats)
- self.assertTrue("page_in" in stats["test"])
+ self.assertEqual(len(stats), 1)
+ self.assertIn("test", stats)
+ self.assertIn("page_in", stats["test"])
class DBEnv_logcursor(DBEnv):
def setUp(self):
@@ -426,28 +426,28 @@ class DBEnv_logcursor(DBEnv):
DBEnv.tearDown(self)
def _check_return(self, value) :
- self.assertTrue(isinstance(value, tuple))
+ self.assertIsInstance(value, tuple)
self.assertEqual(len(value), 2)
- self.assertTrue(isinstance(value[0], tuple))
+ self.assertIsInstance(value[0], tuple)
self.assertEqual(len(value[0]), 2)
- self.assertTrue(isinstance(value[0][0], int))
- self.assertTrue(isinstance(value[0][1], int))
- self.assertTrue(isinstance(value[1], str))
+ self.assertIsInstance(value[0][0], int)
+ self.assertIsInstance(value[0][1], int)
+ self.assertIsInstance(value[1], str)
# Preserve test order
def test_1_first(self) :
logc = self.env.log_cursor()
v = logc.first()
self._check_return(v)
- self.assertTrue((1, 1) < v[0])
- self.assertTrue(len(v[1])>0)
+ self.assertLess((1, 1), v[0])
+ self.assertGreater(len(v[1]), 0)
def test_2_last(self) :
logc = self.env.log_cursor()
lsn_first = logc.first()[0]
v = logc.last()
self._check_return(v)
- self.assertTrue(lsn_first < v[0])
+ self.assertLess(lsn_first, v[0])
def test_3_next(self) :
logc = self.env.log_cursor()
@@ -456,16 +456,16 @@ class DBEnv_logcursor(DBEnv):
lsn_first = logc.first()[0]
v = logc.next()
self._check_return(v)
- self.assertTrue(lsn_first < v[0])
- self.assertTrue(lsn_last > v[0])
+ self.assertLess(lsn_first, v[0])
+ self.assertGreater(lsn_last, v[0])
v2 = logc.next()
- self.assertTrue(v2[0] > v[0])
- self.assertTrue(lsn_last > v2[0])
+ self.assertGreater(v2[0], v[0])
+ self.assertGreater(lsn_last, v2[0])
v3 = logc.next()
- self.assertTrue(v3[0] > v2[0])
- self.assertTrue(lsn_last > v3[0])
+ self.assertGreater(v3[0], v2[0])
+ self.assertGreater(lsn_last, v3[0])
def test_4_prev(self) :
logc = self.env.log_cursor()
@@ -474,16 +474,16 @@ class DBEnv_logcursor(DBEnv):
lsn_last = logc.last()[0]
v = logc.prev()
self._check_return(v)
- self.assertTrue(lsn_first < v[0])
- self.assertTrue(lsn_last > v[0])
+ self.assertLess(lsn_first, v[0])
+ self.assertGreater(lsn_last, v[0])
v2 = logc.prev()
- self.assertTrue(v2[0] < v[0])
- self.assertTrue(lsn_first < v2[0])
+ self.assertLess(v2[0], v[0])
+ self.assertLess(lsn_first, v2[0])
v3 = logc.prev()
- self.assertTrue(v3[0] < v2[0])
- self.assertTrue(lsn_first < v3[0])
+ self.assertLess(v3[0], v2[0])
+ self.assertLess(lsn_first, v3[0])
def test_5_current(self) :
logc = self.env.log_cursor()
diff --git a/lib-python/2.7/bsddb/test/test_dbshelve.py b/lib-python/2.7/bsddb/test/test_dbshelve.py
index e5609c5b47..6ec1e28a92 100644
--- a/lib-python/2.7/bsddb/test/test_dbshelve.py
+++ b/lib-python/2.7/bsddb/test/test_dbshelve.py
@@ -248,7 +248,7 @@ class DBShelveTestCase(unittest.TestCase):
self.assertEqual(value.L, [x] * 10)
else:
- self.assertTrue(0, 'Unknown key type, fix the test')
+ self.fail('Unknown key type, fix the test')
#----------------------------------------------------------------------
diff --git a/lib-python/2.7/bsddb/test/test_dbtables.py b/lib-python/2.7/bsddb/test/test_dbtables.py
index 250c4925fd..ad8565dbea 100644
--- a/lib-python/2.7/bsddb/test/test_dbtables.py
+++ b/lib-python/2.7/bsddb/test/test_dbtables.py
@@ -82,8 +82,8 @@ class TableDBTestCase(unittest.TestCase):
colval = pickle.loads(values[0][colname])
else :
colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
- self.assertTrue(colval > 3.141)
- self.assertTrue(colval < 3.142)
+ self.assertGreater(colval, 3.141)
+ self.assertLess(colval, 3.142)
def test02(self):
diff --git a/lib-python/2.7/bsddb/test/test_distributed_transactions.py b/lib-python/2.7/bsddb/test/test_distributed_transactions.py
index 9058575542..65ace9c401 100644
--- a/lib-python/2.7/bsddb/test/test_distributed_transactions.py
+++ b/lib-python/2.7/bsddb/test/test_distributed_transactions.py
@@ -79,7 +79,7 @@ class DBTxn_distributed(unittest.TestCase):
recovered_txns=self.dbenv.txn_recover()
self.assertEqual(self.num_txns,len(recovered_txns))
for gid,txn in recovered_txns :
- self.assertTrue(gid in txns)
+ self.assertIn(gid, txns)
del txn
del recovered_txns
@@ -122,7 +122,7 @@ class DBTxn_distributed(unittest.TestCase):
# Be sure there are not pending transactions.
# Check also database size.
recovered_txns=self.dbenv.txn_recover()
- self.assertTrue(len(recovered_txns)==0)
+ self.assertEqual(len(recovered_txns), 0)
self.assertEqual(len(committed_txns),self.db.stat()["nkeys"])
class DBTxn_distributedSYNC(DBTxn_distributed):
diff --git a/lib-python/2.7/bsddb/test/test_lock.py b/lib-python/2.7/bsddb/test/test_lock.py
index fd87ea2e74..22bf8cde7b 100644
--- a/lib-python/2.7/bsddb/test/test_lock.py
+++ b/lib-python/2.7/bsddb/test/test_lock.py
@@ -2,6 +2,7 @@
TestCases for testing the locking sub-system.
"""
+import sys
import time
import unittest
@@ -10,7 +11,6 @@ from test_all import db, test_support, verbose, have_threads, \
if have_threads :
from threading import Thread
- import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
@@ -129,7 +129,14 @@ class LockingTestCase(unittest.TestCase):
end_time=time.time()
deadlock_detection.end=True
# Floating point rounding
- self.assertTrue((end_time-start_time) >= 0.0999)
+ if sys.platform == 'win32':
+ # bpo-30850: On Windows, tolerate 50 ms whereas 100 ms is expected.
+ # The lock sometimes times out after only 58 ms. Windows clocks
+ # have a bad resolution and bad accuracy.
+ min_dt = 0.050
+ else:
+ min_dt = 0.0999
+ self.assertGreaterEqual(end_time-start_time, min_dt)
self.env.lock_put(lock)
t.join()
@@ -137,7 +144,7 @@ class LockingTestCase(unittest.TestCase):
self.env.lock_id_free(anID2)
if db.version() >= (4,6):
- self.assertTrue(deadlock_detection.count>0)
+ self.assertGreater(deadlock_detection.count, 0)
def theThread(self, lockType):
import sys
diff --git a/lib-python/2.7/bsddb/test/test_misc.py b/lib-python/2.7/bsddb/test/test_misc.py
index b1e928f53b..e2ff2af184 100644
--- a/lib-python/2.7/bsddb/test/test_misc.py
+++ b/lib-python/2.7/bsddb/test/test_misc.py
@@ -25,7 +25,7 @@ class MiscTestCase(unittest.TestCase):
def test02_db_home(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
- self.assertTrue(env.db_home is None)
+ self.assertIsNone(env.db_home)
env.open(self.homeDir, db.DB_CREATE)
if sys.version_info[0] < 3 :
self.assertEqual(self.homeDir, env.db_home)
diff --git a/lib-python/2.7/bsddb/test/test_recno.py b/lib-python/2.7/bsddb/test/test_recno.py
index b0e30de673..10974200f3 100644
--- a/lib-python/2.7/bsddb/test/test_recno.py
+++ b/lib-python/2.7/bsddb/test/test_recno.py
@@ -18,7 +18,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
def assertIsInstance(self, obj, datatype, msg=None) :
return self.assertEqual(type(obj), datatype, msg=msg)
def assertGreaterEqual(self, a, b, msg=None) :
- return self.assertTrue(a>=b, msg=msg)
+ return self.assertTrue(a>=b, msg=msg)
def setUp(self):
diff --git a/lib-python/2.7/bsddb/test/test_replication.py b/lib-python/2.7/bsddb/test/test_replication.py
index 12ab2dd855..536d25d2fb 100644
--- a/lib-python/2.7/bsddb/test/test_replication.py
+++ b/lib-python/2.7/bsddb/test/test_replication.py
@@ -186,20 +186,18 @@ class DBReplicationManager(DBReplication) :
d = d.values()[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], client_port)
- self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
- (d[2]==db.DB_REPMGR_DISCONNECTED))
+ self.assertIn(d[2], (db.DB_REPMGR_CONNECTED, db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEqual(len(d), 1)
d = d.values()[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], master_port)
- self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
- (d[2]==db.DB_REPMGR_DISCONNECTED))
+ self.assertIn(d[2], (db.DB_REPMGR_CONNECTED, db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
- self.assertTrue("msgs_queued" in d)
+ self.assertIn("msgs_queued", d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
@@ -247,7 +245,7 @@ class DBReplicationManager(DBReplication) :
if time.time()>=timeout and startup_timeout:
self.skipTest("replication test skipped due to random failure, "
"see issue 3892")
- self.assertTrue(time.time()<timeout)
+ self.assertLess(time.time(), timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
@@ -260,7 +258,7 @@ class DBReplicationManager(DBReplication) :
txn.commit()
if v is None :
time.sleep(0.02)
- self.assertTrue(time.time()<timeout)
+ self.assertLess(time.time(), timeout)
self.assertEqual(None, v)
class DBBaseReplication(DBReplication) :
@@ -381,7 +379,7 @@ class DBBaseReplication(DBReplication) :
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
- self.assertTrue(time.time()<timeout)
+ self.assertLess(time.time(), timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
@@ -410,7 +408,7 @@ class DBBaseReplication(DBReplication) :
break
d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR);
- self.assertTrue("master_changes" in d)
+ self.assertIn("master_changes", d)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
@@ -424,7 +422,7 @@ class DBBaseReplication(DBReplication) :
txn.commit()
if v is None :
time.sleep(0.02)
- self.assertTrue(time.time()<timeout)
+ self.assertLess(time.time(), timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
@@ -437,7 +435,7 @@ class DBBaseReplication(DBReplication) :
txn.commit()
if v is None :
time.sleep(0.02)
- self.assertTrue(time.time()<timeout)
+ self.assertLess(time.time(), timeout)
self.assertEqual(None, v)
if db.version() >= (4,7) :
diff --git a/lib-python/2.7/bsddb/test/test_sequence.py b/lib-python/2.7/bsddb/test/test_sequence.py
index f0aa12a8f2..763a9479d2 100644
--- a/lib-python/2.7/bsddb/test/test_sequence.py
+++ b/lib-python/2.7/bsddb/test/test_sequence.py
@@ -82,7 +82,7 @@ class DBSequenceTest(unittest.TestCase):
stat = self.seq.stat()
for param in ('nowait', 'min', 'max', 'value', 'current',
'flags', 'cache_size', 'last_value', 'wait'):
- self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)
+ self.assertIn(param, stat, "parameter %s isn't in stat info" % param)
if db.version() >= (4,7) :
# This code checks a crash solved in Berkeley DB 4.7
diff --git a/lib-python/2.7/bsddb/test/test_thread.py b/lib-python/2.7/bsddb/test/test_thread.py
index 42212e9d9e..ce4963dbd3 100644
--- a/lib-python/2.7/bsddb/test/test_thread.py
+++ b/lib-python/2.7/bsddb/test/test_thread.py
@@ -85,7 +85,7 @@ class ConcurrentDataStoreBase(BaseThreadedTestCase):
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
- self.assertTrue((records_per_writer%readers_per_writer)==0)
+ self.assertEqual(records_per_writer%readers_per_writer, 0)
readers = []
for x in xrange(self.readers):
@@ -213,7 +213,7 @@ class SimpleThreadedBase(BaseThreadedTestCase):
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
- self.assertTrue((records_per_writer%readers_per_writer)==0)
+ self.assertEqual(records_per_writer%readers_per_writer, 0)
readers = []
for x in xrange(self.readers):
@@ -339,7 +339,7 @@ class ThreadedTransactionsBase(BaseThreadedTestCase):
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
- self.assertTrue((records_per_writer%readers_per_writer)==0)
+ self.assertEqual(records_per_writer%readers_per_writer, 0)
readers=[]
for x in xrange(self.readers):
diff --git a/lib-python/2.7/cProfile.py b/lib-python/2.7/cProfile.py
index 7cf3d6e54f..12ea6fe8b2 100755
--- a/lib-python/2.7/cProfile.py
+++ b/lib-python/2.7/cProfile.py
@@ -64,11 +64,11 @@ def help():
# ____________________________________________________________
class Profile(_lsprof.Profiler):
- """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
+ """Profile(timer=None, timeunit=None, subcalls=True, builtins=True)
Builds a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
- For custom timer functions returning integers, time_unit can
+ For custom timer functions returning integers, timeunit can
be a float specifying a scale (i.e. how long each integer unit
is, in seconds).
"""
@@ -161,7 +161,7 @@ def label(code):
# ____________________________________________________________
def main():
- import os, sys, types
+ import os, sys, pstats, types
from optparse import OptionParser
usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
@@ -170,7 +170,8 @@ def main():
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
- default=-1)
+ default=-1,
+ choices=sorted(pstats.Stats.sort_arg_dict_default))
if not sys.argv[1:]:
parser.print_usage()
diff --git a/lib-python/2.7/cgi.py b/lib-python/2.7/cgi.py
index 7c51b44db1..5b903e0347 100755
--- a/lib-python/2.7/cgi.py
+++ b/lib-python/2.7/cgi.py
@@ -184,11 +184,12 @@ def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
- return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
+ return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing,
+ max_num_fields)
def parse_multipart(fp, pdict):
"""Parse multipart input.
@@ -393,7 +394,8 @@ class FieldStorage:
"""
def __init__(self, fp=None, headers=None, outerboundary="",
- environ=os.environ, keep_blank_values=0, strict_parsing=0):
+ environ=os.environ, keep_blank_values=0, strict_parsing=0,
+ max_num_fields=None):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
@@ -420,10 +422,14 @@ class FieldStorage:
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
+ max_num_fields: int. If set, then __init__ throws a ValueError
+ if there are more than n fields read by parse_qsl().
+
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
+ self.max_num_fields = max_num_fields
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
@@ -606,10 +612,9 @@ class FieldStorage:
qs = self.fp.read(self.length)
if self.qs_on_post:
qs += '&' + self.qs_on_post
- self.list = list = []
- for key, value in urlparse.parse_qsl(qs, self.keep_blank_values,
- self.strict_parsing):
- list.append(MiniFieldStorage(key, value))
+ query = urlparse.parse_qsl(qs, self.keep_blank_values,
+ self.strict_parsing, self.max_num_fields)
+ self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
FieldStorageClass = None
@@ -621,19 +626,38 @@ class FieldStorage:
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
if self.qs_on_post:
- for key, value in urlparse.parse_qsl(self.qs_on_post,
- self.keep_blank_values, self.strict_parsing):
- self.list.append(MiniFieldStorage(key, value))
+ query = urlparse.parse_qsl(self.qs_on_post,
+ self.keep_blank_values,
+ self.strict_parsing,
+ self.max_num_fields)
+ self.list.extend(MiniFieldStorage(key, value)
+ for key, value in query)
FieldStorageClass = None
+ # Propagate max_num_fields into the sub class appropriately
+ max_num_fields = self.max_num_fields
+ if max_num_fields is not None:
+ max_num_fields -= len(self.list)
+
klass = self.FieldStorageClass or self.__class__
part = klass(self.fp, {}, ib,
- environ, keep_blank_values, strict_parsing)
+ environ, keep_blank_values, strict_parsing,
+ max_num_fields)
+
# Throw first part away
while not part.done:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
- environ, keep_blank_values, strict_parsing)
+ environ, keep_blank_values, strict_parsing,
+ max_num_fields)
+
+ if max_num_fields is not None:
+ max_num_fields -= 1
+ if part.list:
+ max_num_fields -= len(part.list)
+ if max_num_fields < 0:
+ raise ValueError('Max number of fields exceeded')
+
self.list.append(part)
self.skip_lines()
diff --git a/lib-python/2.7/cgitb.py b/lib-python/2.7/cgitb.py
index 8acc4b75fe..3689f8a4ba 100644
--- a/lib-python/2.7/cgitb.py
+++ b/lib-python/2.7/cgitb.py
@@ -125,7 +125,7 @@ function calls leading up to the error, in the order they occurred.</p>'''
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
- call = 'in ' + strong(func) + \
+ call = 'in ' + strong(pydoc.html.escape(func)) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
@@ -285,7 +285,7 @@ class Hook:
if self.display:
if plain:
- doc = doc.replace('&', '&amp;').replace('<', '&lt;')
+ doc = pydoc.html.escape(doc)
self.file.write('<pre>' + doc + '</pre>\n')
else:
self.file.write(doc + '\n')
diff --git a/lib-python/2.7/codecs.py b/lib-python/2.7/codecs.py
index 590238ec50..e120d636bc 100644
--- a/lib-python/2.7/codecs.py
+++ b/lib-python/2.7/codecs.py
@@ -472,15 +472,17 @@ class StreamReader(Codec):
self.charbuffer = "".join(self.linebuffer)
self.linebuffer = None
+ if chars < 0:
+ # For compatibility with other read() methods that take a
+ # single argument
+ chars = size
+
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
- elif size >= 0:
- if len(self.charbuffer) >= size:
- break
# we need more data
if size < 0:
newdata = self.stream.read()
diff --git a/lib-python/2.7/compiler/pyassem.py b/lib-python/2.7/compiler/pyassem.py
index f52f7d079f..b82073e4d1 100644
--- a/lib-python/2.7/compiler/pyassem.py
+++ b/lib-python/2.7/compiler/pyassem.py
@@ -581,7 +581,7 @@ def getArgCount(args):
def twobyte(val):
"""Convert an int argument into high and low bytes"""
- assert isinstance(val, int)
+ assert isinstance(val, (int, long))
return divmod(val, 256)
class LineAddrTable:
diff --git a/lib-python/2.7/compiler/transformer.py b/lib-python/2.7/compiler/transformer.py
index d4f4613f48..ba5c03ce75 100644
--- a/lib-python/2.7/compiler/transformer.py
+++ b/lib-python/2.7/compiler/transformer.py
@@ -1526,7 +1526,7 @@ for k, v in token.tok_name.items():
def debug_tree(tree):
l = []
for elt in tree:
- if isinstance(elt, int):
+ if isinstance(elt, (int, long)):
l.append(_names.get(elt, elt))
elif isinstance(elt, str):
l.append(elt)
diff --git a/lib-python/2.7/cookielib.py b/lib-python/2.7/cookielib.py
index 2dd7c48728..e76d09d8a5 100644
--- a/lib-python/2.7/cookielib.py
+++ b/lib-python/2.7/cookielib.py
@@ -205,10 +205,14 @@ LOOSE_HTTP_DATE_RE = re.compile(
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
- ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
+ (?:
+ ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+) # timezone
+ \s*
+ )?
+ (?:
+ \(\w+\) # ASCII representation of timezone in parens.
\s*
- (?:\(\w+\))? # ASCII representation of timezone in parens.
- \s*$""", re.X)
+ )?$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
@@ -266,7 +270,7 @@ def http2time(text):
return _str2time(day, mon, yr, hr, min, sec, tz)
ISO_DATE_RE = re.compile(
- """^
+ r"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
@@ -278,9 +282,11 @@ ISO_DATE_RE = re.compile(
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
- ([-+]?\d\d?:?(:?\d\d)?
- |Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
- \s*$""", re.X)
+ (?:
+ ([-+]?\d\d?:?(:?\d\d)?
+ |Z|z) # timezone (Z is "zero meridian", i.e. GMT)
+ \s*
+ )?$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
@@ -984,7 +990,7 @@ class DefaultCookiePolicy(CookiePolicy):
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
- not req_path.startswith(cookie.path)):
+ not self.path_return_ok(cookie.path, request)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
@@ -1139,6 +1145,11 @@ class DefaultCookiePolicy(CookiePolicy):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
+ if domain and not domain.startswith("."):
+ dotdomain = "." + domain
+ else:
+ dotdomain = domain
+
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
@@ -1151,7 +1162,7 @@ class DefaultCookiePolicy(CookiePolicy):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
- if cookie.version == 0 and not ("."+erhn).endswith(domain):
+ if cookie.version == 0 and not ("."+erhn).endswith(dotdomain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
@@ -1165,7 +1176,11 @@ class DefaultCookiePolicy(CookiePolicy):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
- if not (req_host.endswith(domain) or erhn.endswith(domain)):
+ if domain and not domain.startswith("."):
+ dotdomain = "." + domain
+ else:
+ dotdomain = domain
+ if not (req_host.endswith(dotdomain) or erhn.endswith(dotdomain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
@@ -1182,11 +1197,15 @@ class DefaultCookiePolicy(CookiePolicy):
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
- if not req_path.startswith(path):
- _debug(" %s does not path-match %s", req_path, path)
- return False
- return True
+ pathlen = len(path)
+ if req_path == path:
+ return True
+ elif (req_path.startswith(path) and
+ (path.endswith("/") or req_path[pathlen:pathlen+1] == "/")):
+ return True
+ _debug(" %s does not path-match %s", req_path, path)
+ return False
def vals_sorted_by_key(adict):
keys = adict.keys()
diff --git a/lib-python/2.7/copy_reg.py b/lib-python/2.7/copy_reg.py
index db1715092c..8943077593 100644
--- a/lib-python/2.7/copy_reg.py
+++ b/lib-python/2.7/copy_reg.py
@@ -127,7 +127,11 @@ def _slotnames(cls):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
- names.append('_%s%s' % (c.__name__, name))
+ stripped = c.__name__.lstrip('_')
+ if stripped:
+ names.append('_%s%s' % (stripped, name))
+ else:
+ names.append(name)
else:
names.append(name)
diff --git a/lib-python/2.7/csv.py b/lib-python/2.7/csv.py
index c155ada794..70c53ae779 100644
--- a/lib-python/2.7/csv.py
+++ b/lib-python/2.7/csv.py
@@ -217,7 +217,7 @@ class Sniffer:
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
- '(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
+ '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py
index 9f8f0f9f7a..ec87188bb5 100644
--- a/lib-python/2.7/ctypes/__init__.py
+++ b/lib-python/2.7/ctypes/__init__.py
@@ -343,6 +343,10 @@ class CDLL(object):
"""
_func_flags_ = _FUNCFLAG_CDECL
_func_restype_ = c_int
+ # default values for repr
+ _name = '<uninitialized>'
+ _handle = 0
+ _FuncPtr = None
def __init__(self, name, mode=DEFAULT_MODE, handle=None,
use_errno=False,
diff --git a/lib-python/2.7/ctypes/test/test_anon.py b/lib-python/2.7/ctypes/test/test_anon.py
index d892b59898..2c28b7f44a 100644
--- a/lib-python/2.7/ctypes/test/test_anon.py
+++ b/lib-python/2.7/ctypes/test/test_anon.py
@@ -1,4 +1,5 @@
import unittest
+from test.support import cpython_only
from ctypes import *
class AnonTest(unittest.TestCase):
@@ -35,6 +36,18 @@ class AnonTest(unittest.TestCase):
{"_fields_": [],
"_anonymous_": ["x"]}))
+ @cpython_only
+ def test_issue31490(self):
+ # There shouldn't be an assertion failure in case the class has an
+ # attribute whose name is specified in _anonymous_ but not in _fields_.
+
+ # AttributeError: 'x' is specified in _anonymous_ but not in _fields_
+ with self.assertRaises(AttributeError):
+ class Name(Structure):
+ _fields_ = []
+ _anonymous_ = ["x"]
+ x = 42
+
def test_nested(self):
class ANON_S(Structure):
_fields_ = [("a", c_int)]
diff --git a/lib-python/2.7/ctypes/test/test_arrays.py b/lib-python/2.7/ctypes/test/test_arrays.py
index c6cf3d939c..f2d621e930 100644
--- a/lib-python/2.7/ctypes/test/test_arrays.py
+++ b/lib-python/2.7/ctypes/test/test_arrays.py
@@ -1,4 +1,6 @@
import unittest
+from test.support import precisionbigmemtest, _2G
+import sys
from ctypes import *
from test.test_support import impl_detail
@@ -143,5 +145,31 @@ class ArrayTestCase(unittest.TestCase):
t2 = my_int * 1
self.assertIs(t1, t2)
+ def test_empty_element_struct(self):
+ class EmptyStruct(Structure):
+ _fields_ = []
+
+ obj = (EmptyStruct * 2)() # bpo37188: Floating point exception
+ self.assertEqual(sizeof(obj), 0)
+
+ def test_empty_element_array(self):
+ class EmptyArray(Array):
+ _type_ = c_int
+ _length_ = 0
+
+ obj = (EmptyArray * 2)() # bpo37188: Floating point exception
+ self.assertEqual(sizeof(obj), 0)
+
+ def test_bpo36504_signed_int_overflow(self):
+ # The overflow check in PyCArrayType_new() could cause signed integer
+ # overflow.
+ with self.assertRaises(OverflowError):
+ c_char * sys.maxsize * 2
+
+ @unittest.skipUnless(sys.maxsize > 2**32, 'requires 64bit platform')
+ @precisionbigmemtest(size=_2G, memuse=1, dry_run=False)
+ def test_large_array(self, size):
+ a = c_char * size
+
if __name__ == '__main__':
unittest.main()
diff --git a/lib-python/2.7/ctypes/test/test_as_parameter.py b/lib-python/2.7/ctypes/test/test_as_parameter.py
index f2fe10a955..de730e95bb 100644
--- a/lib-python/2.7/ctypes/test/test_as_parameter.py
+++ b/lib-python/2.7/ctypes/test/test_as_parameter.py
@@ -24,7 +24,7 @@ class BasicWrapTestCase(unittest.TestCase):
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.assertEqual(result, 139)
- self.assertTrue(type(result), int)
+ self.assertIs(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
diff --git a/lib-python/2.7/ctypes/test/test_callbacks.py b/lib-python/2.7/ctypes/test/test_callbacks.py
index 1363ddf925..3a108b79eb 100644
--- a/lib-python/2.7/ctypes/test/test_callbacks.py
+++ b/lib-python/2.7/ctypes/test/test_callbacks.py
@@ -252,6 +252,7 @@ class SampleCallbacksTestCase(unittest.TestCase):
def test_callback_large_struct(self):
class Check: pass
+ # This should mirror the structure in Modules/_ctypes/_ctypes_test.c
class X(Structure):
_fields_ = [
('first', c_ulong),
@@ -263,6 +264,11 @@ class SampleCallbacksTestCase(unittest.TestCase):
check.first = s.first
check.second = s.second
check.third = s.third
+ # See issue #29565.
+ # The structure should be passed by value, so
+ # any changes to it should not be reflected in
+ # the value passed
+ s.first = s.second = s.third = 0x0badf00d
check = Check()
s = X()
@@ -283,6 +289,11 @@ class SampleCallbacksTestCase(unittest.TestCase):
self.assertEqual(check.first, 0xdeadbeef)
self.assertEqual(check.second, 0xcafebabe)
self.assertEqual(check.third, 0x0bad1dea)
+ # See issue #29565.
+ # Ensure that the original struct is unchanged.
+ self.assertEqual(s.first, check.first)
+ self.assertEqual(s.second, check.second)
+ self.assertEqual(s.third, check.third)
################################################################
diff --git a/lib-python/2.7/ctypes/test/test_frombuffer.py b/lib-python/2.7/ctypes/test/test_frombuffer.py
index d708ed6906..99c32e095b 100644
--- a/lib-python/2.7/ctypes/test/test_frombuffer.py
+++ b/lib-python/2.7/ctypes/test/test_frombuffer.py
@@ -78,12 +78,21 @@ class Test(unittest.TestCase):
(c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int))
def test_abstract(self):
+ from ctypes import _Pointer, _SimpleCData, _CFuncPtr
+
self.assertRaises(TypeError, Array.from_buffer, bytearray(10))
self.assertRaises(TypeError, Structure.from_buffer, bytearray(10))
self.assertRaises(TypeError, Union.from_buffer, bytearray(10))
+ self.assertRaises(TypeError, _CFuncPtr.from_buffer, bytearray(10))
+ self.assertRaises(TypeError, _Pointer.from_buffer, bytearray(10))
+ self.assertRaises(TypeError, _SimpleCData.from_buffer, bytearray(10))
+
self.assertRaises(TypeError, Array.from_buffer_copy, b"123")
self.assertRaises(TypeError, Structure.from_buffer_copy, b"123")
self.assertRaises(TypeError, Union.from_buffer_copy, b"123")
+ self.assertRaises(TypeError, _CFuncPtr.from_buffer_copy, b"123")
+ self.assertRaises(TypeError, _Pointer.from_buffer_copy, b"123")
+ self.assertRaises(TypeError, _SimpleCData.from_buffer_copy, b"123")
if __name__ == '__main__':
unittest.main()
diff --git a/lib-python/2.7/ctypes/test/test_funcptr.py b/lib-python/2.7/ctypes/test/test_funcptr.py
index 58cbb47af4..575030327e 100644
--- a/lib-python/2.7/ctypes/test/test_funcptr.py
+++ b/lib-python/2.7/ctypes/test/test_funcptr.py
@@ -123,5 +123,10 @@ class CFuncPtrTestCase(unittest.TestCase):
self.assertEqual(strtok(None, "\n"), "c")
self.assertEqual(strtok(None, "\n"), None)
+ def test_abstract(self):
+ from ctypes import _CFuncPtr
+
+ self.assertRaises(TypeError, _CFuncPtr, 13, "name", 42, "iid")
+
if __name__ == '__main__':
unittest.main()
diff --git a/lib-python/2.7/ctypes/test/test_loading.py b/lib-python/2.7/ctypes/test/test_loading.py
index 81a27e35df..e64fff7b03 100644
--- a/lib-python/2.7/ctypes/test/test_loading.py
+++ b/lib-python/2.7/ctypes/test/test_loading.py
@@ -3,6 +3,7 @@ import sys, unittest
import os
from ctypes.util import find_library
from ctypes.test import is_resource_enabled
+import test.test_support as support
libc_name = None
if os.name == "nt":
@@ -27,6 +28,12 @@ class LoaderTest(unittest.TestCase):
CDLL(os.path.basename(libc_name))
self.assertRaises(OSError, CDLL, self.unknowndll)
+ @support.requires_unicode
+ @unittest.skipUnless(libc_name is not None, 'could not find libc')
+ def test_load_unicode(self):
+ CDLL(unicode(libc_name))
+ self.assertRaises(OSError, CDLL, unicode(self.unknowndll))
+
@unittest.skipUnless(libc_name is not None, 'could not find libc')
@unittest.skipUnless(libc_name is not None and
os.path.basename(libc_name) == "libc.so.6",
diff --git a/lib-python/2.7/ctypes/test/test_parameters.py b/lib-python/2.7/ctypes/test/test_parameters.py
index 5d93761a3d..cd434811dd 100644
--- a/lib-python/2.7/ctypes/test/test_parameters.py
+++ b/lib-python/2.7/ctypes/test/test_parameters.py
@@ -1,5 +1,6 @@
import unittest, sys
from ctypes.test import need_symbol
+import test.support
from ctypes.test import xfail
@@ -180,6 +181,36 @@ class SimpleTypesTestCase(unittest.TestCase):
# ArgumentError: argument 1: ValueError: 99
self.assertRaises(ArgumentError, func, 99)
+ def test_abstract(self):
+ from ctypes import (Array, Structure, Union, _Pointer,
+ _SimpleCData, _CFuncPtr)
+
+ self.assertRaises(TypeError, Array.from_param, 42)
+ self.assertRaises(TypeError, Structure.from_param, 42)
+ self.assertRaises(TypeError, Union.from_param, 42)
+ self.assertRaises(TypeError, _CFuncPtr.from_param, 42)
+ self.assertRaises(TypeError, _Pointer.from_param, 42)
+ self.assertRaises(TypeError, _SimpleCData.from_param, 42)
+
+ @test.support.cpython_only
+ def test_issue31311(self):
+ # __setstate__ should neither raise a SystemError nor crash in case
+ # of a bad __dict__.
+ from ctypes import Structure
+
+ class BadStruct(Structure):
+ @property
+ def __dict__(self):
+ pass
+ with self.assertRaises(TypeError):
+ BadStruct().__setstate__({}, b'foo')
+
+ class WorseStruct(Structure):
+ @property
+ def __dict__(self):
+ 1/0.0
+ with self.assertRaises(ZeroDivisionError):
+ WorseStruct().__setstate__({}, b'foo')
################################################################
diff --git a/lib-python/2.7/ctypes/test/test_pep3118.py b/lib-python/2.7/ctypes/test/test_pep3118.py
index 90d2870305..c6e0f1119e 100644
--- a/lib-python/2.7/ctypes/test/test_pep3118.py
+++ b/lib-python/2.7/ctypes/test/test_pep3118.py
@@ -111,6 +111,34 @@ Complete._fields_ = [("a", c_long)]
# This table contains format strings as they look on little endian
# machines. The test replaces '<' with '>' on big endian machines.
#
+
+# Platform-specific type codes
+s_bool = {1: '?', 2: 'H', 4: 'L', 8: 'Q'}[sizeof(c_bool)]
+s_short = {2: 'h', 4: 'l', 8: 'q'}[sizeof(c_short)]
+s_ushort = {2: 'H', 4: 'L', 8: 'Q'}[sizeof(c_ushort)]
+s_int = {2: 'h', 4: 'i', 8: 'q'}[sizeof(c_int)]
+s_uint = {2: 'H', 4: 'I', 8: 'Q'}[sizeof(c_uint)]
+s_long = {4: 'l', 8: 'q'}[sizeof(c_long)]
+s_ulong = {4: 'L', 8: 'Q'}[sizeof(c_ulong)]
+s_longlong = "q"
+s_ulonglong = "Q"
+s_float = "f"
+s_double = "d"
+s_longdouble = "g"
+
+# Alias definitions in ctypes/__init__.py
+if c_int is c_long:
+ s_int = s_long
+if c_uint is c_ulong:
+ s_uint = s_ulong
+if c_longlong is c_long:
+ s_longlong = s_long
+if c_ulonglong is c_ulong:
+ s_ulonglong = s_ulong
+if c_longdouble is c_double:
+ s_longdouble = s_double
+
+
native_types = [
# type format shape calc itemsize
@@ -119,52 +147,51 @@ native_types = [
(c_char, "<c", None, c_char),
(c_byte, "<b", None, c_byte),
(c_ubyte, "<B", None, c_ubyte),
- (c_short, "<h", None, c_short),
- (c_ushort, "<H", None, c_ushort),
+ (c_short, "<" + s_short, None, c_short),
+ (c_ushort, "<" + s_ushort, None, c_ushort),
- # c_int and c_uint may be aliases to c_long
- #(c_int, "<i", None, c_int),
- #(c_uint, "<I", None, c_uint),
+ (c_int, "<" + s_int, None, c_int),
+ (c_uint, "<" + s_uint, None, c_uint),
- (c_long, "<l", None, c_long),
- (c_ulong, "<L", None, c_ulong),
+ (c_long, "<" + s_long, None, c_long),
+ (c_ulong, "<" + s_ulong, None, c_ulong),
- # c_longlong and c_ulonglong are aliases on 64-bit platforms
- #(c_longlong, "<q", None, c_longlong),
- #(c_ulonglong, "<Q", None, c_ulonglong),
+ (c_longlong, "<" + s_longlong, None, c_longlong),
+ (c_ulonglong, "<" + s_ulonglong, None, c_ulonglong),
(c_float, "<f", None, c_float),
(c_double, "<d", None, c_double),
- # c_longdouble may be an alias to c_double
- (c_bool, "<?", None, c_bool),
+ (c_longdouble, "<" + s_longdouble, None, c_longdouble),
+
+ (c_bool, "<" + s_bool, None, c_bool),
(py_object, "<O", None, py_object),
## pointers
(POINTER(c_byte), "&<b", None, POINTER(c_byte)),
- (POINTER(POINTER(c_long)), "&&<l", None, POINTER(POINTER(c_long))),
+ (POINTER(POINTER(c_long)), "&&<" + s_long, None, POINTER(POINTER(c_long))),
## arrays and pointers
(c_double * 4, "<d", (4,), c_double),
(c_float * 4 * 3 * 2, "<f", (2,3,4), c_float),
- (POINTER(c_short) * 2, "&<h", (2,), POINTER(c_short)),
- (POINTER(c_short) * 2 * 3, "&<h", (3,2,), POINTER(c_short)),
- (POINTER(c_short * 2), "&(2)<h", None, POINTER(c_short)),
+ (POINTER(c_short) * 2, "&<" + s_short, (2,), POINTER(c_short)),
+ (POINTER(c_short) * 2 * 3, "&<" + s_short, (3,2,), POINTER(c_short)),
+ (POINTER(c_short * 2), "&(2)<" + s_short, None, POINTER(c_short)),
## structures and unions
- (Point, "T{<l:x:<l:y:}", None, Point),
+ (Point, "T{<l:x:<l:y:}".replace('l', s_long), None, Point),
# packed structures do not implement the pep
- (PackedPoint, "B", None, PackedPoint),
- (Point2, "T{<l:x:<l:y:}", None, Point2),
- (EmptyStruct, "T{}", None, EmptyStruct),
+ (PackedPoint, "B", None, PackedPoint),
+ (Point2, "T{<l:x:<l:y:}".replace('l', s_long), None, Point2),
+ (EmptyStruct, "T{}", None, EmptyStruct),
# the pep does't support unions
- (aUnion, "B", None, aUnion),
+ (aUnion, "B", None, aUnion),
# structure with sub-arrays
- (StructWithArrays, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}", None, StructWithArrays),
- (StructWithArrays * 3, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}", (3,), StructWithArrays),
+ (StructWithArrays, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}".replace('l', s_long), None, StructWithArrays),
+ (StructWithArrays * 3, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}".replace('l', s_long), (3,), StructWithArrays),
## pointer to incomplete structure
## XXX: fix on PyPy
@@ -173,9 +200,9 @@ native_types = [
# 'Complete' is a structure that starts incomplete, but is completed after the
# pointer type to it has been created.
- (Complete, "T{<l:a:}", None, Complete),
+ (Complete, "T{<l:a:}".replace('l', s_long), None, Complete),
# This fails on CPython
- (POINTER(Complete), "&T{<l:a:}", None, POINTER(Complete)),
+ (POINTER(Complete), "&T{<l:a:}".replace('l', s_long), None, POINTER(Complete)),
## other
@@ -196,10 +223,10 @@ class LEPoint(LittleEndianStructure):
# and little endian machines.
#
endian_types = [
- (BEPoint, "T{>l:x:>l:y:}", None, BEPoint),
- (LEPoint, "T{<l:x:<l:y:}", None, LEPoint),
- (POINTER(BEPoint), "&T{>l:x:>l:y:}", None, POINTER(BEPoint)),
- (POINTER(LEPoint), "&T{<l:x:<l:y:}", None, POINTER(LEPoint)),
+ (BEPoint, "T{>l:x:>l:y:}".replace('l', s_long), None, BEPoint),
+ (LEPoint, "T{<l:x:<l:y:}".replace('l', s_long), None, LEPoint),
+ (POINTER(BEPoint), "&T{>l:x:>l:y:}".replace('l', s_long), None, POINTER(BEPoint)),
+ (POINTER(LEPoint), "&T{<l:x:<l:y:}".replace('l', s_long), None, POINTER(LEPoint)),
]
if __name__ == "__main__":
diff --git a/lib-python/2.7/ctypes/test/test_pointers.py b/lib-python/2.7/ctypes/test/test_pointers.py
index 24b0546c89..4a8887c1de 100644
--- a/lib-python/2.7/ctypes/test/test_pointers.py
+++ b/lib-python/2.7/ctypes/test/test_pointers.py
@@ -210,6 +210,11 @@ class PointersTestCase(unittest.TestCase):
from ctypes import _pointer_type_cache
del _pointer_type_cache[id(P)]
+ def test_abstract(self):
+ from ctypes import _Pointer
+
+ self.assertRaises(TypeError, _Pointer.set_type, 42)
+
if __name__ == '__main__':
unittest.main()
diff --git a/lib-python/2.7/ctypes/test/test_strings.py b/lib-python/2.7/ctypes/test/test_strings.py
index 6d14dcfe2c..442652022a 100644
--- a/lib-python/2.7/ctypes/test/test_strings.py
+++ b/lib-python/2.7/ctypes/test/test_strings.py
@@ -63,6 +63,13 @@ class StringArrayTestCase(unittest.TestCase):
## print BUF.from_param(c_char_p("python"))
## print BUF.from_param(BUF(*"pyth"))
+ def test_del_segfault(self):
+ BUF = c_char * 4
+ buf = BUF()
+ with self.assertRaises(AttributeError):
+ del buf.raw
+
+
@need_symbol('c_wchar')
class WStringArrayTestCase(unittest.TestCase):
def test(self):
diff --git a/lib-python/2.7/ctypes/test/test_struct_fields.py b/lib-python/2.7/ctypes/test/test_struct_fields.py
index 22eb3b0cd7..8045cc8267 100644
--- a/lib-python/2.7/ctypes/test/test_struct_fields.py
+++ b/lib-python/2.7/ctypes/test/test_struct_fields.py
@@ -46,5 +46,29 @@ class StructFieldsTestCase(unittest.TestCase):
Y._fields_ = []
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
+ # __set__ and __get__ should raise a TypeError in case their self
+ # argument is not a ctype instance.
+ def test___set__(self):
+ class MyCStruct(Structure):
+ _fields_ = (("field", c_int),)
+ self.assertRaises(TypeError,
+ MyCStruct.field.__set__, 'wrong type self', 42)
+
+ class MyCUnion(Union):
+ _fields_ = (("field", c_int),)
+ self.assertRaises(TypeError,
+ MyCUnion.field.__set__, 'wrong type self', 42)
+
+ def test___get__(self):
+ class MyCStruct(Structure):
+ _fields_ = (("field", c_int),)
+ self.assertRaises(TypeError,
+ MyCStruct.field.__get__, 'wrong type self', 42)
+
+ class MyCUnion(Union):
+ _fields_ = (("field", c_int),)
+ self.assertRaises(TypeError,
+ MyCUnion.field.__get__, 'wrong type self', 42)
+
if __name__ == "__main__":
unittest.main()
diff --git a/lib-python/2.7/ctypes/test/test_structures.py b/lib-python/2.7/ctypes/test/test_structures.py
index b66ce87f5b..04281203ab 100644
--- a/lib-python/2.7/ctypes/test/test_structures.py
+++ b/lib-python/2.7/ctypes/test/test_structures.py
@@ -3,6 +3,7 @@ from ctypes import *
from ctypes.test import need_symbol
from struct import calcsize
import _testcapi
+import _ctypes_test
class SubclassesTest(unittest.TestCase):
def test_subclass(self):
@@ -401,6 +402,28 @@ class StructureTestCase(unittest.TestCase):
(1, 0, 0, 0, 0, 0))
self.assertRaises(TypeError, lambda: Z(1, 2, 3, 4, 5, 6, 7))
+ def test_pass_by_value(self):
+ # This should mirror the structure in Modules/_ctypes/_ctypes_test.c
+ class X(Structure):
+ _fields_ = [
+ ('first', c_ulong),
+ ('second', c_ulong),
+ ('third', c_ulong),
+ ]
+
+ s = X()
+ s.first = 0xdeadbeef
+ s.second = 0xcafebabe
+ s.third = 0x0bad1dea
+ dll = CDLL(_ctypes_test.__file__)
+ func = dll._testfunc_large_struct_update_value
+ func.argtypes = (X,)
+ func.restype = None
+ func(s)
+ self.assertEqual(s.first, 0xdeadbeef)
+ self.assertEqual(s.second, 0xcafebabe)
+ self.assertEqual(s.third, 0x0bad1dea)
+
class PointerMemberTestCase(unittest.TestCase):
def test(self):
diff --git a/lib-python/2.7/ctypes/test/test_unicode.py b/lib-python/2.7/ctypes/test/test_unicode.py
index 1da5a25f25..ec5663a500 100644
--- a/lib-python/2.7/ctypes/test/test_unicode.py
+++ b/lib-python/2.7/ctypes/test/test_unicode.py
@@ -93,7 +93,7 @@ class StringTestCase(UnicodeTestCase):
func.argtypes = None
func.restype = ctypes.c_int
- def test_ascii_replace(self):
+ def test_ascii_strict(self):
func = self.func
ctypes.set_conversion_mode("ascii", "strict")
self.assertEqual(func("abc"), "abc")
diff --git a/lib-python/2.7/ctypes/test/test_win32.py b/lib-python/2.7/ctypes/test/test_win32.py
index d22e139a3f..13a986359f 100644
--- a/lib-python/2.7/ctypes/test/test_win32.py
+++ b/lib-python/2.7/ctypes/test/test_win32.py
@@ -53,6 +53,24 @@ class FunctionCallTestCase(unittest.TestCase):
windll.user32.GetDesktopWindow()
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
+class ReturnStructSizesTestCase(unittest.TestCase):
+ def test_sizes(self):
+ dll = CDLL(_ctypes_test.__file__)
+ for i in range(1, 11):
+ fields = [ ("f%d" % f, c_char) for f in range(1, i + 1)]
+ class S(Structure):
+ _fields_ = fields
+ f = getattr(dll, "TestSize%d" % i)
+ f.restype = S
+ res = f()
+ for i, f in enumerate(fields):
+ value = getattr(res, f[0])
+ expected = chr(ord('a') + i)
+ self.assertEqual(value, expected)
+
+
+
+@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class TestWintypes(unittest.TestCase):
def test_HWND(self):
from ctypes import wintypes
diff --git a/lib-python/2.7/curses/ascii.py b/lib-python/2.7/curses/ascii.py
index 6a466e0078..5b243be681 100644
--- a/lib-python/2.7/curses/ascii.py
+++ b/lib-python/2.7/curses/ascii.py
@@ -53,19 +53,19 @@ def _ctoi(c):
def isalnum(c): return isalpha(c) or isdigit(c)
def isalpha(c): return isupper(c) or islower(c)
-def isascii(c): return _ctoi(c) <= 127 # ?
+def isascii(c): return 0 <= _ctoi(c) <= 127 # ?
def isblank(c): return _ctoi(c) in (9, 32)
-def iscntrl(c): return _ctoi(c) <= 31 or _ctoi(c) == 127
-def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
-def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
-def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
-def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
+def iscntrl(c): return 0 <= _ctoi(c) <= 31 or _ctoi(c) == 127
+def isdigit(c): return 48 <= _ctoi(c) <= 57
+def isgraph(c): return 33 <= _ctoi(c) <= 126
+def islower(c): return 97 <= _ctoi(c) <= 122
+def isprint(c): return 32 <= _ctoi(c) <= 126
def ispunct(c): return isgraph(c) and not isalnum(c)
def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
-def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
+def isupper(c): return 65 <= _ctoi(c) <= 90
def isxdigit(c): return isdigit(c) or \
- (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
-def isctrl(c): return _ctoi(c) < 32
+ (65 <= _ctoi(c) <= 70) or (97 <= _ctoi(c) <= 102)
+def isctrl(c): return 0 <= _ctoi(c) < 32
def ismeta(c): return _ctoi(c) > 127
def ascii(c):
diff --git a/lib-python/2.7/curses/has_key.py b/lib-python/2.7/curses/has_key.py
index 1dd5a3bd4a..60b7be9942 100644
--- a/lib-python/2.7/curses/has_key.py
+++ b/lib-python/2.7/curses/has_key.py
@@ -182,7 +182,7 @@ if __name__ == '__main__':
L = []
_curses.initscr()
for key in _capability_names.keys():
- system = key in _curses
+ system = _curses.has_key(key)
python = has_key(key)
if system != python:
L.append( 'Mismatch for key %s, system=%i, Python=%i'
diff --git a/lib-python/2.7/curses/textpad.py b/lib-python/2.7/curses/textpad.py
index c45361c7d2..b50c03a22d 100644
--- a/lib-python/2.7/curses/textpad.py
+++ b/lib-python/2.7/curses/textpad.py
@@ -43,16 +43,20 @@ class Textbox:
def __init__(self, win, insert_mode=False):
self.win = win
self.insert_mode = insert_mode
- (self.maxy, self.maxx) = win.getmaxyx()
- self.maxy = self.maxy - 1
- self.maxx = self.maxx - 1
+ self._update_max_yx()
self.stripspaces = 1
self.lastcmd = None
win.keypad(1)
+ def _update_max_yx(self):
+ maxy, maxx = self.win.getmaxyx()
+ self.maxy = maxy - 1
+ self.maxx = maxx - 1
+
def _end_of_line(self, y):
"""Go to the location of the first blank on the given line,
returning the index of the last non-blank character."""
+ self._update_max_yx()
last = self.maxx
while True:
if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP:
@@ -64,8 +68,10 @@ class Textbox:
return last
def _insert_printable_char(self, ch):
+ self._update_max_yx()
(y, x) = self.win.getyx()
- if y < self.maxy or x < self.maxx:
+ backyx = None
+ while y < self.maxy or x < self.maxx:
if self.insert_mode:
oldch = self.win.inch()
# The try-catch ignores the error we trigger from some curses
@@ -75,14 +81,20 @@ class Textbox:
self.win.addch(ch)
except curses.error:
pass
- if self.insert_mode:
- (backy, backx) = self.win.getyx()
- if curses.ascii.isprint(oldch):
- self._insert_printable_char(oldch)
- self.win.move(backy, backx)
+ if not self.insert_mode or not curses.ascii.isprint(oldch):
+ break
+ ch = oldch
+ (y, x) = self.win.getyx()
+ # Remember where to put the cursor back since we are in insert_mode
+ if backyx is None:
+ backyx = y, x
+
+ if backyx is not None:
+ self.win.move(*backyx)
def do_command(self, ch):
"Process a single editing command."
+ self._update_max_yx()
(y, x) = self.win.getyx()
self.lastcmd = ch
if curses.ascii.isprint(ch):
@@ -148,6 +160,7 @@ class Textbox:
def gather(self):
"Collect and return the contents of the window."
result = ""
+ self._update_max_yx()
for y in range(self.maxy+1):
self.win.move(y, 0)
stop = self._end_of_line(y)
diff --git a/lib-python/2.7/decimal.py b/lib-python/2.7/decimal.py
index e5329dde49..220fa57ac5 100644
--- a/lib-python/2.7/decimal.py
+++ b/lib-python/2.7/decimal.py
@@ -1909,7 +1909,7 @@ class Decimal(object):
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
- 'and 2nd argument must be nonzero ;'
+ 'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
diff --git a/lib-python/2.7/difflib.py b/lib-python/2.7/difflib.py
index 1c6fbdbedc..788a92df3f 100644
--- a/lib-python/2.7/difflib.py
+++ b/lib-python/2.7/difflib.py
@@ -1103,7 +1103,7 @@ class Differ:
import re
-def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
+def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match):
r"""
Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
diff --git a/lib-python/2.7/distutils/archive_util.py b/lib-python/2.7/distutils/archive_util.py
index 834b722ed3..19a3bc4668 100644
--- a/lib-python/2.7/distutils/archive_util.py
+++ b/lib-python/2.7/distutils/archive_util.py
@@ -162,7 +162,15 @@ def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
+ if base_dir != os.curdir:
+ path = os.path.normpath(os.path.join(base_dir, ''))
+ zip.write(path, path)
+ log.info("adding '%s'", path)
for dirpath, dirnames, filenames in os.walk(base_dir):
+ for name in dirnames:
+ path = os.path.normpath(os.path.join(dirpath, name, ''))
+ zip.write(path, path)
+ log.info("adding '%s'", path)
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
diff --git a/lib-python/2.7/distutils/ccompiler.py b/lib-python/2.7/distutils/ccompiler.py
index 62506a6e6f..3a7b5b84e1 100644
--- a/lib-python/2.7/distutils/ccompiler.py
+++ b/lib-python/2.7/distutils/ccompiler.py
@@ -160,7 +160,7 @@ class CCompiler:
self.set_executable(key, args[key])
def set_executable(self, key, value):
- if isinstance(value, str):
+ if isinstance(value, basestring):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
@@ -748,8 +748,9 @@ class CCompiler:
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
-main (int argc, char **argv) {
+int main (int argc, char **argv) {
%s();
+ return 0;
}
""" % funcname)
finally:
diff --git a/lib-python/2.7/distutils/command/bdist_dumb.py b/lib-python/2.7/distutils/command/bdist_dumb.py
index 2f3c66829a..d8e023dd05 100644
--- a/lib-python/2.7/distutils/command/bdist_dumb.py
+++ b/lib-python/2.7/distutils/command/bdist_dumb.py
@@ -35,7 +35,7 @@ class bdist_dumb (Command):
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
- "build the archive using relative paths"
+ "build the archive using relative paths "
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
diff --git a/lib-python/2.7/distutils/command/bdist_msi.py b/lib-python/2.7/distutils/command/bdist_msi.py
index 703f873b16..d2401bc473 100644
--- a/lib-python/2.7/distutils/command/bdist_msi.py
+++ b/lib-python/2.7/distutils/command/bdist_msi.py
@@ -99,14 +99,14 @@ class bdist_msi (Command):
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
- "do not compile .py to .pyo (optimized)"
+ "do not compile .py to .pyo (optimized) "
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
- "basename of installation script to be run after"
+ "basename of installation script to be run after "
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
diff --git a/lib-python/2.7/distutils/command/bdist_rpm.py b/lib-python/2.7/distutils/command/bdist_rpm.py
index 477e0ee064..caadf489a9 100644
--- a/lib-python/2.7/distutils/command/bdist_rpm.py
+++ b/lib-python/2.7/distutils/command/bdist_rpm.py
@@ -63,7 +63,7 @@ class bdist_rpm (Command):
"RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") "
"[default: maintainer or author from setup script]"),
('packager=', None,
- "RPM packager (eg. \"Jane Doe <jane@example.net>\")"
+ "RPM packager (eg. \"Jane Doe <jane@example.net>\") "
"[default: vendor]"),
('doc-files=', None,
"list of documentation files (space or comma-separated)"),
diff --git a/lib-python/2.7/distutils/command/bdist_wininst.py b/lib-python/2.7/distutils/command/bdist_wininst.py
index 9bd77aa5d5..0c01e9d85d 100644
--- a/lib-python/2.7/distutils/command/bdist_wininst.py
+++ b/lib-python/2.7/distutils/command/bdist_wininst.py
@@ -35,7 +35,7 @@ class bdist_wininst (Command):
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
- "do not compile .py to .pyo (optimized)"
+ "do not compile .py to .pyo (optimized) "
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
@@ -46,7 +46,7 @@ class bdist_wininst (Command):
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
- "basename of installation script to be run after"
+ "basename of installation script to be run after "
"installation or before deinstallation"),
('pre-install-script=', None,
"Fully qualified filename of a script to be run before "
diff --git a/lib-python/2.7/distutils/command/build.py b/lib-python/2.7/distutils/command/build.py
index f84bf359dc..2360091a23 100644
--- a/lib-python/2.7/distutils/command/build.py
+++ b/lib-python/2.7/distutils/command/build.py
@@ -114,7 +114,7 @@ class build(Command):
self.build_scripts = os.path.join(self.build_base,
'scripts-' + sys.version[0:3])
- if self.executable is None:
+ if self.executable is None and sys.executable:
self.executable = os.path.normpath(sys.executable)
def run(self):
diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py
index 4f4294167e..d3b5c37d51 100644
--- a/lib-python/2.7/distutils/command/build_ext.py
+++ b/lib-python/2.7/distutils/command/build_ext.py
@@ -376,7 +376,7 @@ class build_ext (Command):
ext_name, build_info = ext
log.warn(("old-style (ext_name, build_info) tuple found in "
- "ext_modules for extension '%s'"
+ "ext_modules for extension '%s' "
"-- please convert to Extension instance" % ext_name))
if not (isinstance(ext_name, str) and
diff --git a/lib-python/2.7/distutils/command/check.py b/lib-python/2.7/distutils/command/check.py
index 4ea03d3034..fc7db527b9 100644
--- a/lib-python/2.7/distutils/command/check.py
+++ b/lib-python/2.7/distutils/command/check.py
@@ -124,7 +124,8 @@ class check(Command):
def _check_rst_data(self, data):
"""Returns warnings when the provided data doesn't compile."""
- source_path = StringIO()
+ # the include and csv_table directives need this to be a path
+ source_path = self.distribution.script_name or 'setup.py'
parser = Parser()
settings = frontend.OptionParser(components=(Parser,)).get_default_values()
settings.tab_width = 4
diff --git a/lib-python/2.7/distutils/command/upload.py b/lib-python/2.7/distutils/command/upload.py
index b773f47f76..ff043d2e12 100644
--- a/lib-python/2.7/distutils/command/upload.py
+++ b/lib-python/2.7/distutils/command/upload.py
@@ -55,7 +55,9 @@ class upload(PyPIRCCommand):
def run(self):
if not self.distribution.dist_files:
- raise DistutilsOptionError("No dist file created in earlier command")
+ msg = ("Must create and upload files in one command "
+ "(e.g. setup.py sdist upload)")
+ raise DistutilsOptionError(msg)
for command, pyversion, filename in self.distribution.dist_files:
self.upload_file(command, pyversion, filename)
@@ -155,8 +157,6 @@ class upload(PyPIRCCommand):
body.write(fn)
body.write("\r\n\r\n")
body.write(value)
- if value and value[-1] == '\r':
- body.write('\n') # write an extra newline (lurve Macs)
body.write(end_boundary)
body = body.getvalue()
diff --git a/lib-python/2.7/distutils/spawn.py b/lib-python/2.7/distutils/spawn.py
index d632d8a78f..6a8df1b9de 100644
--- a/lib-python/2.7/distutils/spawn.py
+++ b/lib-python/2.7/distutils/spawn.py
@@ -208,7 +208,8 @@ def find_executable(executable, path=None):
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
- path = os.environ['PATH']
+ path = os.environ.get('PATH', os.defpath)
+
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
diff --git a/lib-python/2.7/distutils/sysconfig_cpython.py b/lib-python/2.7/distutils/sysconfig_cpython.py
index 35f39e418a..6a14f704c7 100644
--- a/lib-python/2.7/distutils/sysconfig_cpython.py
+++ b/lib-python/2.7/distutils/sysconfig_cpython.py
@@ -25,7 +25,12 @@ EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCBuild9. If we're dealing with an x64 Windows build,
# it'll live in project/PCbuild/amd64.
-project_base = os.path.dirname(os.path.abspath(sys.executable))
+if sys.executable:
+ project_base = os.path.dirname(os.path.abspath(sys.executable))
+else:
+ # sys.executable can be empty if argv[0] has been changed and Python is
+ # unable to retrieve the real program name
+ project_base = os.getcwd()
if os.name == "nt" and "pcbuild" in project_base[-8:].lower():
project_base = os.path.abspath(os.path.join(project_base, os.path.pardir))
# PC/VS7.1
@@ -79,7 +84,12 @@ def get_python_inc(plat_specific=0, prefix=None):
if os.name == "posix":
if python_build:
- buildir = os.path.dirname(sys.executable)
+ if sys.executable:
+ buildir = os.path.dirname(sys.executable)
+ else:
+ # sys.executable can be empty if argv[0] has been changed
+ # and Python is unable to retrieve the real program name
+ buildir = os.getcwd()
if plat_specific:
# python.h is located in the buildir
inc_dir = buildir
@@ -171,8 +181,8 @@ def customize_compiler(compiler):
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
- (cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
- get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
+ (cc, cxx, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
+ get_config_vars('CC', 'CXX', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO', 'AR',
'ARFLAGS')
@@ -196,7 +206,7 @@ def customize_compiler(compiler):
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
- cflags = opt + ' ' + os.environ['CFLAGS']
+ cflags = cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
diff --git a/lib-python/2.7/distutils/tests/includetest.rst b/lib-python/2.7/distutils/tests/includetest.rst
new file mode 100644
index 0000000000..d7b4ae38b0
--- /dev/null
+++ b/lib-python/2.7/distutils/tests/includetest.rst
@@ -0,0 +1 @@
+This should be included.
diff --git a/lib-python/2.7/distutils/tests/test_archive_util.py b/lib-python/2.7/distutils/tests/test_archive_util.py
index ed7c2cea69..137100cca8 100644
--- a/lib-python/2.7/distutils/tests/test_archive_util.py
+++ b/lib-python/2.7/distutils/tests/test_archive_util.py
@@ -98,7 +98,7 @@ class ArchiveUtilTestCase(support.TempdirManager,
try:
names = tar.getnames()
names.sort()
- return tuple(names)
+ return names
finally:
tar.close()
diff --git a/lib-python/2.7/distutils/tests/test_bdist_dumb.py b/lib-python/2.7/distutils/tests/test_bdist_dumb.py
index 5db3a850f8..ef9e68131b 100644
--- a/lib-python/2.7/distutils/tests/test_bdist_dumb.py
+++ b/lib-python/2.7/distutils/tests/test_bdist_dumb.py
@@ -86,7 +86,7 @@ class BuildDumbTestCase(support.TempdirManager,
finally:
fp.close()
- contents = sorted(os.path.basename(fn) for fn in contents)
+ contents = sorted(filter(None, map(os.path.basename, contents)))
wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
if not sys.dont_write_bytecode:
wanted.append('foo.pyc')
diff --git a/lib-python/2.7/distutils/tests/test_bdist_rpm.py b/lib-python/2.7/distutils/tests/test_bdist_rpm.py
index 475a8277cb..8248c08b19 100644
--- a/lib-python/2.7/distutils/tests/test_bdist_rpm.py
+++ b/lib-python/2.7/distutils/tests/test_bdist_rpm.py
@@ -99,7 +99,7 @@ class BuildRpmTestCase(support.TempdirManager,
@unittest.skipIf(find_executable('rpmbuild') is None,
'the rpmbuild command is not found')
def test_no_optimize_flag(self):
- # let's create a package that brakes bdist_rpm
+ # let's create a package that breaks bdist_rpm
tmp_dir = self.mkdtemp()
os.environ['HOME'] = tmp_dir # to confine dir '.rpmdb' creation
pkg_dir = os.path.join(tmp_dir, 'foo')
diff --git a/lib-python/2.7/distutils/tests/test_build_ext.py b/lib-python/2.7/distutils/tests/test_build_ext.py
index a4494dba4c..a6d2d2eb15 100644
--- a/lib-python/2.7/distutils/tests/test_build_ext.py
+++ b/lib-python/2.7/distutils/tests/test_build_ext.py
@@ -20,6 +20,7 @@ ALREADY_TESTED = False
class BuildExtTestCase(support.TempdirManager,
support.LoggingSilencer,
+ support.EnvironGuard,
unittest.TestCase):
def setUp(self):
super(BuildExtTestCase, self).setUp()
diff --git a/lib-python/2.7/distutils/tests/test_ccompiler.py b/lib-python/2.7/distutils/tests/test_ccompiler.py
index 446eac2cda..4976098b8d 100644
--- a/lib-python/2.7/distutils/tests/test_ccompiler.py
+++ b/lib-python/2.7/distutils/tests/test_ccompiler.py
@@ -24,6 +24,30 @@ class FakeCompiler(object):
class CCompilerTestCase(support.EnvironGuard, unittest.TestCase):
+ def test_set_executables(self):
+ class MyCCompiler(CCompiler):
+ executables = {'compiler': '', 'compiler_cxx': '', 'linker': ''}
+
+ compiler = MyCCompiler()
+
+ # set executable as list
+ compiler.set_executables(compiler=['env', 'OMPI_MPICC=clang', 'mpicc'])
+ self.assertEqual(compiler.compiler, ['env',
+ 'OMPI_MPICC=clang',
+ 'mpicc'])
+
+ # set executable as string
+ compiler.set_executables(compiler_cxx='env OMPI_MPICXX=clang++ mpicxx')
+ self.assertEqual(compiler.compiler_cxx, ['env',
+ 'OMPI_MPICXX=clang++',
+ 'mpicxx'])
+
+ # set executable as unicode string
+ compiler.set_executables(linker=u'env OMPI_MPICXX=clang++ mpiCC')
+ self.assertEqual(compiler.linker, [u'env',
+ u'OMPI_MPICXX=clang++',
+ u'mpiCC'])
+
def test_gen_lib_options(self):
compiler = FakeCompiler()
libdirs = ['lib1', 'lib2']
diff --git a/lib-python/2.7/distutils/tests/test_check.py b/lib-python/2.7/distutils/tests/test_check.py
index 81058b1911..2e2cf131e0 100644
--- a/lib-python/2.7/distutils/tests/test_check.py
+++ b/lib-python/2.7/distutils/tests/test_check.py
@@ -1,5 +1,6 @@
# -*- encoding: utf8 -*-
"""Tests for distutils.command.check."""
+import os
import textwrap
import unittest
from test.test_support import run_unittest
@@ -8,13 +9,25 @@ from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
+try:
+ import pygments
+except ImportError:
+ pygments = None
+
+
+HERE = os.path.dirname(__file__)
+
+
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
- def _run(self, metadata=None, **options):
+ def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
+ if cwd is not None:
+ old_dir = os.getcwd()
+ os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
@@ -22,6 +35,8 @@ class CheckTestCase(support.LoggingSilencer,
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
+ if cwd is not None:
+ os.chdir(old_dir)
return cmd
def test_check_metadata(self):
@@ -94,6 +109,11 @@ class CheckTestCase(support.LoggingSilencer,
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
+ # check that includes work to test #31292
+ metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
+ cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
+ self.assertEqual(cmd._warnings, 0)
+
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
@@ -120,9 +140,15 @@ class CheckTestCase(support.LoggingSilencer,
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
- self.assertEqual(cmd._warnings, 0)
msgs = cmd._check_rst_data(rest_with_code)
- self.assertEqual(len(msgs), 0)
+ if pygments is not None:
+ self.assertEqual(len(msgs), 0)
+ else:
+ self.assertEqual(len(msgs), 1)
+ self.assertEqual(
+ str(msgs[0][1]),
+ 'Cannot analyze code. Pygments package not found.'
+ )
def test_check_all(self):
diff --git a/lib-python/2.7/distutils/tests/test_install.py b/lib-python/2.7/distutils/tests/test_install.py
index c3492b8c90..397e2a7d4b 100644
--- a/lib-python/2.7/distutils/tests/test_install.py
+++ b/lib-python/2.7/distutils/tests/test_install.py
@@ -26,6 +26,7 @@ def _make_ext_name(modname):
class InstallTestCase(support.TempdirManager,
+ support.EnvironGuard,
support.LoggingSilencer,
unittest.TestCase):
diff --git a/lib-python/2.7/distutils/tests/test_sdist.py b/lib-python/2.7/distutils/tests/test_sdist.py
index 02c1d12e20..c503bd62b7 100644
--- a/lib-python/2.7/distutils/tests/test_sdist.py
+++ b/lib-python/2.7/distutils/tests/test_sdist.py
@@ -130,7 +130,9 @@ class SDistTestCase(PyPIRCCommandTestCase):
zip_file.close()
# making sure everything has been pruned correctly
- self.assertEqual(len(content), 4)
+ expected = ['', 'PKG-INFO', 'README', 'setup.py',
+ 'somecode/', 'somecode/__init__.py']
+ self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected])
@unittest.skipUnless(zlib, "requires zlib")
def test_make_distribution(self):
@@ -246,7 +248,13 @@ class SDistTestCase(PyPIRCCommandTestCase):
zip_file.close()
# making sure everything was added
- self.assertEqual(len(content), 12)
+ expected = ['', 'PKG-INFO', 'README', 'buildout.cfg',
+ 'data/', 'data/data.dt', 'inroot.txt',
+ 'scripts/', 'scripts/script.py', 'setup.py',
+ 'some/', 'some/file.txt', 'some/other_file.txt',
+ 'somecode/', 'somecode/__init__.py', 'somecode/doc.dat',
+ 'somecode/doc.txt']
+ self.assertEqual(sorted(content), ['fake-1.0/' + x for x in expected])
# checking the MANIFEST
f = open(join(self.tmp_dir, 'MANIFEST'))
diff --git a/lib-python/2.7/distutils/tests/test_spawn.py b/lib-python/2.7/distutils/tests/test_spawn.py
index defa54d87f..061a72f1a5 100644
--- a/lib-python/2.7/distutils/tests/test_spawn.py
+++ b/lib-python/2.7/distutils/tests/test_spawn.py
@@ -1,8 +1,11 @@
"""Tests for distutils.spawn."""
-import unittest
import os
+import stat
+import sys
import time
-from test.test_support import captured_stdout, run_unittest
+import unittest
+from test.support import captured_stdout, run_unittest
+from test import support as test_support
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn, find_executable
@@ -53,6 +56,48 @@ class SpawnTestCase(support.TempdirManager,
os.chmod(exe, 0777)
spawn([exe]) # should work without any error
+ def test_find_executable(self):
+ with test_support.temp_dir() as tmp_dir:
+ # use TESTFN to get a pseudo-unique filename
+ program_noeext = test_support.TESTFN
+ # Give the temporary program an ".exe" suffix for all.
+ # It's needed on Windows and not harmful on other platforms.
+ program = program_noeext + ".exe"
+
+ filename = os.path.join(tmp_dir, program)
+ with open(filename, "wb"):
+ pass
+ os.chmod(filename, stat.S_IXUSR)
+
+ # test path parameter
+ rv = find_executable(program, path=tmp_dir)
+ self.assertEqual(rv, filename)
+
+ if sys.platform == 'win32':
+ # test without ".exe" extension
+ rv = find_executable(program_noeext, path=tmp_dir)
+ self.assertEqual(rv, filename)
+
+ # test find in the current directory
+ with test_support.change_cwd(tmp_dir):
+ rv = find_executable(program)
+ self.assertEqual(rv, program)
+
+ # test non-existent program
+ dont_exist_program = "dontexist_" + program
+ rv = find_executable(dont_exist_program , path=tmp_dir)
+ self.assertIsNone(rv)
+
+ # test os.defpath: missing PATH environment variable
+ with test_support.EnvironmentVarGuard() as env:
+ from distutils import spawn
+ with test_support.swap_attr(spawn.os, 'defpath', tmp_dir):
+ env.pop('PATH')
+
+ rv = find_executable(program)
+ self.assertEqual(rv, filename)
+
+
def test_suite():
return unittest.makeSuite(SpawnTestCase)
diff --git a/lib-python/2.7/distutils/tests/test_sysconfig.py b/lib-python/2.7/distutils/tests/test_sysconfig.py
index eb4d27c39e..9e2aeb833b 100644
--- a/lib-python/2.7/distutils/tests/test_sysconfig.py
+++ b/lib-python/2.7/distutils/tests/test_sysconfig.py
@@ -8,8 +8,9 @@ import sys
import textwrap
from distutils import sysconfig
+from distutils.ccompiler import get_default_compiler
from distutils.tests import support
-from test.test_support import TESTFN
+from test.test_support import TESTFN, swap_item
class SysconfigTestCase(support.EnvironGuard,
unittest.TestCase):
@@ -50,6 +51,102 @@ class SysconfigTestCase(support.EnvironGuard,
python_h = os.path.join(inc_dir, "Python.h")
self.assertTrue(os.path.isfile(python_h), python_h)
+ def customize_compiler(self):
+ # make sure AR gets caught
+ class compiler:
+ compiler_type = 'unix'
+
+ def set_executables(self, **kw):
+ self.exes = kw
+
+ sysconfig_vars = {
+ 'AR': 'sc_ar',
+ 'CC': 'sc_cc',
+ 'CXX': 'sc_cxx',
+ 'ARFLAGS': '--sc-arflags',
+ 'CFLAGS': '--sc-cflags',
+ 'CCSHARED': '--sc-ccshared',
+ 'LDSHARED': 'sc_ldshared',
+ 'SO': 'sc_shutil_suffix',
+ }
+
+ comp = compiler()
+ old_vars = dict(sysconfig._config_vars)
+ try:
+ # On macOS, disable _osx_support.customize_compiler()
+ sysconfig._config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
+
+ for key, value in sysconfig_vars.items():
+ sysconfig._config_vars[key] = value
+ sysconfig.customize_compiler(comp)
+ finally:
+ sysconfig._config_vars.clear()
+ sysconfig._config_vars.update(old_vars)
+
+ return comp
+
+ @unittest.skipUnless(get_default_compiler() == 'unix',
+ 'not testing if default compiler is not unix')
+ def test_customize_compiler(self):
+ # Make sure that sysconfig._config_vars is initialized
+ sysconfig.get_config_vars()
+
+ os.environ['AR'] = 'env_ar'
+ os.environ['CC'] = 'env_cc'
+ os.environ['CPP'] = 'env_cpp'
+ os.environ['CXX'] = 'env_cxx --env-cxx-flags'
+ os.environ['LDSHARED'] = 'env_ldshared'
+ os.environ['LDFLAGS'] = '--env-ldflags'
+ os.environ['ARFLAGS'] = '--env-arflags'
+ os.environ['CFLAGS'] = '--env-cflags'
+ os.environ['CPPFLAGS'] = '--env-cppflags'
+
+ comp = self.customize_compiler()
+ self.assertEqual(comp.exes['archiver'],
+ 'env_ar --env-arflags')
+ self.assertEqual(comp.exes['preprocessor'],
+ 'env_cpp --env-cppflags')
+ self.assertEqual(comp.exes['compiler'],
+ 'env_cc --sc-cflags --env-cflags --env-cppflags')
+ self.assertEqual(comp.exes['compiler_so'],
+ ('env_cc --sc-cflags '
+ '--env-cflags ''--env-cppflags --sc-ccshared'))
+ self.assertEqual(comp.exes['compiler_cxx'],
+ 'env_cxx --env-cxx-flags')
+ self.assertEqual(comp.exes['linker_exe'],
+ 'env_cc')
+ self.assertEqual(comp.exes['linker_so'],
+ ('env_ldshared --env-ldflags --env-cflags'
+ ' --env-cppflags'))
+ self.assertEqual(comp.shared_lib_extension, 'sc_shutil_suffix')
+
+ del os.environ['AR']
+ del os.environ['CC']
+ del os.environ['CPP']
+ del os.environ['CXX']
+ del os.environ['LDSHARED']
+ del os.environ['LDFLAGS']
+ del os.environ['ARFLAGS']
+ del os.environ['CFLAGS']
+ del os.environ['CPPFLAGS']
+
+ comp = self.customize_compiler()
+ self.assertEqual(comp.exes['archiver'],
+ 'sc_ar --sc-arflags')
+ self.assertEqual(comp.exes['preprocessor'],
+ 'sc_cc -E')
+ self.assertEqual(comp.exes['compiler'],
+ 'sc_cc --sc-cflags')
+ self.assertEqual(comp.exes['compiler_so'],
+ 'sc_cc --sc-cflags --sc-ccshared')
+ self.assertEqual(comp.exes['compiler_cxx'],
+ 'sc_cxx')
+ self.assertEqual(comp.exes['linker_exe'],
+ 'sc_cc')
+ self.assertEqual(comp.exes['linker_so'],
+ 'sc_ldshared')
+ self.assertEqual(comp.shared_lib_extension, 'sc_shutil_suffix')
+
def test_parse_makefile_base(self):
self.makefile = test.test_support.TESTFN
fd = open(self.makefile, 'w')
diff --git a/lib-python/2.7/distutils/tests/test_upload.py b/lib-python/2.7/distutils/tests/test_upload.py
index 3d4f30504e..d225754587 100644
--- a/lib-python/2.7/distutils/tests/test_upload.py
+++ b/lib-python/2.7/distutils/tests/test_upload.py
@@ -128,6 +128,32 @@ class uploadTestCase(PyPIRCCommandTestCase):
auth = self.last_open.req.headers['Authorization']
self.assertNotIn('\n', auth)
+ # bpo-32304: archives whose last byte was b'\r' were corrupted due to
+ # normalization intended for Mac OS 9.
+ def test_upload_correct_cr(self):
+ # content that ends with \r should not be modified.
+ tmp = self.mkdtemp()
+ path = os.path.join(tmp, 'xxx')
+ self.write_file(path, content='yy\r')
+ command, pyversion, filename = 'xxx', '2.6', path
+ dist_files = [(command, pyversion, filename)]
+ self.write_file(self.rc, PYPIRC_LONG_PASSWORD)
+
+ # other fields that ended with \r used to be modified, now are
+ # preserved.
+ pkg_dir, dist = self.create_dist(
+ dist_files=dist_files,
+ description='long description\r'
+ )
+ cmd = upload(dist)
+ cmd.ensure_finalized()
+ cmd.run()
+
+ headers = dict(self.last_open.req.headers)
+ self.assertEqual(headers['Content-length'], '2170')
+ self.assertIn(b'long description\r', self.last_open.req.data)
+ self.assertNotIn(b'long description\r\n', self.last_open.req.data)
+
def test_upload_fails(self):
self.next_msg = "Not Found"
self.next_code = 404
diff --git a/lib-python/2.7/distutils/tests/test_util.py b/lib-python/2.7/distutils/tests/test_util.py
index 2d7b101d16..e081709729 100644
--- a/lib-python/2.7/distutils/tests/test_util.py
+++ b/lib-python/2.7/distutils/tests/test_util.py
@@ -1,13 +1,17 @@
"""Tests for distutils.util."""
+import os
import sys
import unittest
-from test.test_support import run_unittest
+from test.test_support import run_unittest, swap_attr
from distutils.errors import DistutilsByteCompileError
-from distutils.util import byte_compile, grok_environment_error
+from distutils.tests import support
+from distutils import util # used to patch _environ_checked
+from distutils.util import (byte_compile, grok_environment_error,
+ check_environ, get_platform)
-class UtilTestCase(unittest.TestCase):
+class UtilTestCase(support.EnvironGuard, unittest.TestCase):
def test_dont_write_bytecode(self):
# makes sure byte_compile raise a DistutilsError
@@ -25,6 +29,41 @@ class UtilTestCase(unittest.TestCase):
msg = grok_environment_error(exc)
self.assertEqual(msg, "error: Unable to find batch file")
+ def test_check_environ(self):
+ util._environ_checked = 0
+ os.environ.pop('HOME', None)
+
+ check_environ()
+
+ self.assertEqual(os.environ['PLAT'], get_platform())
+ self.assertEqual(util._environ_checked, 1)
+
+ @unittest.skipUnless(os.name == 'posix', 'specific to posix')
+ def test_check_environ_getpwuid(self):
+ util._environ_checked = 0
+ os.environ.pop('HOME', None)
+
+ import pwd
+
+ # only set pw_dir field, other fields are not used
+ def mock_getpwuid(uid):
+ return pwd.struct_passwd((None, None, None, None, None,
+ '/home/distutils', None))
+
+ with swap_attr(pwd, 'getpwuid', mock_getpwuid):
+ check_environ()
+ self.assertEqual(os.environ['HOME'], '/home/distutils')
+
+ util._environ_checked = 0
+ os.environ.pop('HOME', None)
+
+ # bpo-10496: Catch pwd.getpwuid() error
+ def getpwuid_err(uid):
+ raise KeyError
+ with swap_attr(pwd, 'getpwuid', getpwuid_err):
+ check_environ()
+ self.assertNotIn('HOME', os.environ)
+
def test_suite():
return unittest.makeSuite(UtilTestCase)
diff --git a/lib-python/2.7/distutils/util.py b/lib-python/2.7/distutils/util.py
index 273d234823..c3aeab8674 100644
--- a/lib-python/2.7/distutils/util.py
+++ b/lib-python/2.7/distutils/util.py
@@ -178,8 +178,13 @@ def check_environ ():
return
if os.name == 'posix' and 'HOME' not in os.environ:
- import pwd
- os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+ try:
+ import pwd
+ os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+ except (ImportError, KeyError):
+ # bpo-10496: if the current user identifier doesn't exist in the
+ # password database, do nothing
+ pass
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
diff --git a/lib-python/2.7/doctest.py b/lib-python/2.7/doctest.py
index fedf67011d..1d822b576b 100644
--- a/lib-python/2.7/doctest.py
+++ b/lib-python/2.7/doctest.py
@@ -1651,8 +1651,6 @@ class OutputChecker:
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
- # Remove trailing whitespace on diff output.
- diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
diff --git a/lib-python/2.7/email/_parseaddr.py b/lib-python/2.7/email/_parseaddr.py
index 690db2c22d..dc49d2e45a 100644
--- a/lib-python/2.7/email/_parseaddr.py
+++ b/lib-python/2.7/email/_parseaddr.py
@@ -336,7 +336,12 @@ class AddrlistClass:
aslist.append('@')
self.pos += 1
self.gotonext()
- return EMPTYSTRING.join(aslist) + self.getdomain()
+ domain = self.getdomain()
+ if not domain:
+ # Invalid domain, return an empty address instead of returning a
+ # local part to denote failed parsing.
+ return EMPTYSTRING
+ return EMPTYSTRING.join(aslist) + domain
def getdomain(self):
"""Get the complete domain name from an address."""
@@ -351,6 +356,10 @@ class AddrlistClass:
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
+ elif self.field[self.pos] == '@':
+ # bpo-34155: Don't parse domains with two `@` like
+ # `a@malicious.org@important.com`.
+ return EMPTYSTRING
elif self.field[self.pos] in self.atomends:
break
else:
diff --git a/lib-python/2.7/email/feedparser.py b/lib-python/2.7/email/feedparser.py
index 8031ca666e..298fe79df8 100644
--- a/lib-python/2.7/email/feedparser.py
+++ b/lib-python/2.7/email/feedparser.py
@@ -118,26 +118,6 @@ class BufferedSubFile(object):
self.pushlines(parts)
def pushlines(self, lines):
- # Crack into lines, but preserve the newlines on the end of each
- parts = NLCRE_crack.split(data)
- # The *ahem* interesting behaviour of re.split when supplied grouping
- # parentheses is that the last element of the resulting list is the
- # data after the final RE. In the case of a NL/CR terminated string,
- # this is the empty string.
- self._partial = parts.pop()
- #GAN 29Mar09 bugs 1555570, 1721862 Confusion at 8K boundary ending with \r:
- # is there a \n to follow later?
- if not self._partial and parts and parts[-1].endswith('\r'):
- self._partial = parts.pop(-2)+parts.pop()
- # parts is a list of strings, alternating between the line contents
- # and the eol character(s). Gather up a list of lines after
- # re-attaching the newlines.
- lines = []
- for i in range(len(parts) // 2):
- lines.append(parts[i*2] + parts[i*2+1])
- self.pushlines(lines)
-
- def pushlines(self, lines):
# Reverse and insert at the front of the lines.
self._lines[:0] = lines[::-1]
diff --git a/lib-python/2.7/email/test/test_email.py b/lib-python/2.7/email/test/test_email.py
index 4b4dee3d34..2efe44ac5a 100644
--- a/lib-python/2.7/email/test/test_email.py
+++ b/lib-python/2.7/email/test/test_email.py
@@ -2306,6 +2306,20 @@ class TestMiscellaneous(TestEmailBase):
self.assertEqual(Utils.parseaddr('<>'), ('', ''))
self.assertEqual(Utils.formataddr(Utils.parseaddr('<>')), '')
+ def test_parseaddr_multiple_domains(self):
+ self.assertEqual(
+ Utils.parseaddr('a@b@c'),
+ ('', '')
+ )
+ self.assertEqual(
+ Utils.parseaddr('a@b.c@c'),
+ ('', '')
+ )
+ self.assertEqual(
+ Utils.parseaddr('a@172.17.0.1@c'),
+ ('', '')
+ )
+
def test_noquote_dump(self):
self.assertEqual(
Utils.formataddr(('A Silly Person', 'person@dom.ain')),
diff --git a/lib-python/2.7/email/test/test_email_renamed.py b/lib-python/2.7/email/test/test_email_renamed.py
index 5a41701271..206baaf59a 100644
--- a/lib-python/2.7/email/test/test_email_renamed.py
+++ b/lib-python/2.7/email/test/test_email_renamed.py
@@ -515,11 +515,6 @@ class TestEncoders(unittest.TestCase):
def test_default_cte(self):
eq = self.assertEqual
- msg = MIMEText('hello world')
- eq(msg['content-transfer-encoding'], '7bit')
-
- def test_default_cte(self):
- eq = self.assertEqual
# With no explicit _charset its us-ascii, and all are 7-bit
msg = MIMEText('hello world')
eq(msg['content-transfer-encoding'], '7bit')
diff --git a/lib-python/2.7/email/utils.py b/lib-python/2.7/email/utils.py
index ac13f49d59..5b22521e58 100644
--- a/lib-python/2.7/email/utils.py
+++ b/lib-python/2.7/email/utils.py
@@ -211,6 +211,12 @@ def parsedate_tz(data):
def parseaddr(addr):
+ """
+ Parse addr into its constituent realname and email address parts.
+
+ Return a tuple of realname and email address, unless the parse fails, in
+ which case return a 2-tuple of ('', '').
+ """
addrs = _AddressList(addr).addresslist
if not addrs:
return '', ''
diff --git a/lib-python/2.7/encodings/uu_codec.py b/lib-python/2.7/encodings/uu_codec.py
index 5cb0d2b13e..fcd5aa45a9 100644
--- a/lib-python/2.7/encodings/uu_codec.py
+++ b/lib-python/2.7/encodings/uu_codec.py
@@ -31,6 +31,10 @@ def uu_encode(input,errors='strict',filename='<data>',mode=0666):
read = infile.read
write = outfile.write
+ # Remove newline chars from filename
+ filename = filename.replace('\n','\\n')
+ filename = filename.replace('\r','\\r')
+
# Encode
write('begin %o %s\n' % (mode & 0777, filename))
chunk = read(45)
diff --git a/lib-python/2.7/ensurepip/__init__.py b/lib-python/2.7/ensurepip/__init__.py
index 8def001ce4..6685621c57 100644
--- a/lib-python/2.7/ensurepip/__init__.py
+++ b/lib-python/2.7/ensurepip/__init__.py
@@ -186,8 +186,8 @@ def _main(argv=None):
"--altinstall",
action="store_true",
default=False,
- help=("Make an alternate install, installing only the X.Y versioned"
- "scripts (Default: pipX, pipX.Y, easy_install-X.Y)"),
+ help=("Make an alternate install, installing only the X.Y versioned "
+ "scripts (Default: pipX, pipX.Y, easy_install-X.Y)."),
)
parser.add_argument(
"--default-pip",
diff --git a/lib-python/2.7/ensurepip/__main__.py b/lib-python/2.7/ensurepip/__main__.py
index 77527d7a35..03eef0dd94 100644
--- a/lib-python/2.7/ensurepip/__main__.py
+++ b/lib-python/2.7/ensurepip/__main__.py
@@ -1,4 +1,5 @@
import ensurepip
+import sys
if __name__ == "__main__":
- ensurepip._main()
+ sys.exit(ensurepip._main())
diff --git a/lib-python/2.7/ensurepip/_uninstall.py b/lib-python/2.7/ensurepip/_uninstall.py
index 750365ec4d..b257904328 100644
--- a/lib-python/2.7/ensurepip/_uninstall.py
+++ b/lib-python/2.7/ensurepip/_uninstall.py
@@ -2,6 +2,7 @@
import argparse
import ensurepip
+import sys
def _main(argv=None):
@@ -23,8 +24,8 @@ def _main(argv=None):
args = parser.parse_args(argv)
- ensurepip._uninstall_helper(verbosity=args.verbosity)
+ return ensurepip._uninstall_helper(verbosity=args.verbosity)
if __name__ == "__main__":
- _main()
+ sys.exit(_main())
diff --git a/lib-python/2.7/fpformat.py b/lib-python/2.7/fpformat.py
index 71cbb25f3c..0537a27b88 100644
--- a/lib-python/2.7/fpformat.py
+++ b/lib-python/2.7/fpformat.py
@@ -19,7 +19,7 @@ import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
-decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
+decoder = re.compile(r'^([-+]?)(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
@@ -41,6 +41,7 @@ def extract(s):
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
+ intpart = intpart.lstrip('0');
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
diff --git a/lib-python/2.7/ftplib.py b/lib-python/2.7/ftplib.py
index 3739741901..6644554792 100644
--- a/lib-python/2.7/ftplib.py
+++ b/lib-python/2.7/ftplib.py
@@ -171,6 +171,8 @@ class FTP:
# Internal: send one line to the server, appending CRLF
def putline(self, line):
+ if '\r' in line or '\n' in line:
+ raise ValueError('an illegal newline character should not be contained')
line = line + CRLF
if self.debugging > 1: print '*put*', self.sanitize(line)
self.sock.sendall(line)
@@ -412,12 +414,14 @@ class FTP:
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
- while 1:
- data = conn.recv(blocksize)
- if not data:
- break
- callback(data)
- conn.close()
+ try:
+ while 1:
+ data = conn.recv(blocksize)
+ if not data:
+ break
+ callback(data)
+ finally:
+ conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
@@ -435,21 +439,25 @@ class FTP:
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
- fp = conn.makefile('rb')
- while 1:
- line = fp.readline(self.maxline + 1)
- if len(line) > self.maxline:
- raise Error("got more than %d bytes" % self.maxline)
- if self.debugging > 2: print '*retr*', repr(line)
- if not line:
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] == '\n':
- line = line[:-1]
- callback(line)
- fp.close()
- conn.close()
+ fp = None
+ try:
+ fp = conn.makefile('rb')
+ while 1:
+ line = fp.readline(self.maxline + 1)
+ if len(line) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
+ if self.debugging > 2: print '*retr*', repr(line)
+ if not line:
+ break
+ if line[-2:] == CRLF:
+ line = line[:-2]
+ elif line[-1:] == '\n':
+ line = line[:-1]
+ callback(line)
+ finally:
+ if fp:
+ fp.close()
+ conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
@@ -469,12 +477,14 @@ class FTP:
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
- while 1:
- buf = fp.read(blocksize)
- if not buf: break
- conn.sendall(buf)
- if callback: callback(buf)
- conn.close()
+ try:
+ while 1:
+ buf = fp.read(blocksize)
+ if not buf: break
+ conn.sendall(buf)
+ if callback: callback(buf)
+ finally:
+ conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
@@ -491,17 +501,19 @@ class FTP:
"""
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
- while 1:
- buf = fp.readline(self.maxline + 1)
- if len(buf) > self.maxline:
- raise Error("got more than %d bytes" % self.maxline)
- if not buf: break
- if buf[-2:] != CRLF:
- if buf[-1] in CRLF: buf = buf[:-1]
- buf = buf + CRLF
- conn.sendall(buf)
- if callback: callback(buf)
- conn.close()
+ try:
+ while 1:
+ buf = fp.readline(self.maxline + 1)
+ if len(buf) > self.maxline:
+ raise Error("got more than %d bytes" % self.maxline)
+ if not buf: break
+ if buf[-2:] != CRLF:
+ if buf[-1] in CRLF: buf = buf[:-1]
+ buf = buf + CRLF
+ conn.sendall(buf)
+ if callback: callback(buf)
+ finally:
+ conn.close()
return self.voidresp()
def acct(self, password):
diff --git a/lib-python/2.7/functools.py b/lib-python/2.7/functools.py
index 53680b8946..5d755d4726 100644
--- a/lib-python/2.7/functools.py
+++ b/lib-python/2.7/functools.py
@@ -55,23 +55,28 @@ def total_ordering(cls):
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
+ ('__ne__', lambda self, other: not self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
+ ('__ne__', lambda self, other: not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
+ ('__ne__', lambda self, other: not self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
+ ('__ne__', lambda self, other: not self == other),
('__lt__', lambda self, other: not self >= other)]
}
- roots = set(dir(cls)) & set(convert)
+ defined_methods = set(dir(cls))
+ roots = defined_methods & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
- if opname not in roots:
+ if opname not in defined_methods:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
diff --git a/lib-python/2.7/gzip.py b/lib-python/2.7/gzip.py
index 07c6db493b..76ace394f4 100644
--- a/lib-python/2.7/gzip.py
+++ b/lib-python/2.7/gzip.py
@@ -95,9 +95,8 @@ class GzipFile(io.BufferedIOBase):
if filename is None:
# Issue #13781: os.fdopen() creates a fileobj with a bogus name
# attribute. Avoid saving this in the gzip header's filename field.
- if hasattr(fileobj, 'name') and fileobj.name != '<fdopen>':
- filename = fileobj.name
- else:
+ filename = getattr(fileobj, 'name', '')
+ if not isinstance(filename, basestring) or filename == '<fdopen>':
filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
diff --git a/lib-python/2.7/httplib.py b/lib-python/2.7/httplib.py
index e108d6f21c..81a08d5d71 100644
--- a/lib-python/2.7/httplib.py
+++ b/lib-python/2.7/httplib.py
@@ -1031,9 +1031,11 @@ class HTTPConnection:
# prevent http header injection
match = _contains_disallowed_method_pchar_re.search(method)
if match:
- raise ValueError(
- "method can't contain control characters. %r (found "
- "at least %r)" % (method, match.group()))
+ msg = (
+ "method can't contain control characters. {method!r} "
+ "(found at least {matched!r})"
+ ).format(matched=match.group(), method=method)
+ raise ValueError(msg)
def _validate_path(self, url):
"""Validate a url for putrequest."""
diff --git a/lib-python/2.7/idlelib/CallTipWindow.py b/lib-python/2.7/idlelib/CallTipWindow.py
index e63164b5a0..2a453d06f5 100644
--- a/lib-python/2.7/idlelib/CallTipWindow.py
+++ b/lib-python/2.7/idlelib/CallTipWindow.py
@@ -72,6 +72,7 @@ class CallTip:
background="#ffffe0", relief=SOLID, borderwidth=1,
font = self.widget['font'])
self.label.pack()
+ tw.update_idletasks()
tw.lift() # work around bug in Tk 8.5.18+ (issue #24570)
self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
diff --git a/lib-python/2.7/idlelib/FileList.py b/lib-python/2.7/idlelib/FileList.py
index 8318ff17b2..46979e33e3 100644
--- a/lib-python/2.7/idlelib/FileList.py
+++ b/lib-python/2.7/idlelib/FileList.py
@@ -107,8 +107,10 @@ class FileList:
def _test():
from idlelib.EditorWindow import fixwordbreaks
+ from idlelib.run import fix_scaling
import sys
root = Tk()
+ fix_scaling(root)
fixwordbreaks(root)
root.withdraw()
flist = FileList(root)
diff --git a/lib-python/2.7/idlelib/HyperParser.py b/lib-python/2.7/idlelib/HyperParser.py
index 5816d00f45..6e45b161a4 100644
--- a/lib-python/2.7/idlelib/HyperParser.py
+++ b/lib-python/2.7/idlelib/HyperParser.py
@@ -167,7 +167,7 @@ class HyperParser:
given index, which is empty if there is no real one.
"""
if not self.is_in_code():
- raise ValueError("get_expression should only be called"
+ raise ValueError("get_expression should only be called "
"if index is inside a code.")
rawtext = self.rawtext
diff --git a/lib-python/2.7/idlelib/IOBinding.py b/lib-python/2.7/idlelib/IOBinding.py
index 2aba46e0df..872bece476 100644
--- a/lib-python/2.7/idlelib/IOBinding.py
+++ b/lib-python/2.7/idlelib/IOBinding.py
@@ -383,6 +383,8 @@ class IOBinding:
try:
with open(filename, "wb") as f:
f.write(chars)
+ f.flush()
+ os.fsync(f.fileno())
return True
except IOError as msg:
tkMessageBox.showerror("I/O Error", str(msg),
diff --git a/lib-python/2.7/idlelib/NEWS.txt b/lib-python/2.7/idlelib/NEWS.txt
index 35e2a7ecce..d4560b8c2b 100644
--- a/lib-python/2.7/idlelib/NEWS.txt
+++ b/lib-python/2.7/idlelib/NEWS.txt
@@ -1,6 +1,29 @@
+Since 2.7.13, only severe bugs are fixed on the 2.7 branch.
+
+What's New in IDLE 2.7.17?
+==========================
+*Release date: 2019-07-??*
+
+bpo-36807: When saving a file, call file.flush() and os.fsync()
+so bits are flushed to e.g. a USB drive.
+
+
+What's New in IDLE 2.7.16?
+==========================
+*Release date: 2019-03-02*
+
+bpo-31500: Default fonts now are scaled on HiDPI displays.
+
+bpo-34275: Make calltips always visible on Mac.
+Patch by Kevin Walzer.
+
+bpo-34120: Fix freezing after closing some dialogs on Mac.
+This is one of multiple regressions from using newer tcl/tk.
+
+
What's New in IDLE 2.7.13?
==========================
-*Release date: 2017-01-01?*
+*Release date: 2016-12-17*
- Issue #27854: Make Help => IDLE Help work again on Windows.
Include idlelib/help.html in 2.7 Windows installer.
@@ -35,7 +58,7 @@ What's New in IDLE 2.7.13?
What's New in IDLE 2.7.12?
==========================
-*Release date: 2015-06-25*
+*Release date: 2016-06-25*
- Issue #5124: Paste with text selected now replaces the selection on X11.
This matches how paste works on Windows, Mac, most modern Linux apps,
diff --git a/lib-python/2.7/idlelib/PyShell.py b/lib-python/2.7/idlelib/PyShell.py
index 3ea1a7d8c8..2ea7e6b939 100755
--- a/lib-python/2.7/idlelib/PyShell.py
+++ b/lib-python/2.7/idlelib/PyShell.py
@@ -1040,7 +1040,7 @@ class PyShell(OutputWindow):
return self.shell_title
COPYRIGHT = \
- 'Type "copyright", "credits" or "license()" for more information.'
+ 'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
@@ -1370,7 +1370,7 @@ class PseudoInputFile(PseudoFile):
raise ValueError("read from closed file")
if size is None:
size = -1
- elif not isinstance(size, int):
+ elif not isinstance(size, (int, long)):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
@@ -1393,7 +1393,7 @@ class PseudoInputFile(PseudoFile):
raise ValueError("read from closed file")
if size is None:
size = -1
- elif not isinstance(size, int):
+ elif not isinstance(size, (int, long)):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
@@ -1552,13 +1552,15 @@ def main():
# start editor and/or shell windows:
root = Tk(className="Idle")
root.withdraw()
+ from idlelib.run import fix_scaling
+ fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
- elif TkVersion >= 8.5:
+ elif TkVersion >= 8.5 and sys.platform != 'darwin':
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
diff --git a/lib-python/2.7/idlelib/SearchDialogBase.py b/lib-python/2.7/idlelib/SearchDialogBase.py
index 651e7f4a3f..9eb8b223ff 100644
--- a/lib-python/2.7/idlelib/SearchDialogBase.py
+++ b/lib-python/2.7/idlelib/SearchDialogBase.py
@@ -52,6 +52,7 @@ class SearchDialogBase:
else:
self.top.deiconify()
self.top.tkraise()
+ self.top.transient(text.winfo_toplevel())
if searchphrase:
self.ent.delete(0,"end")
self.ent.insert("end",searchphrase)
@@ -64,6 +65,7 @@ class SearchDialogBase:
"Put dialog away for later use."
if self.top:
self.top.grab_release()
+ self.top.transient('')
self.top.withdraw()
def create_widgets(self):
diff --git a/lib-python/2.7/idlelib/aboutDialog.py b/lib-python/2.7/idlelib/aboutDialog.py
index c9adc08bb2..87d6c3cb9c 100644
--- a/lib-python/2.7/idlelib/aboutDialog.py
+++ b/lib-python/2.7/idlelib/aboutDialog.py
@@ -141,6 +141,7 @@ class AboutDialog(Toplevel):
textView.view_file(self, title, fn, encoding)
def Ok(self, event=None):
+ self.grab_release()
self.destroy()
if __name__ == '__main__':
diff --git a/lib-python/2.7/idlelib/configDialog.py b/lib-python/2.7/idlelib/configDialog.py
index d53f5ff200..3c29af1376 100644
--- a/lib-python/2.7/idlelib/configDialog.py
+++ b/lib-python/2.7/idlelib/configDialog.py
@@ -1197,10 +1197,12 @@ class ConfigDialog(Toplevel):
instance.reset_help_menu_entries()
def Cancel(self):
+ self.grab_release()
self.destroy()
def Ok(self):
self.Apply()
+ self.grab_release()
self.destroy()
def Apply(self):
diff --git a/lib-python/2.7/idlelib/configHelpSourceEdit.py b/lib-python/2.7/idlelib/configHelpSourceEdit.py
index 5816449caf..62b010a0ca 100644
--- a/lib-python/2.7/idlelib/configHelpSourceEdit.py
+++ b/lib-python/2.7/idlelib/configHelpSourceEdit.py
@@ -155,10 +155,12 @@ class GetHelpSourceDialog(Toplevel):
# Mac Safari insists on using the URI form for local files
self.result = list(self.result)
self.result[1] = "file://" + path
+ self.grab_release()
self.destroy()
def Cancel(self, event=None):
self.result = None
+ self.grab_release()
self.destroy()
if __name__ == '__main__':
diff --git a/lib-python/2.7/idlelib/configSectionNameDialog.py b/lib-python/2.7/idlelib/configSectionNameDialog.py
index c09dca896b..f28dc1a283 100644
--- a/lib-python/2.7/idlelib/configSectionNameDialog.py
+++ b/lib-python/2.7/idlelib/configSectionNameDialog.py
@@ -80,10 +80,13 @@ class GetCfgSectionNameDialog(Toplevel):
name = self.name_ok()
if name:
self.result = name
+ self.grab_release()
self.destroy()
def Cancel(self, event=None):
self.result = ''
+ self.grab_release()
self.destroy()
+
if __name__ == '__main__':
import unittest
unittest.main('idlelib.idle_test.test_config_name', verbosity=2, exit=False)
diff --git a/lib-python/2.7/idlelib/help.html b/lib-python/2.7/idlelib/help.html
index 1c4598bfd5..9a20c3949e 100644
--- a/lib-python/2.7/idlelib/help.html
+++ b/lib-python/2.7/idlelib/help.html
@@ -700,7 +700,7 @@ are currently:</p>
</ul>
</div>
<div class="footer">
- &copy; <a href="../copyright.html">Copyright</a> 1990-2016, Python Software Foundation.
+ &copy; <a href="../copyright.html">Copyright</a> 1990-2017, Python Software Foundation.
<br />
The Python Software Foundation is a non-profit corporation.
<a href="https://www.python.org/psf/donations/">Please donate.</a>
diff --git a/lib-python/2.7/idlelib/idle_test/htest.py b/lib-python/2.7/idlelib/idle_test/htest.py
index f34140921c..9e2ddd2c19 100644
--- a/lib-python/2.7/idlelib/idle_test/htest.py
+++ b/lib-python/2.7/idlelib/idle_test/htest.py
@@ -112,7 +112,7 @@ ConfigDialog_spec = {
"font face of the text in the area below it.\nIn the "
"'Highlighting' tab, try different color schemes. Clicking "
"items in the sample program should update the choices above it."
- "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings"
+ "\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings "
"of interest."
"\n[Ok] to close the dialog.[Apply] to apply the settings and "
"and [Cancel] to revert all changes.\nRe-run the test to ensure "
@@ -171,7 +171,7 @@ GetKeysDialog_spec = {
'msg': "Test for different key modifier sequences.\n"
"<nothing> is invalid.\n"
"No modifier key is invalid.\n"
- "Shift key with [a-z],[0-9], function key, move key, tab, space"
+ "Shift key with [a-z],[0-9], function key, move key, tab, space "
"is invalid.\nNo validitity checking if advanced key binding "
"entry is used."
}
@@ -237,7 +237,7 @@ _percolator_spec = {
'file': 'Percolator',
'kwds': {},
'msg': "There are two tracers which can be toggled using a checkbox.\n"
- "Toggling a tracer 'on' by checking it should print tracer"
+ "Toggling a tracer 'on' by checking it should print tracer "
"output to the console or to the IDLE shell.\n"
"If both the tracers are 'on', the output from the tracer which "
"was switched 'on' later, should be printed first\n"
@@ -329,7 +329,7 @@ _undo_delegator_spec = {
_widget_redirector_spec = {
'file': 'WidgetRedirector',
'kwds': {},
- 'msg': "Every text insert should be printed to the console."
+ 'msg': "Every text insert should be printed to the console "
"or the IDLE shell."
}
diff --git a/lib-python/2.7/idlelib/idle_test/mock_tk.py b/lib-python/2.7/idlelib/idle_test/mock_tk.py
index f42a039711..56ca87695a 100644
--- a/lib-python/2.7/idlelib/idle_test/mock_tk.py
+++ b/lib-python/2.7/idlelib/idle_test/mock_tk.py
@@ -260,7 +260,7 @@ class Text(object):
elif op == '!=':
return line1 != line2 or char1 != char2
else:
- raise TclError('''bad comparison operator "%s":'''
+ raise TclError('''bad comparison operator "%s": '''
'''must be <, <=, ==, >=, >, or !=''' % op)
# The following Text methods normally do something and return None.
diff --git a/lib-python/2.7/idlelib/idle_test/test_config_name.py b/lib-python/2.7/idlelib/idle_test/test_config_name.py
index 4403f87fd3..2a4df6a744 100644
--- a/lib-python/2.7/idlelib/idle_test/test_config_name.py
+++ b/lib-python/2.7/idlelib/idle_test/test_config_name.py
@@ -15,6 +15,8 @@ class Dummy_name_dialog(object):
name = Var()
result = None
destroyed = False
+ def grab_release(self):
+ pass
def destroy(self):
self.destroyed = True
diff --git a/lib-python/2.7/idlelib/idle_test/test_searchdialogbase.py b/lib-python/2.7/idlelib/idle_test/test_searchdialogbase.py
index 32abfe6f79..59b9bbf30f 100644
--- a/lib-python/2.7/idlelib/idle_test/test_searchdialogbase.py
+++ b/lib-python/2.7/idlelib/idle_test/test_searchdialogbase.py
@@ -6,7 +6,7 @@ testing skipping of suite when self.needwrapbutton is false.
'''
import unittest
from test.test_support import requires
-from Tkinter import Tk, Toplevel, Frame ## BooleanVar, StringVar
+from Tkinter import Text, Tk, Toplevel, Frame ## BooleanVar, StringVar
from idlelib import SearchEngine as se
from idlelib import SearchDialogBase as sdb
from idlelib.idle_test.mock_idle import Func
@@ -45,16 +45,17 @@ class SearchDialogBaseTest(unittest.TestCase):
# open calls create_widgets, which needs default_command
self.dialog.default_command = None
- # Since text parameter of .open is not used in base class,
- # pass dummy 'text' instead of tk.Text().
- self.dialog.open('text')
+ toplevel = Toplevel(self.root)
+ text = Text(toplevel)
+ self.dialog.open(text)
self.assertEqual(self.dialog.top.state(), 'normal')
self.dialog.close()
self.assertEqual(self.dialog.top.state(), 'withdrawn')
- self.dialog.open('text', searchphrase="hello")
+ self.dialog.open(text, searchphrase="hello")
self.assertEqual(self.dialog.ent.get(), 'hello')
- self.dialog.close()
+ toplevel.update_idletasks()
+ toplevel.destroy()
def test_create_widgets(self):
self.dialog.create_entries = Func()
diff --git a/lib-python/2.7/idlelib/keybindingDialog.py b/lib-python/2.7/idlelib/keybindingDialog.py
index 4d32ca9476..9713c79aae 100644
--- a/lib-python/2.7/idlelib/keybindingDialog.py
+++ b/lib-python/2.7/idlelib/keybindingDialog.py
@@ -182,7 +182,7 @@ class GetKeysDialog(Toplevel):
def LoadFinalKeyList(self):
#these tuples are also available for use in validity checks
- self.functionKeys=('F1','F2','F2','F4','F5','F6','F7','F8','F9',
+ self.functionKeys=('F1','F2','F3','F4','F5','F6','F7','F8','F9',
'F10','F11','F12')
self.alphanumKeys=tuple(string.ascii_lowercase+string.digits)
self.punctuationKeys=tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
@@ -217,10 +217,12 @@ class GetKeysDialog(Toplevel):
def OK(self, event=None):
if self.advanced or self.KeysOK(): # doesn't check advanced string yet
self.result=self.keyString.get()
+ self.grab_release()
self.destroy()
def Cancel(self, event=None):
self.result=''
+ self.grab_release()
self.destroy()
def KeysOK(self):
diff --git a/lib-python/2.7/idlelib/run.py b/lib-python/2.7/idlelib/run.py
index 466c61ea34..518afabd1d 100644
--- a/lib-python/2.7/idlelib/run.py
+++ b/lib-python/2.7/idlelib/run.py
@@ -155,6 +155,7 @@ def show_socket_error(err, address):
import Tkinter
import tkMessageBox
root = Tkinter.Tk()
+ fix_scaling(root)
root.withdraw()
if err.args[0] == 61: # connection refused
msg = "IDLE's subprocess can't connect to %s:%d. This may be due "\
@@ -240,6 +241,19 @@ def exit():
capture_warnings(False)
sys.exit(0)
+
+def fix_scaling(root):
+ """Scale fonts on HiDPI displays."""
+ import tkFont
+ scaling = float(root.tk.call('tk', 'scaling'))
+ if scaling > 1.4:
+ for name in tkFont.names(root):
+ font = tkFont.Font(root=root, name=name, exists=True)
+ size = int(font['size'])
+ if size < 0:
+ font['size'] = int(round(-0.75*size))
+
+
class MyRPCServer(rpc.RPCServer):
def handle_error(self, request, client_address):
diff --git a/lib-python/2.7/idlelib/textView.py b/lib-python/2.7/idlelib/textView.py
index b8c4ac1821..ec837f810c 100644
--- a/lib-python/2.7/idlelib/textView.py
+++ b/lib-python/2.7/idlelib/textView.py
@@ -39,7 +39,8 @@ class TextViewer(Toplevel):
self.textView.insert(0.0, text)
self.textView.config(state=DISABLED)
- if modal:
+ self.is_modal = modal
+ if self.is_modal:
self.transient(parent)
self.grab_set()
self.wait_window()
@@ -62,6 +63,8 @@ class TextViewer(Toplevel):
frameText.pack(side=TOP,expand=TRUE,fill=BOTH)
def Ok(self, event=None):
+ if self.is_modal:
+ self.grab_release()
self.destroy()
diff --git a/lib-python/2.7/imaplib.py b/lib-python/2.7/imaplib.py
index 826eea2524..679c468251 100644
--- a/lib-python/2.7/imaplib.py
+++ b/lib-python/2.7/imaplib.py
@@ -70,6 +70,7 @@ Commands = {
'LOGIN': ('NONAUTH',),
'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'LSUB': ('AUTH', 'SELECTED'),
+ 'MOVE': ('SELECTED',),
'NAMESPACE': ('AUTH', 'SELECTED'),
'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
'PARTIAL': ('SELECTED',), # NB: obsolete
@@ -264,8 +265,10 @@ class IMAP4:
try:
self.sock.shutdown(socket.SHUT_RDWR)
except socket.error as e:
- # The server might already have closed the connection
- if e.errno != errno.ENOTCONN:
+ # The server might already have closed the connection.
+ # On Windows, this may result in WSAEINVAL (error 10022):
+ # An invalid operation was attempted.
+ if e.errno not in (errno.ENOTCONN, 10022):
raise
finally:
self.sock.close()
@@ -1179,16 +1182,6 @@ else:
self.file = self.sslobj.makefile('rb')
- def read(self, size):
- """Read 'size' bytes from remote."""
- return self.file.read(size)
-
-
- def readline(self):
- """Read line from remote."""
- return self.file.readline()
-
-
def send(self, data):
"""Send data to remote."""
bytes = len(data)
@@ -1409,7 +1402,7 @@ def Time2Internaldate(date_time):
be in the correct format.
"""
- if isinstance(date_time, (int, float)):
+ if isinstance(date_time, (int, long, float)):
tt = time.localtime(date_time)
elif isinstance(date_time, (tuple, time.struct_time)):
tt = date_time
diff --git a/lib-python/2.7/inspect.py b/lib-python/2.7/inspect.py
index 4335258898..5a010f5bfb 100644
--- a/lib-python/2.7/inspect.py
+++ b/lib-python/2.7/inspect.py
@@ -692,8 +692,15 @@ def getsourcelines(object):
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
- if ismodule(object): return lines, 0
- else: return getblock(lines[lnum:]), lnum + 1
+ if istraceback(object):
+ object = object.tb_frame
+
+ # for module or frame that corresponds to module, return all source lines
+ if (ismodule(object) or
+ (isframe(object) and object.f_code.co_name == "<module>")):
+ return lines, 0
+ else:
+ return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
@@ -780,8 +787,11 @@ def getargs(co):
if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
remain.append(value)
count.append(value)
- elif opname == 'STORE_FAST':
- stack.append(names[value])
+ elif opname in ('STORE_FAST', 'STORE_DEREF'):
+ if opname == 'STORE_FAST':
+ stack.append(names[value])
+ else:
+ stack.append(co.co_cellvars[value])
# Special case for sublists of length 1: def foo((bar))
# doesn't generate the UNPACK_TUPLE bytecode, so if
diff --git a/lib-python/2.7/json/__init__.py b/lib-python/2.7/json/__init__.py
index 7a71010aa5..1b1ec3a1d3 100644
--- a/lib-python/2.7/json/__init__.py
+++ b/lib-python/2.7/json/__init__.py
@@ -78,7 +78,7 @@ Specializing JSON object encoding::
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
- ... raise TypeError(repr(o) + " is not JSON serializable")
+ ... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
diff --git a/lib-python/2.7/json/tests/test_speedups.py b/lib-python/2.7/json/tests/test_speedups.py
index 7186a40932..a6b7c38ddf 100644
--- a/lib-python/2.7/json/tests/test_speedups.py
+++ b/lib-python/2.7/json/tests/test_speedups.py
@@ -1,6 +1,11 @@
from json.tests import CTest
+class BadBool:
+ def __nonzero__(self):
+ 1/0.0
+
+
class TestSpeedups(CTest):
def test_scanstring(self):
self.assertEqual(self.json.decoder.scanstring.__module__, "_json")
@@ -16,8 +21,31 @@ class TestDecode(CTest):
def test_make_scanner(self):
self.assertRaises(AttributeError, self.json.scanner.c_make_scanner, 1)
+ def test_bad_bool_args(self):
+ def test(value):
+ self.json.decoder.JSONDecoder(strict=BadBool()).decode(value)
+ self.assertRaises(ZeroDivisionError, test, '""')
+ self.assertRaises(ZeroDivisionError, test, '{}')
+ self.assertRaises(ZeroDivisionError, test, u'""')
+ self.assertRaises(ZeroDivisionError, test, u'{}')
+
+
+class TestEncode(CTest):
def test_make_encoder(self):
self.assertRaises(TypeError, self.json.encoder.c_make_encoder,
None,
"\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75",
None)
+
+ def test_bad_bool_args(self):
+ def test(name):
+ self.json.encoder.JSONEncoder(**{name: BadBool()}).encode({'a': 1})
+ self.assertRaises(ZeroDivisionError, test, 'skipkeys')
+ self.assertRaises(ZeroDivisionError, test, 'ensure_ascii')
+ self.assertRaises(ZeroDivisionError, test, 'check_circular')
+ self.assertRaises(ZeroDivisionError, test, 'allow_nan')
+ self.assertRaises(ZeroDivisionError, test, 'sort_keys')
+
+ def test_bad_encoding(self):
+ with self.assertRaises(UnicodeEncodeError):
+ self.json.encoder.JSONEncoder(encoding=u'\udcff').encode({'key': 123})
diff --git a/lib-python/2.7/lib-tk/Tix.py b/lib-python/2.7/lib-tk/Tix.py
index 45e8a90374..d0f8fe750c 100644
--- a/lib-python/2.7/lib-tk/Tix.py
+++ b/lib-python/2.7/lib-tk/Tix.py
@@ -1,7 +1,3 @@
-# -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
-#
-# $Id$
-#
# Tix.py -- Tix widget wrappers.
#
# For Tix, see http://tix.sourceforge.net
diff --git a/lib-python/2.7/lib-tk/Tkinter.py b/lib-python/2.7/lib-tk/Tkinter.py
index 64e99247cf..6c02955928 100644
--- a/lib-python/2.7/lib-tk/Tkinter.py
+++ b/lib-python/2.7/lib-tk/Tkinter.py
@@ -71,7 +71,7 @@ def _stringify(value):
if isinstance(value, (list, tuple)):
if len(value) == 1:
value = _stringify(value[0])
- if value[0] == '{':
+ if _magic_re.search(value):
value = '{%s}' % value
else:
value = '{%s}' % _join(value)
@@ -85,7 +85,10 @@ def _stringify(value):
elif _magic_re.search(value):
# add '\' before special characters and spaces
value = _magic_re.sub(r'\\\1', value)
+ value = value.replace('\n', r'\n')
value = _space_re.sub(r'\\\1', value)
+ if value[0] == '"':
+ value = '\\' + value
elif value[0] == '"' or _space_re.search(value):
value = '{%s}' % value
return value
@@ -586,6 +589,7 @@ class Misc:
if not func:
# I'd rather use time.sleep(ms*0.001)
self.tk.call('after', ms)
+ return None
else:
def callit():
try:
@@ -609,11 +613,13 @@ class Misc:
"""Cancel scheduling of function identified with ID.
Identifier returned by after or after_idle must be
- given as first parameter."""
+ given as first parameter.
+ """
+ if not id:
+ raise ValueError('id must be a valid identifier returned from '
+ 'after or after_idle')
try:
data = self.tk.call('after', 'info', id)
- # In Tk 8.3, splitlist returns: (script, type)
- # In Tk 8.4, splitlist may return (script, type) or (script,)
script = self.tk.splitlist(data)[0]
self.deletecommand(script)
except TclError:
@@ -844,8 +850,7 @@ class Misc:
self.tk.call('winfo', 'height', self._w))
def winfo_id(self):
"""Return identifier ID for this widget."""
- return self.tk.getint(
- self.tk.call('winfo', 'id', self._w))
+ return int(self.tk.call('winfo', 'id', self._w), 0)
def winfo_interps(self, displayof=0):
"""Return the name of all Tcl interpreters for this display."""
args = ('winfo', 'interps') + self._displayof(displayof)
@@ -855,7 +860,7 @@ class Misc:
return getint(
self.tk.call('winfo', 'ismapped', self._w))
def winfo_manager(self):
- """Return the window mananger name for this widget."""
+ """Return the window manager name for this widget."""
return self.tk.call('winfo', 'manager', self._w)
def winfo_name(self):
"""Return the name of this widget."""
@@ -1174,9 +1179,9 @@ class Misc:
elif isinstance(v, (tuple, list)):
nv = []
for item in v:
- if not isinstance(item, (basestring, int)):
+ if not isinstance(item, (basestring, int, long)):
break
- elif isinstance(item, int):
+ elif isinstance(item, (int, long)):
nv.append('%d' % item)
else:
# format it to proper Tcl code if it contains space
@@ -1522,7 +1527,7 @@ class Misc:
return self.tk.splitlist(self.tk.call('image', 'names'))
def image_types(self):
- """Return a list of all available image types (e.g. phote bitmap)."""
+ """Return a list of all available image types (e.g. photo bitmap)."""
return self.tk.splitlist(self.tk.call('image', 'types'))
@@ -2364,7 +2369,7 @@ class Canvas(Widget, XView, YView):
"""Return item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
- closests). If START is specified the next below this tag is taken."""
+ closest). If START is specified the next below this tag is taken."""
return self.find('closest', x, y, halo, start)
def find_enclosed(self, x1, y1, x2, y2):
"""Return all items in rectangle defined
@@ -2424,7 +2429,7 @@ class Canvas(Widget, XView, YView):
"""Print the contents of the canvas to a postscript
file. Valid options: colormap, colormode, file, fontmap,
height, pageanchor, pageheight, pagewidth, pagex, pagey,
- rotate, witdh, x, y."""
+ rotate, width, x, y."""
return self.tk.call((self._w, 'postscript') +
self._options(cnf, kw))
def tag_raise(self, *args):
@@ -3355,7 +3360,7 @@ class Image:
return getint(
self.tk.call('image', 'height', self.name))
def type(self):
- """Return the type of the imgage, e.g. "photo" or "bitmap"."""
+ """Return the type of the image, e.g. "photo" or "bitmap"."""
return self.tk.call('image', 'type', self.name)
def width(self):
"""Return the width of the image."""
@@ -3363,7 +3368,7 @@ class Image:
self.tk.call('image', 'width', self.name))
class PhotoImage(Image):
- """Widget which can display colored images in GIF, PPM/PGM format."""
+ """Widget which can display images in PGM, PPM, GIF, PNG format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create an image with NAME.
@@ -3427,7 +3432,7 @@ class PhotoImage(Image):
self.tk.call(args)
class BitmapImage(Image):
- """Widget which can display a bitmap."""
+ """Widget which can display images in XBM format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create a bitmap with NAME.
@@ -3577,7 +3582,7 @@ class Spinbox(Widget, XView):
select to commands. If the selection isn't currently in
the spinbox, then a new selection is created to include
the characters between index and the most recent selection
- anchor point, inclusive. Returns an empty string.
+ anchor point, inclusive.
"""
return self.selection("adjust", index)
@@ -3585,7 +3590,7 @@ class Spinbox(Widget, XView):
"""Clear the selection
If the selection isn't in this widget then the
- command has no effect. Returns an empty string.
+ command has no effect.
"""
return self.selection("clear")
@@ -3593,9 +3598,9 @@ class Spinbox(Widget, XView):
"""Sets or gets the currently selected element.
If a spinbutton element is specified, it will be
- displayed depressed
+ displayed depressed.
"""
- return self.selection("element", element)
+ return self.tk.call(self._w, 'selection', 'element', element)
###########################################################################
diff --git a/lib-python/2.7/lib-tk/test/test_tkinter/test_font.py b/lib-python/2.7/lib-tk/test/test_tkinter/test_font.py
index 4cbf82e8d8..830c5a691a 100644
--- a/lib-python/2.7/lib-tk/test/test_tkinter/test_font.py
+++ b/lib-python/2.7/lib-tk/test/test_tkinter/test_font.py
@@ -1,7 +1,7 @@
import unittest
import Tkinter as tkinter
import tkFont as font
-from test.test_support import requires, run_unittest
+from test.test_support import requires, run_unittest, gc_collect
from test_ttk.support import AbstractTkTest
requires('gui')
@@ -35,6 +35,16 @@ class FontTest(AbstractTkTest, unittest.TestCase):
self.assertIsInstance(self.font.cget(key), sizetype)
self.assertIsInstance(self.font[key], sizetype)
+ def test_unicode_family(self):
+ family = u'MS \u30b4\u30b7\u30c3\u30af'
+ try:
+ f = font.Font(root=self.root, family=family, exists=True)
+ except tkinter.TclError:
+ f = font.Font(root=self.root, family=family, exists=False)
+ self.assertEqual(f.cget('family'), family)
+ del f
+ gc_collect()
+
def test_actual(self):
options = self.font.actual()
self.assertGreaterEqual(set(options),
diff --git a/lib-python/2.7/lib-tk/test/test_tkinter/test_misc.py b/lib-python/2.7/lib-tk/test/test_tkinter/test_misc.py
new file mode 100644
index 0000000000..796269ede4
--- /dev/null
+++ b/lib-python/2.7/lib-tk/test/test_tkinter/test_misc.py
@@ -0,0 +1,122 @@
+import unittest
+import Tkinter as tkinter
+from test.test_support import requires, run_unittest
+from test_ttk.support import AbstractTkTest
+
+requires('gui')
+
+class MiscTest(AbstractTkTest, unittest.TestCase):
+
+ def test_after(self):
+ root = self.root
+ cbcount = {'count': 0}
+
+ def callback(start=0, step=1):
+ cbcount['count'] = start + step
+
+ # Without function, sleeps for ms.
+ self.assertIsNone(root.after(1))
+
+ # Set up with callback with no args.
+ cbcount['count'] = 0
+ timer1 = root.after(0, callback)
+ self.assertIn(timer1, root.tk.call('after', 'info'))
+ (script, _) = root.tk.splitlist(root.tk.call('after', 'info', timer1))
+ root.update() # Process all pending events.
+ self.assertEqual(cbcount['count'], 1)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call(script)
+
+ # Set up with callback with args.
+ cbcount['count'] = 0
+ timer1 = root.after(0, callback, 42, 11)
+ root.update() # Process all pending events.
+ self.assertEqual(cbcount['count'], 53)
+
+ # Cancel before called.
+ timer1 = root.after(1000, callback)
+ self.assertIn(timer1, root.tk.call('after', 'info'))
+ (script, _) = root.tk.splitlist(root.tk.call('after', 'info', timer1))
+ root.after_cancel(timer1) # Cancel this event.
+ self.assertEqual(cbcount['count'], 53)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call(script)
+
+ def test_after_idle(self):
+ root = self.root
+ cbcount = {'count': 0}
+
+ def callback(start=0, step=1):
+ cbcount['count'] = start + step
+
+ # Set up with callback with no args.
+ cbcount['count'] = 0
+ idle1 = root.after_idle(callback)
+ self.assertIn(idle1, root.tk.call('after', 'info'))
+ (script, _) = root.tk.splitlist(root.tk.call('after', 'info', idle1))
+ root.update_idletasks() # Process all pending events.
+ self.assertEqual(cbcount['count'], 1)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call(script)
+
+ # Set up with callback with args.
+ cbcount['count'] = 0
+ idle1 = root.after_idle(callback, 42, 11)
+ root.update_idletasks() # Process all pending events.
+ self.assertEqual(cbcount['count'], 53)
+
+ # Cancel before called.
+ idle1 = root.after_idle(callback)
+ self.assertIn(idle1, root.tk.call('after', 'info'))
+ (script, _) = root.tk.splitlist(root.tk.call('after', 'info', idle1))
+ root.after_cancel(idle1) # Cancel this event.
+ self.assertEqual(cbcount['count'], 53)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call(script)
+
+ def test_after_cancel(self):
+ root = self.root
+ cbcount = {'count': 0}
+
+ def callback():
+ cbcount['count'] += 1
+
+ timer1 = root.after(5000, callback)
+ idle1 = root.after_idle(callback)
+
+ # No value for id raises a ValueError.
+ with self.assertRaises(ValueError):
+ root.after_cancel(None)
+
+ # Cancel timer event.
+ cbcount['count'] = 0
+ (script, _) = root.tk.splitlist(root.tk.call('after', 'info', timer1))
+ root.tk.call(script)
+ self.assertEqual(cbcount['count'], 1)
+ root.after_cancel(timer1)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call(script)
+ self.assertEqual(cbcount['count'], 1)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call('after', 'info', timer1)
+
+ # Cancel same event - nothing happens.
+ root.after_cancel(timer1)
+
+ # Cancel idle event.
+ cbcount['count'] = 0
+ (script, _) = root.tk.splitlist(root.tk.call('after', 'info', idle1))
+ root.tk.call(script)
+ self.assertEqual(cbcount['count'], 1)
+ root.after_cancel(idle1)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call(script)
+ self.assertEqual(cbcount['count'], 1)
+ with self.assertRaises(tkinter.TclError):
+ root.tk.call('after', 'info', idle1)
+
+
+tests_gui = (MiscTest, )
+
+if __name__ == "__main__":
+ run_unittest(*tests_gui)
diff --git a/lib-python/2.7/lib-tk/test/test_tkinter/test_widgets.py b/lib-python/2.7/lib-tk/test/test_tkinter/test_widgets.py
index 4da309617e..4b196ac5d5 100644
--- a/lib-python/2.7/lib-tk/test/test_tkinter/test_widgets.py
+++ b/lib-python/2.7/lib-tk/test/test_tkinter/test_widgets.py
@@ -88,9 +88,10 @@ class ToplevelTest(AbstractToplevelTest, unittest.TestCase):
widget = self.create()
self.assertEqual(widget['use'], '')
parent = self.create(container=True)
- wid = parent.winfo_id()
+ # hex() adds the 'L' suffix for longs
+ wid = '%#x' % parent.winfo_id()
widget2 = self.create(use=wid)
- self.assertEqual(int(widget2['use']), wid)
+ self.assertEqual(widget2['use'], wid)
@add_standard_options(StandardOptionsTests)
@@ -470,6 +471,14 @@ class SpinboxTest(EntryTest, unittest.TestCase):
self.assertRaises(TypeError, widget.bbox)
self.assertRaises(TypeError, widget.bbox, 0, 1)
+ def test_selection_element(self):
+ widget = self.create()
+ self.assertEqual(widget.selection_element(), "none")
+ widget.selection_element("buttonup")
+ self.assertEqual(widget.selection_element(), "buttonup")
+ widget.selection_element("buttondown")
+ self.assertEqual(widget.selection_element(), "buttondown")
+
@add_standard_options(StandardOptionsTests)
class TextTest(AbstractWidgetTest, unittest.TestCase):
@@ -699,7 +708,7 @@ class ListboxTest(AbstractWidgetTest, unittest.TestCase):
'disabledforeground', 'exportselection',
'font', 'foreground', 'height',
'highlightbackground', 'highlightcolor', 'highlightthickness',
- 'listvariable', 'relief',
+ 'justify', 'listvariable', 'relief',
'selectbackground', 'selectborderwidth', 'selectforeground',
'selectmode', 'setgrid', 'state',
'takefocus', 'width', 'xscrollcommand', 'yscrollcommand',
@@ -713,6 +722,8 @@ class ListboxTest(AbstractWidgetTest, unittest.TestCase):
self.checkEnumParam(widget, 'activestyle',
'dotbox', 'none', 'underline')
+ test_justify = requires_tcl(8, 6, 5)(StandardOptionsTests.test_justify.im_func)
+
def test_listvariable(self):
widget = self.create()
var = tkinter.DoubleVar(self.root)
@@ -946,7 +957,9 @@ class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'borderwidth', 'cursor',
'handlepad', 'handlesize', 'height',
- 'opaqueresize', 'orient', 'relief',
+ 'opaqueresize', 'orient',
+ 'proxybackground', 'proxyborderwidth', 'proxyrelief',
+ 'relief',
'sashcursor', 'sashpad', 'sashrelief', 'sashwidth',
'showhandle', 'width',
)
@@ -973,6 +986,23 @@ class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
widget = self.create()
self.checkBooleanParam(widget, 'opaqueresize')
+ @requires_tcl(8, 6, 5)
+ def test_proxybackground(self):
+ widget = self.create()
+ self.checkColorParam(widget, 'proxybackground')
+
+ @requires_tcl(8, 6, 5)
+ def test_proxyborderwidth(self):
+ widget = self.create()
+ self.checkPixelsParam(widget, 'proxyborderwidth',
+ 0, 1.3, 2.9, 6, -2, '10p',
+ conv=noconv)
+
+ @requires_tcl(8, 6, 5)
+ def test_proxyrelief(self):
+ widget = self.create()
+ self.checkReliefParam(widget, 'proxyrelief')
+
def test_sashcursor(self):
widget = self.create()
self.checkCursorParam(widget, 'sashcursor')
diff --git a/lib-python/2.7/lib-tk/test/test_ttk/support.py b/lib-python/2.7/lib-tk/test/test_ttk/support.py
index c4d842aac1..a86e0ea851 100644
--- a/lib-python/2.7/lib-tk/test/test_ttk/support.py
+++ b/lib-python/2.7/lib-tk/test/test_ttk/support.py
@@ -1,3 +1,4 @@
+import functools
import re
import unittest
import Tkinter as tkinter
@@ -54,9 +55,20 @@ import _tkinter
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
def requires_tcl(*version):
- return unittest.skipUnless(tcl_version >= version,
+ if len(version) <= 2:
+ return unittest.skipUnless(tcl_version >= version,
'requires Tcl version >= ' + '.'.join(map(str, version)))
+ def deco(test):
+ @functools.wraps(test)
+ def newtest(self):
+ if get_tk_patchlevel() < version:
+ self.skipTest('requires Tcl version >= ' +
+ '.'.join(map(str, version)))
+ test(self)
+ return newtest
+ return deco
+
_tk_patchlevel = None
def get_tk_patchlevel():
global _tk_patchlevel
diff --git a/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py b/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py
index 65a04e0a4a..c8879bfb46 100644
--- a/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py
+++ b/lib-python/2.7/lib-tk/test/test_ttk/test_extensions.py
@@ -289,6 +289,31 @@ class OptionMenuTest(AbstractTkTest, unittest.TestCase):
optmenu.destroy()
+ def test_unique_radiobuttons(self):
+ # check that radiobuttons are unique across instances (bpo25684)
+ items = ('a', 'b', 'c')
+ default = 'a'
+ optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
+ textvar2 = tkinter.StringVar(self.root)
+ optmenu2 = ttk.OptionMenu(self.root, textvar2, default, *items)
+ optmenu.pack()
+ optmenu.wait_visibility()
+ optmenu2.pack()
+ optmenu2.wait_visibility()
+ optmenu['menu'].invoke(1)
+ optmenu2['menu'].invoke(2)
+ optmenu_stringvar_name = optmenu['menu'].entrycget(0, 'variable')
+ optmenu2_stringvar_name = optmenu2['menu'].entrycget(0, 'variable')
+ self.assertNotEqual(optmenu_stringvar_name,
+ optmenu2_stringvar_name)
+ self.assertEqual(self.root.tk.globalgetvar(optmenu_stringvar_name),
+ items[1])
+ self.assertEqual(self.root.tk.globalgetvar(optmenu2_stringvar_name),
+ items[2])
+
+ optmenu.destroy()
+ optmenu2.destroy()
+
tests_gui = (LabeledScaleTest, OptionMenuTest)
diff --git a/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py b/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py
index b9cf304795..ac8ba0757a 100644
--- a/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py
+++ b/lib-python/2.7/lib-tk/test/test_ttk/test_widgets.py
@@ -332,7 +332,12 @@ class EntryTest(AbstractWidgetTest, unittest.TestCase):
self.entry.wait_visibility()
self.entry.update_idletasks()
- self.assertEqual(self.entry.identify(5, 5), "textarea")
+ # bpo-27313: macOS Cocoa widget differs from X, allow either
+ if sys.platform == 'darwin':
+ self.assertIn(self.entry.identify(5, 5),
+ ("textarea", "Combobox.button") )
+ else:
+ self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
@@ -1487,6 +1492,15 @@ class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
self.tv.insert('', 'end', text=value), text=None),
value)
+ # test for values which are not None
+ itemid = self.tv.insert('', 'end', 0)
+ self.assertEqual(itemid, '0')
+ itemid = self.tv.insert('', 'end', 0.0)
+ self.assertEqual(itemid, '0.0')
+ # this is because False resolves to 0 and element with 0 iid is already present
+ self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end', False)
+ self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end', '')
+
def test_selection(self):
# item 'none' doesn't exist
@@ -1668,9 +1682,5 @@ tests_gui = (
SizegripTest, TreeviewTest, WidgetTest,
)
-tests_gui = (
- TreeviewTest,
- )
-
if __name__ == "__main__":
run_unittest(*tests_gui)
diff --git a/lib-python/2.7/lib-tk/tkFont.py b/lib-python/2.7/lib-tk/tkFont.py
index 113c983b01..b245623e30 100644
--- a/lib-python/2.7/lib-tk/tkFont.py
+++ b/lib-python/2.7/lib-tk/tkFont.py
@@ -47,8 +47,10 @@ class Font:
def _set(self, kw):
options = []
for k, v in kw.items():
+ if not isinstance(v, basestring):
+ v = str(v)
options.append("-"+k)
- options.append(str(v))
+ options.append(v)
return tuple(options)
def _get(self, args):
diff --git a/lib-python/2.7/lib-tk/ttk.py b/lib-python/2.7/lib-tk/ttk.py
index 11254391c0..d4df408e47 100644
--- a/lib-python/2.7/lib-tk/ttk.py
+++ b/lib-python/2.7/lib-tk/ttk.py
@@ -1332,7 +1332,7 @@ class Treeview(Widget, Tkinter.XView, Tkinter.YView):
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
- if iid:
+ if iid is not None:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
@@ -1521,7 +1521,9 @@ class LabeledScale(Frame, object):
pass
else:
del self._variable
- Frame.destroy(self)
+ Frame.destroy(self)
+ self.label = None
+ self.scale = None
def _adjust(self, *args):
@@ -1612,7 +1614,8 @@ class OptionMenu(Menubutton):
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
- command=Tkinter._setit(self._variable, val, self._callback))
+ command=Tkinter._setit(self._variable, val, self._callback),
+ variable=self._variable)
if default:
self._variable.set(default)
@@ -1620,5 +1623,8 @@ class OptionMenu(Menubutton):
def destroy(self):
"""Destroy this widget and its associated variable."""
- del self._variable
+ try:
+ del self._variable
+ except AttributeError:
+ pass
Menubutton.destroy(self)
diff --git a/lib-python/2.7/lib-tk/turtle.py b/lib-python/2.7/lib-tk/turtle.py
index 264318effc..ae921ce2e5 100644
--- a/lib-python/2.7/lib-tk/turtle.py
+++ b/lib-python/2.7/lib-tk/turtle.py
@@ -276,7 +276,7 @@ class Vec2D(tuple):
return self[0]*other[0]+self[1]*other[1]
return Vec2D(self[0]*other, self[1]*other)
def __rmul__(self, other):
- if isinstance(other, int) or isinstance(other, float):
+ if isinstance(other, (int, long, float)):
return Vec2D(self[0]*other, self[1]*other)
def __sub__(self, other):
return Vec2D(self[0]-other[0], self[1]-other[1])
@@ -1300,7 +1300,7 @@ class TurtleScreen(TurtleScreenBase):
Arguments:
fun -- a function with two arguments, the coordinates of the
clicked point on the canvas.
- num -- the number of the mouse-button, defaults to 1
+ btn -- the number of the mouse-button, defaults to 1
Example (for a TurtleScreen instance named screen
and a Turtle instance named turtle):
@@ -2352,7 +2352,7 @@ class TPen(object):
self._resizemode = p["resizemode"]
if "stretchfactor" in p:
sf = p["stretchfactor"]
- if isinstance(sf, (int, float)):
+ if isinstance(sf, (int, long, float)):
sf = (sf, sf)
self._stretchfactor = sf
if "outline" in p:
@@ -3418,7 +3418,7 @@ class RawTurtle(TPen, TNavigator):
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
- num -- number of the mouse-button defaults to 1 (left mouse button).
+ btn -- number of the mouse-button defaults to 1 (left mouse button).
add -- True or False. If True, new binding will be added, otherwise
it will replace a former binding.
@@ -3439,7 +3439,7 @@ class RawTurtle(TPen, TNavigator):
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
- num -- number of the mouse-button defaults to 1 (left mouse button).
+ btn -- number of the mouse-button defaults to 1 (left mouse button).
Example (for a MyTurtle instance named joe):
>>> class MyTurtle(Turtle):
@@ -3464,7 +3464,7 @@ class RawTurtle(TPen, TNavigator):
Arguments:
fun -- a function with two arguments, to which will be assigned
the coordinates of the clicked point on the canvas.
- num -- number of the mouse-button defaults to 1 (left mouse button).
+ btn -- number of the mouse-button defaults to 1 (left mouse button).
Every sequence of mouse-move-events on a turtle is preceded by a
mouse-click event on that turtle.
diff --git a/lib-python/2.7/lib2to3/fixer_base.py b/lib-python/2.7/lib2to3/fixer_base.py
index d437b96a29..2f50ad3669 100644
--- a/lib-python/2.7/lib2to3/fixer_base.py
+++ b/lib-python/2.7/lib2to3/fixer_base.py
@@ -4,7 +4,6 @@
"""Base class for fixers (optional, but recommended)."""
# Python imports
-import logging
import itertools
# Local imports
@@ -75,7 +74,6 @@ class BaseFix(object):
The main refactoring tool should call this.
"""
self.filename = filename
- self.logger = logging.getLogger(filename)
def match(self, node):
"""Returns match for a given parse tree node.
diff --git a/lib-python/2.7/lib2to3/fixes/fix_execfile.py b/lib-python/2.7/lib2to3/fixes/fix_execfile.py
index 2f29d3b281..786268bb9d 100644
--- a/lib-python/2.7/lib2to3/fixes/fix_execfile.py
+++ b/lib-python/2.7/lib2to3/fixes/fix_execfile.py
@@ -31,7 +31,8 @@ class FixExecfile(fixer_base.BaseFix):
# call.
execfile_paren = node.children[-1].children[-1].clone()
# Construct open().read().
- open_args = ArgList([filename.clone()], rparen=execfile_paren)
+ open_args = ArgList([filename.clone(), Comma(), String('"rb"', ' ')],
+ rparen=execfile_paren)
open_call = Node(syms.power, [Name(u"open"), open_args])
read = [Node(syms.trailer, [Dot(), Name(u'read')]),
Node(syms.trailer, [LParen(), RParen()])]
diff --git a/lib-python/2.7/lib2to3/patcomp.py b/lib-python/2.7/lib2to3/patcomp.py
index d31a9dad9a..49ed6680b6 100644
--- a/lib-python/2.7/lib2to3/patcomp.py
+++ b/lib-python/2.7/lib2to3/patcomp.py
@@ -11,7 +11,6 @@ The compiler compiles a pattern to a pytree.*Pattern instance.
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
-import os
import StringIO
# Fairly local imports
@@ -21,10 +20,6 @@ from .pgen2 import driver, literals, token, tokenize, parse, grammar
from . import pytree
from . import pygram
-# The pattern grammar file
-_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
- "PatternGrammar.txt")
-
class PatternSyntaxError(Exception):
pass
@@ -42,13 +37,17 @@ def tokenize_wrapper(input):
class PatternCompiler(object):
- def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
+ def __init__(self, grammar_file=None):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
- self.grammar = driver.load_grammar(grammar_file)
- self.syms = pygram.Symbols(self.grammar)
+ if grammar_file is None:
+ self.grammar = pygram.pattern_grammar
+ self.syms = pygram.pattern_symbols
+ else:
+ self.grammar = driver.load_grammar(grammar_file)
+ self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
diff --git a/lib-python/2.7/lib2to3/pgen2/driver.py b/lib-python/2.7/lib2to3/pgen2/driver.py
index ce601bb04f..a5133309c6 100644
--- a/lib-python/2.7/lib2to3/pgen2/driver.py
+++ b/lib-python/2.7/lib2to3/pgen2/driver.py
@@ -19,6 +19,7 @@ __all__ = ["Driver", "load_grammar"]
import codecs
import os
import logging
+import pkgutil
import StringIO
import sys
@@ -143,6 +144,26 @@ def _newer(a, b):
return os.path.getmtime(a) >= os.path.getmtime(b)
+def load_packaged_grammar(package, grammar_source):
+ """Normally, loads a pickled grammar by doing
+ pkgutil.get_data(package, pickled_grammar)
+ where *pickled_grammar* is computed from *grammar_source* by adding the
+ Python version and using a ``.pickle`` extension.
+
+ However, if *grammar_source* is an extant file, load_grammar(grammar_source)
+ is called instead. This facilitates using a packaged grammar file when needed
+ but preserves load_grammar's automatic regeneration behavior when possible.
+
+ """
+ if os.path.isfile(grammar_source):
+ return load_grammar(grammar_source)
+ pickled_name = _generate_pickle_name(os.path.basename(grammar_source))
+ data = pkgutil.get_data(package, pickled_name)
+ g = grammar.Grammar()
+ g.loads(data)
+ return g
+
+
def main(*args):
"""Main program, when run as a script: produce grammar pickle files.
diff --git a/lib-python/2.7/lib2to3/pgen2/grammar.py b/lib-python/2.7/lib2to3/pgen2/grammar.py
index 75255e9c01..0b6d86b679 100644
--- a/lib-python/2.7/lib2to3/pgen2/grammar.py
+++ b/lib-python/2.7/lib2to3/pgen2/grammar.py
@@ -109,6 +109,10 @@ class Grammar(object):
f.close()
self.__dict__.update(d)
+ def loads(self, pkl):
+ """Load the grammar tables from a pickle bytes object."""
+ self.__dict__.update(pickle.loads(pkl))
+
def copy(self):
"""
Copy the grammar.
diff --git a/lib-python/2.7/lib2to3/pgen2/pgen.py b/lib-python/2.7/lib2to3/pgen2/pgen.py
index ed16992a20..be4fcad65c 100644
--- a/lib-python/2.7/lib2to3/pgen2/pgen.py
+++ b/lib-python/2.7/lib2to3/pgen2/pgen.py
@@ -74,7 +74,7 @@ class ParserGenerator(object):
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
- assert isinstance(itoken, int), label
+ assert isinstance(itoken, (int, long)), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
diff --git a/lib-python/2.7/lib2to3/pygram.py b/lib-python/2.7/lib2to3/pygram.py
index 621ff24c95..7e67e4a867 100644
--- a/lib-python/2.7/lib2to3/pygram.py
+++ b/lib-python/2.7/lib2to3/pygram.py
@@ -29,12 +29,12 @@ class Symbols(object):
setattr(self, name, symbol)
-python_grammar = driver.load_grammar(_GRAMMAR_FILE)
+python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
-pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
+pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
diff --git a/lib-python/2.7/lib2to3/refactor.py b/lib-python/2.7/lib2to3/refactor.py
index 98386c5f31..8a40deb8ac 100644
--- a/lib-python/2.7/lib2to3/refactor.py
+++ b/lib-python/2.7/lib2to3/refactor.py
@@ -15,6 +15,7 @@ __author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
+import pkgutil
import sys
import logging
import operator
@@ -33,13 +34,12 @@ from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
- fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
- for name in sorted(os.listdir(fixer_dir)):
- if name.startswith("fix_") and name.endswith(".py"):
+ for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__):
+ if name.startswith("fix_"):
if remove_prefix:
name = name[4:]
- fix_names.append(name[:-3])
+ fix_names.append(name)
return fix_names
diff --git a/lib-python/2.7/lib2to3/tests/test_fixers.py b/lib-python/2.7/lib2to3/tests/test_fixers.py
index b0e60fe196..c7d5ff93e8 100644
--- a/lib-python/2.7/lib2to3/tests/test_fixers.py
+++ b/lib-python/2.7/lib2to3/tests/test_fixers.py
@@ -1143,36 +1143,36 @@ class Test_execfile(FixerTestCase):
def test_conversion(self):
b = """execfile("fn")"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'))"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'))"""
self.check(b, a)
b = """execfile("fn", glob)"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'), glob)"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'), glob)"""
self.check(b, a)
b = """execfile("fn", glob, loc)"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'), glob, loc)"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'), glob, loc)"""
self.check(b, a)
b = """execfile("fn", globals=glob)"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob)"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'), globals=glob)"""
self.check(b, a)
b = """execfile("fn", locals=loc)"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'), locals=loc)"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'), locals=loc)"""
self.check(b, a)
b = """execfile("fn", globals=glob, locals=loc)"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob, locals=loc)"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'), globals=glob, locals=loc)"""
self.check(b, a)
def test_spacing(self):
b = """execfile( "fn" )"""
- a = """exec(compile(open( "fn" ).read(), "fn", 'exec'))"""
+ a = """exec(compile(open( "fn", "rb" ).read(), "fn", 'exec'))"""
self.check(b, a)
b = """execfile("fn", globals = glob)"""
- a = """exec(compile(open("fn").read(), "fn", 'exec'), globals = glob)"""
+ a = """exec(compile(open("fn", "rb").read(), "fn", 'exec'), globals = glob)"""
self.check(b, a)
diff --git a/lib-python/2.7/lib2to3/tests/test_parser.py b/lib-python/2.7/lib2to3/tests/test_parser.py
index ebf84418fe..d2254f13e0 100644
--- a/lib-python/2.7/lib2to3/tests/test_parser.py
+++ b/lib-python/2.7/lib2to3/tests/test_parser.py
@@ -11,11 +11,14 @@ from . import support
from .support import driver, test_dir
# Python imports
+import operator
import os
+import pickle
import shutil
import subprocess
import sys
import tempfile
+import types
import unittest
# Local imports
@@ -97,6 +100,18 @@ pgen2_driver.load_grammar(%r, save=True, force=True)
finally:
shutil.rmtree(tmpdir)
+ def test_load_packaged_grammar(self):
+ modname = __name__ + '.load_test'
+ class MyLoader:
+ def get_data(self, where):
+ return pickle.dumps({'elephant': 19})
+ class MyModule(types.ModuleType):
+ __file__ = 'parsertestmodule'
+ __loader__ = MyLoader()
+ sys.modules[modname] = MyModule(modname)
+ self.addCleanup(operator.delitem, sys.modules, modname)
+ g = pgen2_driver.load_packaged_grammar(modname, 'Grammar.txt')
+ self.assertEqual(g.elephant, 19)
class GrammarTest(support.TestCase):
diff --git a/lib-python/2.7/locale.py b/lib-python/2.7/locale.py
index 5aab163e5a..51909f81e7 100644
--- a/lib-python/2.7/locale.py
+++ b/lib-python/2.7/locale.py
@@ -618,9 +618,18 @@ else:
pass
result = nl_langinfo(CODESET)
setlocale(LC_CTYPE, oldloc)
- return result
else:
- return nl_langinfo(CODESET)
+ result = nl_langinfo(CODESET)
+
+ if not result and sys.platform == 'darwin':
+ # nl_langinfo can return an empty string
+ # when the setting has an invalid value.
+ # Default to UTF-8 in that case because
+ # UTF-8 is the default charset on OSX and
+ # returning nothing will crash the
+ # interpreter.
+ result = 'UTF-8'
+ return result
### Database
@@ -789,6 +798,17 @@ locale_encoding_alias = {
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
+#
+# SS 2018-05-05:
+# Updated alias mapping with glibc 2.27 supported locales.
+#
+# These are the differences compared to the old mapping (Python 2.7.15
+# and older):
+#
+# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia'
+# updated 'english.iso88591' -> 'en_EN.ISO8859-1' to 'en_US.ISO8859-1'
+# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154'
+# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R'
locale_alias = {
'a3': 'az_AZ.KOI8-C',
@@ -801,11 +821,14 @@ locale_alias = {
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'af_za.iso88591': 'af_ZA.ISO8859-1',
+ 'agr_pe': 'agr_PE.UTF-8',
+ 'ak_gh': 'ak_GH.UTF-8',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'american.iso88591': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
+ 'anp_in': 'anp_IN.UTF-8',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_aa.iso88596': 'ar_AA.ISO8859-6',
@@ -838,6 +861,7 @@ locale_alias = {
'ar_sa.iso88596': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sd.iso88596': 'ar_SD.ISO8859-6',
+ 'ar_ss': 'ar_SS.UTF-8',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_sy.iso88596': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
@@ -853,6 +877,7 @@ locale_alias = {
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
+ 'az_ir': 'az_IR.UTF-8',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
@@ -870,7 +895,10 @@ locale_alias = {
'bg_bg.iso88595': 'bg_BG.ISO8859-5',
'bg_bg.koi8r': 'bg_BG.KOI8-R',
'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
+ 'bhb_in.utf8': 'bhb_IN.UTF-8',
'bho_in': 'bho_IN.UTF-8',
+ 'bho_np': 'bho_NP.UTF-8',
+ 'bi_vu': 'bi_VU.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
@@ -912,8 +940,8 @@ locale_alias = {
'ca_es.iso885915': 'ca_ES.ISO8859-15',
'ca_es.iso885915@euro': 'ca_ES.ISO8859-15',
'ca_es.utf8@euro': 'ca_ES.UTF-8',
- 'ca_es@valencia': 'ca_ES.ISO8859-15@valencia',
'ca_es@euro': 'ca_ES.ISO8859-15',
+ 'ca_es@valencia': 'ca_ES.UTF-8@valencia',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_fr.iso88591': 'ca_FR.ISO8859-1',
'ca_fr.iso885915': 'ca_FR.ISO8859-15',
@@ -927,10 +955,14 @@ locale_alias = {
'ca_it.utf8@euro': 'ca_IT.UTF-8',
'ca_it@euro': 'ca_IT.ISO8859-15',
'catalan': 'ca_ES.ISO8859-1',
+ 'ce_ru': 'ce_RU.UTF-8',
'cextend': 'en_US.ISO8859-1',
'cextend.en': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
+ 'chr_us': 'chr_US.UTF-8',
+ 'ckb_iq': 'ckb_IQ.UTF-8',
+ 'cmn_tw': 'cmn_TW.UTF-8',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
@@ -987,6 +1019,7 @@ locale_alias = {
'de_de.iso885915@euro': 'de_DE.ISO8859-15',
'de_de.utf8@euro': 'de_DE.UTF-8',
'de_de@euro': 'de_DE.ISO8859-15',
+ 'de_it': 'de_IT.ISO8859-1',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'de_lu.iso88591': 'de_LU.ISO8859-1',
@@ -1021,6 +1054,8 @@ locale_alias = {
'en_ca': 'en_CA.ISO8859-1',
'en_ca.iso88591': 'en_CA.ISO8859-1',
'en_dk': 'en_DK.ISO8859-1',
+ 'en_dk.iso88591': 'en_DK.ISO8859-1',
+ 'en_dk.iso885915': 'en_DK.ISO8859-15',
'en_dl.utf8': 'en_DL.UTF-8',
'en_gb': 'en_GB.ISO8859-1',
'en_gb.88591': 'en_GB.ISO8859-1',
@@ -1035,12 +1070,14 @@ locale_alias = {
'en_ie.iso885915@euro': 'en_IE.ISO8859-15',
'en_ie.utf8@euro': 'en_IE.UTF-8',
'en_ie@euro': 'en_IE.ISO8859-15',
+ 'en_il': 'en_IL.UTF-8',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_nz.iso88591': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_ph.iso88591': 'en_PH.ISO8859-1',
+ 'en_sc.utf8': 'en_SC.UTF-8',
'en_sg': 'en_SG.ISO8859-1',
'en_sg.iso88591': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
@@ -1064,7 +1101,7 @@ locale_alias = {
'eng_gb': 'en_GB.ISO8859-1',
'eng_gb.8859': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
- 'english.iso88591': 'en_EN.ISO8859-1',
+ 'english.iso88591': 'en_US.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_uk.8859': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
@@ -1259,6 +1296,7 @@ locale_alias = {
'gv_gb.iso885915': 'gv_GB.ISO8859-15',
'gv_gb@euro': 'gv_GB.ISO8859-15',
'ha_ng': 'ha_NG.UTF-8',
+ 'hak_tw': 'hak_TW.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'he_il.cp1255': 'he_IL.CP1255',
@@ -1269,6 +1307,7 @@ locale_alias = {
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
+ 'hif_fj': 'hif_FJ.UTF-8',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
@@ -1352,7 +1391,8 @@ locale_alias = {
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
- 'kk_kz': 'kk_KZ.RK1048',
+ 'kab_dz': 'kab_DZ.UTF-8',
+ 'kk_kz': 'kk_KZ.ptcp154',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'kl_gl.iso88591': 'kl_GL.ISO8859-1',
@@ -1370,6 +1410,7 @@ locale_alias = {
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
+ 'ks_in.utf8@devanagari': 'ks_IN.UTF-8@devanagari',
'ks_in@devanagari': 'ks_IN.UTF-8@devanagari',
'ks_in@devanagari.utf8': 'ks_IN.UTF-8@devanagari',
'ku_tr': 'ku_TR.ISO8859-9',
@@ -1387,6 +1428,7 @@ locale_alias = {
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
+ 'ln_cd': 'ln_CD.UTF-8',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
@@ -1400,14 +1442,19 @@ locale_alias = {
'lv_lv': 'lv_LV.ISO8859-13',
'lv_lv.iso885913': 'lv_LV.ISO8859-13',
'lv_lv.iso88594': 'lv_LV.ISO8859-4',
+ 'lzh_tw': 'lzh_TW.UTF-8',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
+ 'mai_np': 'mai_NP.UTF-8',
+ 'mfe_mu': 'mfe_MU.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
+ 'miq_ni': 'miq_NI.UTF-8',
+ 'mjw_in': 'mjw_IN.UTF-8',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'mk_mk.cp1251': 'mk_MK.CP1251',
@@ -1426,7 +1473,7 @@ locale_alias = {
'mt_mt': 'mt_MT.ISO8859-3',
'mt_mt.iso88593': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
- 'nan_tw@latin': 'nan_TW.UTF-8@latin',
+ 'nan_tw': 'nan_TW.UTF-8',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nb_no.88591': 'nb_NO.ISO8859-1',
@@ -1500,6 +1547,8 @@ locale_alias = {
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
+ 'pap_aw': 'pap_AW.UTF-8',
+ 'pap_cw': 'pap_CW.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_de.iso88591': 'pd_DE.ISO8859-1',
@@ -1540,6 +1589,8 @@ locale_alias = {
'pt_pt.iso885915@euro': 'pt_PT.ISO8859-15',
'pt_pt.utf8@euro': 'pt_PT.UTF-8',
'pt_pt@euro': 'pt_PT.ISO8859-15',
+ 'quz_pe': 'quz_PE.UTF-8',
+ 'raj_in': 'raj_IN.UTF-8',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'ro_ro.iso88592': 'ro_RO.ISO8859-2',
@@ -1556,7 +1607,7 @@ locale_alias = {
'ru_ua.koi8u': 'ru_UA.KOI8-U',
'ru_ua.microsoftcp1251': 'ru_UA.CP1251',
'rumanian': 'ro_RO.ISO8859-2',
- 'russian': 'ru_RU.ISO8859-5',
+ 'russian': 'ru_RU.KOI8-R',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'rw_rw.iso88591': 'rw_RW.ISO8859-1',
@@ -1566,17 +1617,20 @@ locale_alias = {
'sd': 'sd_IN.UTF-8',
'sd@devanagari': 'sd_IN.UTF-8@devanagari',
'sd_in': 'sd_IN.UTF-8',
+ 'sd_in.utf8@devanagari': 'sd_IN.UTF-8@devanagari',
'sd_in@devanagari': 'sd_IN.UTF-8@devanagari',
'sd_in@devanagari.utf8': 'sd_IN.UTF-8@devanagari',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
+ 'sgs_lt': 'sgs_LT.UTF-8',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
+ 'shn_mm': 'shn_MM.UTF-8',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
@@ -1592,6 +1646,7 @@ locale_alias = {
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
+ 'sm_ws': 'sm_WS.UTF-8',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
@@ -1618,6 +1673,7 @@ locale_alias = {
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
+ 'sr_rs.utf8@latn': 'sr_RS.UTF-8@latin',
'sr_rs@latin': 'sr_RS.UTF-8@latin',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
@@ -1659,6 +1715,7 @@ locale_alias = {
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
+ 'tcy_in.utf8': 'tcy_IN.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
@@ -1670,6 +1727,7 @@ locale_alias = {
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
+ 'the_np': 'the_NP.UTF-8',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
@@ -1680,6 +1738,8 @@ locale_alias = {
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'tn_za.iso885915': 'tn_ZA.ISO8859-15',
+ 'to_to': 'to_TO.UTF-8',
+ 'tpi_pg': 'tpi_PG.UTF-8',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
@@ -1741,6 +1801,7 @@ locale_alias = {
'yi_us.microsoftcp1255': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
+ 'yuw_pg': 'yuw_PG.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
diff --git a/lib-python/2.7/logging/__init__.py b/lib-python/2.7/logging/__init__.py
index 863a639ed9..88a42bda34 100644
--- a/lib-python/2.7/logging/__init__.py
+++ b/lib-python/2.7/logging/__init__.py
@@ -649,12 +649,19 @@ def _removeHandlerRef(wr):
# to prevent race conditions and failures during interpreter shutdown.
acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
if acquire and release and handlers:
- acquire()
try:
- if wr in handlers:
- handlers.remove(wr)
- finally:
- release()
+ acquire()
+ try:
+ if wr in handlers:
+ handlers.remove(wr)
+ finally:
+ release()
+ except TypeError:
+ # https://bugs.python.org/issue21149 - If the RLock object behind
+ # acquire() and release() has been partially finalized you may see
+ # an error about NoneType not being callable. Absolutely nothing
+ # we can do in this GC during process shutdown situation. Eat it.
+ pass
def _addHandlerRef(handler):
"""
@@ -1235,7 +1242,7 @@ class Logger(Filterer):
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
- if not isinstance(level, int):
+ if not isinstance(level, (int, long)):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
diff --git a/lib-python/2.7/logging/handlers.py b/lib-python/2.7/logging/handlers.py
index e430ab7b9b..e0b935c878 100644
--- a/lib-python/2.7/logging/handlers.py
+++ b/lib-python/2.7/logging/handlers.py
@@ -760,14 +760,29 @@ class SysLogHandler(logging.Handler):
self.unixsocket = 1
self._connect_unixsocket(address)
else:
- self.unixsocket = 0
+ self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
- self.socket = socket.socket(socket.AF_INET, socktype)
- if socktype == socket.SOCK_STREAM:
- self.socket.connect(address)
+ host, port = address
+ ress = socket.getaddrinfo(host, port, 0, socktype)
+ if not ress:
+ raise socket.error("getaddrinfo returns an empty list")
+ for res in ress:
+ af, socktype, proto, _, sa = res
+ err = sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+ if socktype == socket.SOCK_STREAM:
+ sock.connect(sa)
+ break
+ except socket.error as exc:
+ err = exc
+ if sock is not None:
+ sock.close()
+ if err is not None:
+ raise err
+ self.socket = sock
self.socktype = socktype
- self.formatter = None
def _connect_unixsocket(self, address):
use_socktype = self.socktype
@@ -812,7 +827,7 @@ class SysLogHandler(logging.Handler):
priority = self.priority_names[priority]
return (facility << 3) | priority
- def close (self):
+ def close(self):
"""
Closes the socket.
"""
diff --git a/lib-python/2.7/mimetypes.py b/lib-python/2.7/mimetypes.py
index 157d455521..afc053f135 100644
--- a/lib-python/2.7/mimetypes.py
+++ b/lib-python/2.7/mimetypes.py
@@ -442,6 +442,7 @@ def _default_mime_types():
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/javascript',
+ '.json' : 'application/json',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
@@ -450,6 +451,7 @@ def _default_mime_types():
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
+ '.mjs' : 'application/javascript',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
diff --git a/lib-python/2.7/msilib/__init__.py b/lib-python/2.7/msilib/__init__.py
index 0352b60c0b..9520dfc021 100644
--- a/lib-python/2.7/msilib/__init__.py
+++ b/lib-python/2.7/msilib/__init__.py
@@ -276,7 +276,7 @@ class Directory:
if Win64:
flags |= 256
if keyfile:
- keyid = self.cab.gen_id(self.absolute, keyfile)
+ keyid = self.cab.gen_id(keyfile)
self.keyfiles[keyfile] = keyid
else:
keyid = None
diff --git a/lib-python/2.7/multiprocessing/forking.py b/lib-python/2.7/multiprocessing/forking.py
index e6293d7c55..1cba5f9c54 100644
--- a/lib-python/2.7/multiprocessing/forking.py
+++ b/lib-python/2.7/multiprocessing/forking.py
@@ -402,7 +402,8 @@ else:
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
- if not WINEXE and not WINSERVICE:
+ if not WINEXE and not WINSERVICE and \
+ not d['sys_argv'][0].lower().endswith('pythonservice.exe'):
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
diff --git a/lib-python/2.7/multiprocessing/managers.py b/lib-python/2.7/multiprocessing/managers.py
index f2cee0c38c..118812c8ce 100644
--- a/lib-python/2.7/multiprocessing/managers.py
+++ b/lib-python/2.7/multiprocessing/managers.py
@@ -1059,10 +1059,13 @@ class ListProxy(BaseListProxy):
DictProxy = MakeProxyType('DictProxy', (
- '__contains__', '__delitem__', '__getitem__', '__len__',
+ '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
+DictProxy._method_to_typeid_ = {
+ '__iter__': 'Iterator',
+ }
ArrayProxy = MakeProxyType('ArrayProxy', (
diff --git a/lib-python/2.7/multiprocessing/pool.py b/lib-python/2.7/multiprocessing/pool.py
index 991f87f2f1..a47cd0f58a 100644
--- a/lib-python/2.7/multiprocessing/pool.py
+++ b/lib-python/2.7/multiprocessing/pool.py
@@ -86,7 +86,7 @@ class MaybeEncodingError(Exception):
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
- assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
+ assert maxtasks is None or (type(maxtasks) in (int, long) and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
@@ -120,6 +120,8 @@ def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
debug("Possible encoding error while sending result: %s" % (
wrapped))
put((job, i, (False, wrapped)))
+
+ task = job = result = func = args = kwds = None
completed += 1
debug('worker exiting after %d tasks' % completed)
@@ -362,10 +364,11 @@ class Pool(object):
if set_length:
debug('doing set_length()')
set_length(i+1)
+ finally:
+ task = taskseq = job = None
else:
debug('task handler got sentinel')
-
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
@@ -405,6 +408,7 @@ class Pool(object):
cache[job]._set(i, obj)
except KeyError:
pass
+ task = job = obj = None
while cache and thread._state != TERMINATE:
try:
@@ -421,6 +425,7 @@ class Pool(object):
cache[job]._set(i, obj)
except KeyError:
pass
+ task = job = obj = None
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
diff --git a/lib-python/2.7/multiprocessing/process.py b/lib-python/2.7/multiprocessing/process.py
index 44c1e44242..11c8fca360 100644
--- a/lib-python/2.7/multiprocessing/process.py
+++ b/lib-python/2.7/multiprocessing/process.py
@@ -128,6 +128,9 @@ class Process(object):
else:
from .forking import Popen
self._popen = Popen(self)
+ # Avoid a refcycle if the target function holds an indirect
+ # reference to the process object (see bpo-30775)
+ del self._target, self._args, self._kwargs
_current_process._children.add(self)
def terminate(self):
@@ -153,10 +156,16 @@ class Process(object):
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
+
if self._popen is None:
return False
- self._popen.poll()
- return self._popen.returncode is None
+
+ returncode = self._popen.poll()
+ if returncode is None:
+ return True
+ else:
+ _current_process._children.discard(self)
+ return False
@property
def name(self):
@@ -227,7 +236,7 @@ class Process(object):
else:
status = 'started'
- if type(status) is int:
+ if type(status) in (int, long):
if status == 0:
status = 'stopped'
else:
@@ -262,8 +271,8 @@ class Process(object):
except SystemExit, e:
if not e.args:
exitcode = 1
- elif isinstance(e.args[0], int):
- exitcode = e.args[0]
+ elif isinstance(e.args[0], (int, long)):
+ exitcode = int(e.args[0])
else:
sys.stderr.write(str(e.args[0]) + '\n')
sys.stderr.flush()
diff --git a/lib-python/2.7/multiprocessing/queues.py b/lib-python/2.7/multiprocessing/queues.py
index a88e298973..079cd354d6 100644
--- a/lib-python/2.7/multiprocessing/queues.py
+++ b/lib-python/2.7/multiprocessing/queues.py
@@ -128,7 +128,7 @@ class Queue(object):
try:
if block:
timeout = deadline - time.time()
- if timeout < 0 or not self._poll(timeout):
+ if not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
@@ -244,8 +244,8 @@ class Queue(object):
else:
wacquire = None
- try:
- while 1:
+ while 1:
+ try:
nacquire()
try:
if not buffer:
@@ -270,19 +270,17 @@ class Queue(object):
wrelease()
except IndexError:
pass
- except Exception, e:
- # Since this runs in a daemon thread the resources it uses
- # may be become unusable while the process is cleaning up.
- # We ignore errors which happen after the process has
- # started to cleanup.
- try:
+ except Exception as e:
+ # Since this runs in a daemon thread the resources it uses
+ # may become unusable while the process is cleaning up.
+ # We ignore errors which happen after the process has
+ # started to cleanup.
if is_exiting():
info('error in queue thread: %s', e)
+ return
else:
import traceback
traceback.print_exc()
- except Exception:
- pass
_sentinel = object()
diff --git a/lib-python/2.7/multiprocessing/util.py b/lib-python/2.7/multiprocessing/util.py
index 092b61ce09..2920f2445e 100644
--- a/lib-python/2.7/multiprocessing/util.py
+++ b/lib-python/2.7/multiprocessing/util.py
@@ -174,7 +174,7 @@ class Finalize(object):
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
- assert exitpriority is None or type(exitpriority) is int
+ assert exitpriority is None or type(exitpriority) in (int, long)
if obj is not None:
self._weakref = weakref.ref(obj, self)
@@ -265,6 +265,9 @@ def _run_finalizers(minpriority=None):
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
+ # Careful: _finalizer_registry may be mutated while this function
+ # is running (either by a GC run or by another thread).
+
items = [x for x in _finalizer_registry.items() if f(x)]
items.sort(reverse=True)
diff --git a/lib-python/2.7/netrc.py b/lib-python/2.7/netrc.py
index 4b18973d51..16bc347023 100644
--- a/lib-python/2.7/netrc.py
+++ b/lib-python/2.7/netrc.py
@@ -130,15 +130,15 @@ class netrc:
rep = ""
for host in self.hosts.keys():
attrs = self.hosts[host]
- rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
+ rep += "machine {host}\n\tlogin {attrs[0]}\n".format(host=host, attrs=attrs)
if attrs[1]:
- rep = rep + "account " + repr(attrs[1])
- rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
+ rep += "\taccount {attrs[1]}\n".format(attrs=attrs)
+ rep += "\tpassword {attrs[2]}\n".format(attrs=attrs)
for macro in self.macros.keys():
- rep = rep + "macdef " + macro + "\n"
+ rep += "macdef {macro}\n".format(macro=macro)
for line in self.macros[macro]:
- rep = rep + line
- rep = rep + "\n"
+ rep += line
+ rep += "\n"
return rep
if __name__ == '__main__':
diff --git a/lib-python/2.7/pickletools.py b/lib-python/2.7/pickletools.py
index 8de53dd250..cc15540e55 100644
--- a/lib-python/2.7/pickletools.py
+++ b/lib-python/2.7/pickletools.py
@@ -185,7 +185,7 @@ class ArgumentDescriptor(object):
assert isinstance(name, str)
self.name = name
- assert isinstance(n, int) and (n >= 0 or
+ assert isinstance(n, (int, long)) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4))
@@ -873,7 +873,7 @@ class OpcodeInfo(object):
assert isinstance(x, StackObject)
self.stack_after = stack_after
- assert isinstance(proto, int) and 0 <= proto <= 2
+ assert isinstance(proto, (int, long)) and 0 <= proto <= 2
self.proto = proto
assert isinstance(doc, str)
@@ -1048,9 +1048,7 @@ opcodes = [
stack_before=[],
stack_after=[pybool],
proto=2,
- doc="""True.
-
- Push True onto the stack."""),
+ doc="Push True onto the stack."),
I(name='NEWFALSE',
code='\x89',
@@ -1058,9 +1056,7 @@ opcodes = [
stack_before=[],
stack_after=[pybool],
proto=2,
- doc="""True.
-
- Push False onto the stack."""),
+ doc="Push False onto the stack."),
# Ways to spell Unicode strings.
diff --git a/lib-python/2.7/pkgutil.py b/lib-python/2.7/pkgutil.py
index 4ad03343f2..68ca72b0e4 100644
--- a/lib-python/2.7/pkgutil.py
+++ b/lib-python/2.7/pkgutil.py
@@ -1,8 +1,5 @@
"""Utilities to support packages."""
-# NOTE: This module must remain compatible with Python 2.3, as it is shared
-# by setuptools for distribution with Python 2.3 and up.
-
import os
import sys
import imp
@@ -252,8 +249,8 @@ class ImpLoader:
return mod
def get_data(self, pathname):
- with open(pathname, "rb") as f:
- return f.read()
+ with open(pathname, "rb") as file:
+ return file.read()
def _reopen(self):
if self.file and self.file.closed:
diff --git a/lib-python/2.7/platform.py b/lib-python/2.7/platform.py
index 55f2fa8995..e04d87f258 100755
--- a/lib-python/2.7/platform.py
+++ b/lib-python/2.7/platform.py
@@ -132,6 +132,35 @@ except AttributeError:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
+# Helper for comparing two version number strings.
+# Based on the description of the PHP's version_compare():
+# http://php.net/manual/en/function.version-compare.php
+
+_ver_stages = {
+ # any string not found in this dict, will get 0 assigned
+ 'dev': 10,
+ 'alpha': 20, 'a': 20,
+ 'beta': 30, 'b': 30,
+ 'c': 40,
+ 'RC': 50, 'rc': 50,
+ # number, will get 100 assigned
+ 'pl': 200, 'p': 200,
+}
+
+_component_re = re.compile(r'([0-9]+|[._+-])')
+
+def _comparable_version(version):
+ result = []
+ for v in _component_re.split(version):
+ if v not in '._+-':
+ try:
+ v = int(v, 10)
+ t = 100
+ except ValueError:
+ t = _ver_stages.get(v, 0)
+ result.extend((t, v))
+ return result
+
### Platform specific APIs
_libc_search = re.compile(r'(__libc_init)'
@@ -140,9 +169,7 @@ _libc_search = re.compile(r'(__libc_init)'
'|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
-def libc_ver(executable=sys.executable,lib='',version='',
-
- chunksize=2048):
+def libc_ver(executable=sys.executable,lib='',version='', chunksize=2048):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
@@ -157,40 +184,45 @@ def libc_ver(executable=sys.executable,lib='',version='',
The file is read and scanned in chunks of chunksize bytes.
"""
+ V = _comparable_version
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
- f = open(executable,'rb')
- binary = f.read(chunksize)
- pos = 0
- while 1:
- m = _libc_search.search(binary,pos)
- if not m:
- binary = f.read(chunksize)
- if not binary:
- break
- pos = 0
- continue
- libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
- if libcinit and not lib:
- lib = 'libc'
- elif glibc:
- if lib != 'glibc':
- lib = 'glibc'
- version = glibcversion
- elif glibcversion > version:
- version = glibcversion
- elif so:
- if lib != 'glibc':
+ with open(executable, 'rb') as f:
+ binary = f.read(chunksize)
+ pos = 0
+ while pos < len(binary):
+ if 'libc' in binary or 'GLIBC' in binary:
+ m = _libc_search.search(binary, pos)
+ else:
+ m = None
+ if not m or m.end() == len(binary):
+ chunk = f.read(chunksize)
+ if chunk:
+ binary = binary[max(pos, len(binary) - 1000):] + chunk
+ pos = 0
+ continue
+ if not m:
+ break
+ libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
+ if libcinit and not lib:
lib = 'libc'
- if soversion and soversion > version:
- version = soversion
- if threads and version[-len(threads):] != threads:
- version = version + threads
- pos = m.end()
- f.close()
+ elif glibc:
+ if lib != 'glibc':
+ lib = 'glibc'
+ version = glibcversion
+ elif V(glibcversion) > V(version):
+ version = glibcversion
+ elif so:
+ if lib != 'glibc':
+ lib = 'libc'
+ if soversion and (not version or V(soversion) > V(version)):
+ version = soversion
+ if threads and version[-len(threads):] != threads:
+ version = version + threads
+ pos = m.end()
return lib,version
def _dist_try_harder(distname,version,id):
@@ -451,6 +483,7 @@ def popen(cmd, mode='r', bufsize=None):
else:
return popen(cmd,mode,bufsize)
+
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
diff --git a/lib-python/2.7/poplib.py b/lib-python/2.7/poplib.py
index b91e5f72d2..a238510b38 100644
--- a/lib-python/2.7/poplib.py
+++ b/lib-python/2.7/poplib.py
@@ -274,7 +274,7 @@ class POP3:
return self._shortcmd('RPOP %s' % user)
- timestamp = re.compile(r'\+OK.*(<[^>]+>)')
+ timestamp = re.compile(br'\+OK.[^<]*(<.*>)')
def apop(self, user, secret):
"""Authorisation
diff --git a/lib-python/2.7/posixpath.py b/lib-python/2.7/posixpath.py
index f5c2260f1e..bbc2369ce7 100644
--- a/lib-python/2.7/posixpath.py
+++ b/lib-python/2.7/posixpath.py
@@ -259,7 +259,12 @@ def expanduser(path):
if i == 1:
if 'HOME' not in os.environ:
import pwd
- userhome = pwd.getpwuid(os.getuid()).pw_dir
+ try:
+ userhome = pwd.getpwuid(os.getuid()).pw_dir
+ except KeyError:
+ # bpo-10496: if the current user identifier doesn't exist in the
+ # password database, return the path unchanged
+ return path
else:
userhome = os.environ['HOME']
else:
@@ -267,6 +272,8 @@ def expanduser(path):
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
+ # bpo-10496: if the user name from the path doesn't exist in the
+ # password database, return the path unchanged
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
diff --git a/lib-python/2.7/pydoc.py b/lib-python/2.7/pydoc.py
index de9ce1c8a3..87b7c2c0d4 100755
--- a/lib-python/2.7/pydoc.py
+++ b/lib-python/2.7/pydoc.py
@@ -1649,8 +1649,9 @@ class Helper:
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
+ _strprefixes = tuple(p + q for p in ('b', 'r', 'u') for q in ("'", '"'))
_symbols_inverse = {
- 'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
+ 'STRINGS' : ("'", "'''", '"""', '"') + _strprefixes,
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
@@ -1813,7 +1814,12 @@ has the same effect as typing a particular string at the help> prompt.
if not request: break
except (KeyboardInterrupt, EOFError):
break
- request = strip(replace(request, '"', '', "'", ''))
+ request = strip(request)
+ # Make sure significant trailing quotation marks of literals don't
+ # get deleted while cleaning input
+ if (len(request) > 2 and request[0] == request[-1] in ("'", '"')
+ and request[0] not in request[1:-1]):
+ request = request[1:-1]
if lower(request) in ('q', 'quit'): break
self.help(request)
diff --git a/lib-python/2.7/pydoc_data/topics.py b/lib-python/2.7/pydoc_data/topics.py
index 18d436fb5b..5fc84a8ddb 100644
--- a/lib-python/2.7/pydoc_data/topics.py
+++ b/lib-python/2.7/pydoc_data/topics.py
@@ -1,81 +1,13578 @@
# -*- coding: utf-8 -*-
-# Autogenerated by Sphinx on Sat Dec 3 12:36:20 2016
-topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n',
- 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" [target_list] "]"\n | attributeref\n | subscription\n | slicing\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. 
The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section The standard type\nhierarchy).\n\nAssignment of an object to a target list is recursively defined as\nfollows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" statement in the\n current code block: the name is bound to the object in the current\n local namespace.\n\n * Otherwise: the name is bound to the object in the current global\n namespace.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. 
That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield a plain integer. If it is negative, the\n sequence\'s length is added to it. The resulting value must be a\n nonnegative integer less than the sequence\'s length, and the\n sequence is asked to assign the assigned object to its item with\n that index. If the index is out of range, "IndexError" is raised\n (assignment to a subscripted sequence cannot add new items to a\n list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. 
This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to (small) integers.\n If either bound is negative, the sequence\'s length is added to it.\n The resulting bounds are clipped to lie between zero and the\n sequence\'s length, inclusive. Finally, the sequence object is asked\n to replace the slice with the items of the assigned sequence. The\n length of the slice may be different from the length of the assigned\n sequence, thus changing the length of the target sequence, if the\n object allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! 
For instance, the\nfollowing program prints "[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print x\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n',
- 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section Identifiers\nand keywords for lexical definition and section Naming and binding for\ndocumentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n',
- 'atom-literals': u"\nLiterals\n********\n\nPython supports string literals and various numeric literals:\n\n literal ::= stringliteral | integer | longinteger\n | floatnumber | imagnumber\n\nEvaluation of a literal yields an object of the given type (string,\ninteger, long integer, floating point number, complex number) with the\ngiven value. The value may be approximated in the case of floating\npoint and imaginary (complex) literals. See section Literals for\ndetails.\n\nAll literals correspond to immutable data types, and hence the\nobject's identity is less important than its value. Multiple\nevaluations of literals with the same value (either the same\noccurrence in the program text or a different occurrence) may obtain\nthe same object or a different object with the same value.\n",
- 'attribute-access': u'\nCustomizing attribute access\n****************************\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". 
For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n===========================================\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n========================\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). 
*owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n====================\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". 
How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n=========\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". 
If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. 
Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n',
- 'attribute-references': u'\nAttribute references\n********************\n\nAn attribute reference is a primary followed by a period and a name:\n\n attributeref ::= primary "." identifier\n\nThe primary must evaluate to an object of a type that supports\nattribute references, e.g., a module, list, or an instance. This\nobject is then asked to produce the attribute whose name is the\nidentifier. If this attribute is not available, the exception\n"AttributeError" is raised. Otherwise, the type and value of the\nobject produced is determined by the object. Multiple evaluations of\nthe same attribute reference may yield different objects.\n',
- 'augassign': u'\nAugmented assignment statements\n*******************************\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section Primaries for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same caveat about\nclass and instance attributes applies as for regular assignments.\n',
- 'binary': u'\nBinary arithmetic operations\n****************************\n\nThe binary arithmetic operations have the conventional priority\nlevels. Note that some of these operations also apply to certain non-\nnumeric types. Apart from the power operator, there are only two\nlevels, one for multiplicative operators and one for additive\noperators:\n\n m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | m_expr "/" u_expr\n | m_expr "%" u_expr\n a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n\nThe "*" (multiplication) operator yields the product of its arguments.\nThe arguments must either both be numbers, or one argument must be an\ninteger (plain or long) and the other must be a sequence. In the\nformer case, the numbers are converted to a common type and then\nmultiplied together. In the latter case, sequence repetition is\nperformed; a negative repetition factor yields an empty sequence.\n\nThe "/" (division) and "//" (floor division) operators yield the\nquotient of their arguments. The numeric arguments are first\nconverted to a common type. Plain or long integer division yields an\ninteger of the same type; the result is that of mathematical division\nwith the \'floor\' function applied to the result. Division by zero\nraises the "ZeroDivisionError" exception.\n\nThe "%" (modulo) operator yields the remainder from the division of\nthe first argument by the second. The numeric arguments are first\nconverted to a common type. A zero right argument raises the\n"ZeroDivisionError" exception. The arguments may be floating point\nnumbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +\n0.34".) The modulo operator always yields a result with the same sign\nas its second operand (or zero); the absolute value of the result is\nstrictly smaller than the absolute value of the second operand [2].\n\nThe integer division and modulo operators are connected by the\nfollowing identity: "x == (x/y)*y + (x%y)". 
Integer division and\nmodulo are also connected with the built-in function "divmod()":\n"divmod(x, y) == (x/y, x%y)". These identities don\'t hold for\nfloating point numbers; there similar identities hold approximately\nwhere "x/y" is replaced by "floor(x/y)" or "floor(x/y) - 1" [3].\n\nIn addition to performing the modulo operation on numbers, the "%"\noperator is also overloaded by string and unicode objects to perform\nstring formatting (also known as interpolation). The syntax for string\nformatting is described in the Python Library Reference, section\nString Formatting Operations.\n\nDeprecated since version 2.3: The floor division operator, the modulo\noperator, and the "divmod()" function are no longer defined for\ncomplex numbers. Instead, convert to a floating point number using\nthe "abs()" function if appropriate.\n\nThe "+" (addition) operator yields the sum of its arguments. The\narguments must either both be numbers or both sequences of the same\ntype. In the former case, the numbers are converted to a common type\nand then added together. In the latter case, the sequences are\nconcatenated.\n\nThe "-" (subtraction) operator yields the difference of its arguments.\nThe numeric arguments are first converted to a common type.\n',
- 'bitwise': u'\nBinary bitwise operations\n*************************\n\nEach of the three bitwise operations has a different priority level:\n\n and_expr ::= shift_expr | and_expr "&" shift_expr\n xor_expr ::= and_expr | xor_expr "^" and_expr\n or_expr ::= xor_expr | or_expr "|" xor_expr\n\nThe "&" operator yields the bitwise AND of its arguments, which must\nbe plain or long integers. The arguments are converted to a common\ntype.\n\nThe "^" operator yields the bitwise XOR (exclusive OR) of its\narguments, which must be plain or long integers. The arguments are\nconverted to a common type.\n\nThe "|" operator yields the bitwise (inclusive) OR of its arguments,\nwhich must be plain or long integers. The arguments are converted to\na common type.\n',
- 'bltin-code-objects': u'\nCode Objects\n************\n\nCode objects are used by the implementation to represent "pseudo-\ncompiled" executable Python code such as a function body. They differ\nfrom function objects because they don\'t contain a reference to their\nglobal execution environment. Code objects are returned by the built-\nin "compile()" function and can be extracted from function objects\nthrough their "func_code" attribute. See also the "code" module.\n\nA code object can be executed or evaluated by passing it (instead of a\nsource string) to the "exec" statement or the built-in "eval()"\nfunction.\n\nSee The standard type hierarchy for more information.\n',
- 'bltin-ellipsis-object': u'\nThe Ellipsis Object\n*******************\n\nThis object is used by extended slice notation (see Slicings). It\nsupports no special operations. There is exactly one ellipsis object,\nnamed "Ellipsis" (a built-in name).\n\nIt is written as "Ellipsis". When in a subscript, it can also be\nwritten as "...", for example "seq[...]".\n',
- 'bltin-file-objects': u'\nFile Objects\n************\n\nFile objects are implemented using C\'s "stdio" package and can be\ncreated with the built-in "open()" function. File objects are also\nreturned by some other built-in functions and methods, such as\n"os.popen()" and "os.fdopen()" and the "makefile()" method of socket\nobjects. Temporary files can be created using the "tempfile" module,\nand high-level file operations such as copying, moving, and deleting\nfiles and directories can be achieved with the "shutil" module.\n\nWhen a file operation fails for an I/O-related reason, the exception\n"IOError" is raised. This includes situations where the operation is\nnot defined for some reason, like "seek()" on a tty device or writing\na file opened for reading.\n\nFiles have the following methods:\n\nfile.close()\n\n Close the file. A closed file cannot be read or written any more.\n Any operation which requires that the file be open will raise a\n "ValueError" after the file has been closed. Calling "close()"\n more than once is allowed.\n\n As of Python 2.5, you can avoid having to call this method\n explicitly if you use the "with" statement. For example, the\n following code will automatically close *f* when the "with" block\n is exited:\n\n from __future__ import with_statement # This isn\'t required in Python 2.6\n\n with open("hello.txt") as f:\n for line in f:\n print line,\n\n In older versions of Python, you would have needed to do this to\n get the same effect:\n\n f = open("hello.txt")\n try:\n for line in f:\n print line,\n finally:\n f.close()\n\n Note: Not all "file-like" types in Python support use as a\n context manager for the "with" statement. If your code is\n intended to work with any file-like object, you can use the\n function "contextlib.closing()" instead of using the object\n directly.\n\nfile.flush()\n\n Flush the internal buffer, like "stdio"\'s "fflush()". 
This may be\n a no-op on some file-like objects.\n\n Note: "flush()" does not necessarily write the file\'s data to\n disk. Use "flush()" followed by "os.fsync()" to ensure this\n behavior.\n\nfile.fileno()\n\n Return the integer "file descriptor" that is used by the underlying\n implementation to request I/O operations from the operating system.\n This can be useful for other, lower level interfaces that use file\n descriptors, such as the "fcntl" module or "os.read()" and friends.\n\n Note: File-like objects which do not have a real file descriptor\n should *not* provide this method!\n\nfile.isatty()\n\n Return "True" if the file is connected to a tty(-like) device, else\n "False".\n\n Note: If a file-like object is not associated with a real file,\n this method should *not* be implemented.\n\nfile.next()\n\n A file object is its own iterator, for example "iter(f)" returns\n *f* (unless *f* is closed). When a file is used as an iterator,\n typically in a "for" loop (for example, "for line in f: print\n line.strip()"), the "next()" method is called repeatedly. This\n method returns the next input line, or raises "StopIteration" when\n EOF is hit when the file is open for reading (behavior is undefined\n when the file is open for writing). In order to make a "for" loop\n the most efficient way of looping over the lines of a file (a very\n common operation), the "next()" method uses a hidden read-ahead\n buffer. As a consequence of using a read-ahead buffer, combining\n "next()" with other file methods (like "readline()") does not work\n right. However, using "seek()" to reposition the file to an\n absolute position will flush the read-ahead buffer.\n\n New in version 2.3.\n\nfile.read([size])\n\n Read at most *size* bytes from the file (less if the read hits EOF\n before obtaining *size* bytes). If the *size* argument is negative\n or omitted, read all data until EOF is reached. The bytes are\n returned as a string object. 
An empty string is returned when EOF\n is encountered immediately. (For certain files, like ttys, it\n makes sense to continue reading after an EOF is hit.) Note that\n this method may call the underlying C function "fread()" more than\n once in an effort to acquire as close to *size* bytes as possible.\n Also note that when in non-blocking mode, less data than was\n requested may be returned, even if no *size* parameter was given.\n\n Note: This function is simply a wrapper for the underlying\n "fread()" C function, and will behave the same in corner cases,\n such as whether the EOF value is cached.\n\nfile.readline([size])\n\n Read one entire line from the file. A trailing newline character\n is kept in the string (but may be absent when a file ends with an\n incomplete line). [6] If the *size* argument is present and non-\n negative, it is a maximum byte count (including the trailing\n newline) and an incomplete line may be returned. When *size* is not\n 0, an empty string is returned *only* when EOF is encountered\n immediately.\n\n Note: Unlike "stdio"\'s "fgets()", the returned string contains\n null characters ("\'\\0\'") if they occurred in the input.\n\nfile.readlines([sizehint])\n\n Read until EOF using "readline()" and return a list containing the\n lines thus read. If the optional *sizehint* argument is present,\n instead of reading up to EOF, whole lines totalling approximately\n *sizehint* bytes (possibly after rounding up to an internal buffer\n size) are read. Objects implementing a file-like interface may\n choose to ignore *sizehint* if it cannot be implemented, or cannot\n be implemented efficiently.\n\nfile.xreadlines()\n\n This method returns the same thing as "iter(f)".\n\n New in version 2.1.\n\n Deprecated since version 2.3: Use "for line in file" instead.\n\nfile.seek(offset[, whence])\n\n Set the file\'s current position, like "stdio"\'s "fseek()". 
The\n *whence* argument is optional and defaults to "os.SEEK_SET" or "0"\n (absolute file positioning); other values are "os.SEEK_CUR" or "1"\n (seek relative to the current position) and "os.SEEK_END" or "2"\n (seek relative to the file\'s end). There is no return value.\n\n For example, "f.seek(2, os.SEEK_CUR)" advances the position by two\n and "f.seek(-3, os.SEEK_END)" sets the position to the third to\n last.\n\n Note that if the file is opened for appending (mode "\'a\'" or\n "\'a+\'"), any "seek()" operations will be undone at the next write.\n If the file is only opened for writing in append mode (mode "\'a\'"),\n this method is essentially a no-op, but it remains useful for files\n opened in append mode with reading enabled (mode "\'a+\'"). If the\n file is opened in text mode (without "\'b\'"), only offsets returned\n by "tell()" are legal. Use of other offsets causes undefined\n behavior.\n\n Note that not all file objects are seekable.\n\n Changed in version 2.6: Passing float values as offset has been\n deprecated.\n\nfile.tell()\n\n Return the file\'s current position, like "stdio"\'s "ftell()".\n\n Note: On Windows, "tell()" can return illegal values (after an\n "fgets()") when reading files with Unix-style line-endings. Use\n binary mode ("\'rb\'") to circumvent this problem.\n\nfile.truncate([size])\n\n Truncate the file\'s size. If the optional *size* argument is\n present, the file is truncated to (at most) that size. The size\n defaults to the current position. The current file position is not\n changed. Note that if a specified size exceeds the file\'s current\n size, the result is platform-dependent: possibilities include that\n the file may remain unchanged, increase to the specified size as if\n zero-filled, or increase to the specified size with undefined new\n content. Availability: Windows, many Unix variants.\n\nfile.write(str)\n\n Write a string to the file. There is no return value. 
Due to\n buffering, the string may not actually show up in the file until\n the "flush()" or "close()" method is called.\n\nfile.writelines(sequence)\n\n Write a sequence of strings to the file. The sequence can be any\n iterable object producing strings, typically a list of strings.\n There is no return value. (The name is intended to match\n "readlines()"; "writelines()" does not add line separators.)\n\nFiles support the iterator protocol. Each iteration returns the same\nresult as "readline()", and iteration ends when the "readline()"\nmethod returns an empty string.\n\nFile objects also offer a number of other interesting attributes.\nThese are not required for file-like objects, but should be\nimplemented if they make sense for the particular object.\n\nfile.closed\n\n bool indicating the current state of the file object. This is a\n read-only attribute; the "close()" method changes the value. It may\n not be available on all file-like objects.\n\nfile.encoding\n\n The encoding that this file uses. When Unicode strings are written\n to a file, they will be converted to byte strings using this\n encoding. In addition, when the file is connected to a terminal,\n the attribute gives the encoding that the terminal is likely to use\n (that information might be incorrect if the user has misconfigured\n the terminal). The attribute is read-only and may not be present\n on all file-like objects. It may also be "None", in which case the\n file uses the system default encoding for converting Unicode\n strings.\n\n New in version 2.3.\n\nfile.errors\n\n The Unicode error handler used along with the encoding.\n\n New in version 2.6.\n\nfile.mode\n\n The I/O mode for the file. If the file was created using the\n "open()" built-in function, this will be the value of the *mode*\n parameter. This is a read-only attribute and may not be present on\n all file-like objects.\n\nfile.name\n\n If the file object was created using "open()", the name of the\n file. 
Otherwise, some string that indicates the source of the file\n object, of the form "<...>". This is a read-only attribute and may\n not be present on all file-like objects.\n\nfile.newlines\n\n If Python was built with *universal newlines* enabled (the default)\n this read-only attribute exists, and for files opened in universal\n newline read mode it keeps track of the types of newlines\n encountered while reading the file. The values it can take are\n "\'\\r\'", "\'\\n\'", "\'\\r\\n\'", "None" (unknown, no newlines read yet) or\n a tuple containing all the newline types seen, to indicate that\n multiple newline conventions were encountered. For files not opened\n in universal newlines read mode the value of this attribute will be\n "None".\n\nfile.softspace\n\n Boolean that indicates whether a space character needs to be\n printed before another value when using the "print" statement.\n Classes that are trying to simulate a file object should also have\n a writable "softspace" attribute, which should be initialized to\n zero. This will be automatic for most classes implemented in\n Python (care may be needed for objects that override attribute\n access); types implemented in C will have to provide a writable\n "softspace" attribute.\n\n Note: This attribute is not used to control the "print"\n statement, but to allow the implementation of "print" to keep\n track of its internal state.\n',
- 'bltin-null-object': u'\nThe Null Object\n***************\n\nThis object is returned by functions that don\'t explicitly return a\nvalue. It supports no special operations. There is exactly one null\nobject, named "None" (a built-in name).\n\nIt is written as "None".\n',
- 'bltin-type-objects': u'\nType Objects\n************\n\nType objects represent the various object types. An object\'s type is\naccessed by the built-in function "type()". There are no special\noperations on types. The standard module "types" defines names for\nall standard built-in types.\n\nTypes are written like this: "<type \'int\'>".\n',
- 'booleans': u'\nBoolean operations\n******************\n\n or_test ::= and_test | or_test "or" and_test\n and_test ::= not_test | and_test "and" not_test\n not_test ::= comparison | "not" not_test\n\nIn the context of Boolean operations, and also when expressions are\nused by control flow statements, the following values are interpreted\nas false: "False", "None", numeric zero of all types, and empty\nstrings and containers (including strings, tuples, lists,\ndictionaries, sets and frozensets). All other values are interpreted\nas true. (See the "__nonzero__()" special method for a way to change\nthis.)\n\nThe operator "not" yields "True" if its argument is false, "False"\notherwise.\n\nThe expression "x and y" first evaluates *x*; if *x* is false, its\nvalue is returned; otherwise, *y* is evaluated and the resulting value\nis returned.\n\nThe expression "x or y" first evaluates *x*; if *x* is true, its value\nis returned; otherwise, *y* is evaluated and the resulting value is\nreturned.\n\n(Note that neither "and" nor "or" restrict the value and type they\nreturn to "False" and "True", but rather return the last evaluated\nargument. This is sometimes useful, e.g., if "s" is a string that\nshould be replaced by a default value if it is empty, the expression\n"s or \'foo\'" yields the desired value. Because "not" has to invent a\nvalue anyway, it does not bother to return a value of the same type as\nits argument, so e.g., "not \'foo\'" yields "False", not "\'\'".)\n',
- 'break': u'\nThe "break" statement\n*********************\n\n break_stmt ::= "break"\n\n"break" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition within that\nloop.\n\nIt terminates the nearest enclosing loop, skipping the optional "else"\nclause if the loop has one.\n\nIf a "for" loop is terminated by "break", the loop control target\nkeeps its current value.\n\nWhen "break" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nloop.\n',
- 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n',
- 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","]\n | expression genexpr_for] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and certain class instances\nthemselves are callable; extensions may define additional callable\nobject types). All argument expressions are evaluated before the call\nis attempted. Please refer to section Function definitions for the\nsyntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). 
When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. 
Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print a, b\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames. Formal parameters using the syntax "(sublist)" cannot be used\nas keyword argument names; the outermost sublist corresponds to a\nsingle unnamed argument slot, and the argument value is assigned to\nthe sublist using the usual tuple assignment rules after all other\nparameter processing is done.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. 
The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n Function definitions. When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see Built-in Functions for the\n descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n',
- 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. 
The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
- 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. 
In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. "x not in s" returns the negation of "x in s". The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it make sense\nfor many other object types to support membership tests without being\na sequence. 
In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that either "x is y[i]" or "x == y[i]" is\ntrue.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n',
- 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements. Function and class definitions are\nalso syntactically compound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print x\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print" statements are executed:\n\n if x < y < z: print x; print y; print z\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n | decorated\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. 
An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). 
Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. 
An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. 
As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... 
return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n\n\nThe "with" statement\n====================\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. 
Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section The standard\ntype hierarchy):\n\n classdef ::= "class" classname [inheritance] ":" suite\n inheritance ::= "(" [expression_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. 
It first evaluates the\ninheritance list, if present. Each item in the inheritance list\nshould evaluate to a class object or class type which allows\nsubclassing. The class\'s suite is then executed in a new execution\nframe (see section Naming and binding), using a newly created local\nnamespace and the original global namespace. (Usually, the suite\ncontains only function definitions.) When the class\'s suite finishes\nexecution, its execution frame is discarded but its local namespace is\nsaved. [4] A class object is then created using the inheritance list\nfor the base classes and the saved local namespace for the attribute\ndictionary. The class name is bound to this class object in the\noriginal local namespace.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass variables; they are shared by all instances. To create instance\nvariables, they can be set in a method with "self.name = value". Both\nclass and instance variables are accessible through the notation\n""self.name"", and an instance variable hides a class variable with\nthe same name when accessed in this way. Class variables can be used\nas defaults for instance variables, but using mutable values there can\nlead to unexpected results. For *new-style class*es, descriptors can\nbe used to create instance variables with different implementation\ndetails.\n\nClass definitions, like function definitions, may be wrapped by one or\nmore *decorator* expressions. The evaluation rules for the decorator\nexpressions are the same as for functions. The result must be a class\nobject, which is then bound to the class name.\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. 
That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n',
- 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n',
- 'continue': u'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n',
- 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," the arguments\nare coerced using the coercion rules listed at Coercion rules. If\nboth arguments are standard numeric types, the following coercions are\napplied:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, if either argument is a long integer, the other is\n converted to long integer;\n\n* otherwise, both must be plain integers and no conversion is\n necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions can define their own\ncoercions.\n',
- 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. 
If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). 
The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). 
If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. 
However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. 
(Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable\n (if the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n',
- 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible --- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 2.4: Restarting post-mortem behavior added.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. 
You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "c" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "<stdin>", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print spam\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print spam\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement[, globals[, locals]])\n\n Execute the *statement* (given as a string) under debugger control.\n The debugger prompt appears before any code is executed; you can\n set breakpoints and type "continue", or you can step through the\n statement using "step" or "next" (all these commands are explained\n below). The optional *globals* and *locals* arguments specify the\n environment in which the code is executed; by default the\n dictionary of the module "__main__" is used. (See the explanation\n of the "exec" statement or the "eval()" built-in function.)\n\npdb.runeval(expression[, globals[, locals]])\n\n Evaluate the *expression* (given as a string) under debugger\n control. When "runeval()" returns, it returns the value of the\n expression. Otherwise this function is similar to "run()".\n\npdb.runcall(function[, argument, ...])\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. 
when an assertion\n fails).\n\npdb.post_mortem([traceback])\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 2.7: The *skip* argument.\n\n run(statement[, globals[, locals]])\n runeval(expression[, globals[, locals]])\n runcall(function[, argument, ...])\n set_trace()\n\n See the documentation for the functions explained above.\n',
- 'del': u'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nIt is illegal to delete a name from the local namespace if it occurs\nas a free variable in a nested block.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n',
- 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection The standard type hierarchy. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n',
- 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
- 'else': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
- 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. 
Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section The try\nstatement and "raise" statement in section The raise statement.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n',
- 'exec': u'\nThe "exec" statement\n********************\n\n exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n\nThis statement supports dynamic execution of Python code. The first\nexpression should evaluate to either a Unicode string, a *Latin-1*\nencoded string, an open file object, a code object, or a tuple. If it\nis a string, the string is parsed as a suite of Python statements\nwhich is then executed (unless a syntax error occurs). [1] If it is an\nopen file, the file is parsed until EOF and executed. If it is a code\nobject, it is simply executed. For the interpretation of a tuple, see\nbelow. In all cases, the code that\'s executed is expected to be valid\nas file input (see section File input). Be aware that the "return"\nand "yield" statements may not be used outside of function definitions\neven within the context of code passed to the "exec" statement.\n\nIn all cases, if the optional parts are omitted, the code is executed\nin the current scope. If only the first expression after "in" is\nspecified, it should be a dictionary, which will be used for both the\nglobal and the local variables. If two expressions are given, they\nare used for the global and local variables, respectively. If\nprovided, *locals* can be any mapping object. Remember that at module\nlevel, globals and locals are the same dictionary. If two separate\nobjects are given as *globals* and *locals*, the code will be executed\nas if it were embedded in a class definition.\n\nThe first expression may also be a tuple of length 2 or 3. In this\ncase, the optional parts must be omitted. The form "exec(expr,\nglobals)" is equivalent to "exec expr in globals", while the form\n"exec(expr, globals, locals)" is equivalent to "exec expr in globals,\nlocals". 
The tuple form of "exec" provides compatibility with Python\n3, where "exec" is a function rather than a statement.\n\nChanged in version 2.4: Formerly, *locals* was required to be a\ndictionary.\n\nAs a side effect, an implementation may insert additional keys into\nthe dictionaries given besides those corresponding to variable names\nset by the executed code. For example, the current implementation may\nadd a reference to the dictionary of the built-in module "__builtin__"\nunder the key "__builtins__" (!).\n\n**Programmer\'s hints:** dynamic evaluation of expressions is supported\nby the built-in function "eval()". The built-in functions "globals()"\nand "locals()" return the current global and local dictionary,\nrespectively, which may be useful to pass around for use by "exec".\n\n-[ Footnotes ]-\n\n[1] Note that the parser only accepts the Unix-style end of line\n convention. If you are reading the code from a file, make sure to\n use *universal newlines* mode to convert Windows or Mac-style\n newlines.\n',
- 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function "execfile()" is a code block. The string argument\npassed to the built-in function "eval()" and to the "exec" statement\nis a code block. The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. 
This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. 
This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. 
If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. 
An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nExceptions can also be identified by strings, in which case the\n"except" clause is selected by object identity. An arbitrary value\ncan be raised along with the identifying string which can be passed to\nthe handler.\n\nNote: Messages to exceptions are not part of the Python API. 
Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section The try\nstatement and "raise" statement in section The raise statement.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n',
- 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. (To create an empty tuple, use an empty pair\nof parentheses: "()".)\n',
- 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts of floating point numbers can\nlook like octal integers, but are interpreted using radix 10. For\nexample, "077e010" is legal, and denotes the same number as "77e10".\nThe allowed range of floating point literals is implementation-\ndependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n',
- 'for': u'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments, and then the suite is executed. When the items are\nexhausted (which is immediately when the sequence is empty), the suite\nin the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there was no next\nitem.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nThe target list is not deleted when the loop is finished, but if the\nsequence is empty, it will not have been assigned to at all by the\nloop. Hint: the built-in function "range()" returns a sequence of\nintegers suitable to emulate the effect of Pascal\'s "for i := a to b\ndo"; e.g., "range(3)" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. 
This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n',
- 'formatstrings': u'\nFormat String Syntax\n********************\n\nThe "str.format()" method and the "Formatter" class share the same\nsyntax for format strings (although in the case of "Formatter",\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n"{}". Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n"{{" and "}}".\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= <any source character except "]"> +\n conversion ::= "r" | "s"\n format_spec ::= <described in the next section>\n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point "\'!\'", and a *format_spec*, which is\npreceded by a colon "\':\'". These specify a non-default format for the\nreplacement value.\n\nSee also the Format Specification Mini-Language section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... 
will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings "\'10\'" or\n"\':-]\'") within a format string. The *arg_name* can be followed by any\nnumber of index or attribute expressions. An expression of the form\n"\'.name\'" selects the named attribute using "getattr()", while an\nexpression of the form "\'[index]\'" does an index lookup using\n"__getitem__()".\n\nChanged in version 2.7: The positional argument specifiers can be\nomitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the "__format__()"\nmethod of the value itself. However, in some cases it is desirable to\nforce a type to be formatted as a string, overriding its own\ndefinition of formatting. By converting the value to a string before\ncalling "__format__()", the normal formatting logic is bypassed.\n\nTwo conversion flags are currently supported: "\'!s\'" which calls\n"str()" on the value, and "\'!r\'" which calls "repr()".\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. 
Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields may contain a field name,\nconversion flag and format specification, but deeper nesting is not\nallowed. The replacement fields within the format_spec are\nsubstituted before the *format_spec* string is interpreted. This\nallows the formatting of a value to be dynamically specified.\n\nSee the Format examples section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see Format String Syntax). They can also be passed directly to the\nbuilt-in "format()" function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string ("""") produces\nthe same result as if you had called "str()" on the value. A non-empty\nformat string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= <any character>\n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nIf a valid *align* value is specified, it can be preceded by a *fill*\ncharacter that can be any character and defaults to a space if\nomitted. 
It is not possible to use a literal curly brace (""{"" or\n""}"") as the *fill* character when using the "str.format()" method.\nHowever, it is possible to insert a curly brace with a nested\nreplacement field. This limitation doesn\'t affect the "format()"\nfunction.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'<\'" | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | "\'>\'" | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | "\'=\'" | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. It becomes the default when \'0\' |\n | | immediately precedes the field width. |\n +-----------+------------------------------------------------------------+\n | "\'^\'" | Forces the field to be centered within the available |\n | | space. 
|\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | "\'+\'" | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | "\'-\'" | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe "\'#\'" option is only valid for integers, and only for binary,\noctal, or hexadecimal output. If present, it specifies that the\noutput will be prefixed by "\'0b\'", "\'0o\'", or "\'0x\'", respectively.\n\nThe "\',\'" option signals the use of a comma for a thousands separator.\nFor a locale aware separator, use the "\'n\'" integer presentation type\ninstead.\n\nChanged in version 2.7: Added the "\',\'" option (see also **PEP 378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nWhen no explicit alignment is given, preceding the *width* field by a\nzero ("\'0\'") character enables sign-aware zero-padding for numeric\ntypes. 
This is equivalent to a *fill* character of "\'0\'" with an\n*alignment* type of "\'=\'".\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with "\'f\'" and "\'F\'", or before and after the decimal point\nfor a floating point value formatted with "\'g\'" or "\'G\'". For non-\nnumber types the field indicates the maximum field size - in other\nwords, how many characters will be used from the field content. The\n*precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'s\'" | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'s\'". |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'b\'" | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | "\'c\'" | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | "\'d\'" | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | "\'o\'" | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | "\'x\'" | Hex format. 
Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'X\'" | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'d\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as "\'d\'". |\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except "\'n\'"\nand "None"). When doing so, "float()" is used to convert the integer\nto a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | "\'e\'" | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'E\'" | Exponent notation. Same as "\'e\'" except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | "\'f\'" | Fixed point. Displays the number as a fixed-point number. |\n | | The default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'F\'" | Fixed point. Same as "\'f\'". 
|\n +-----------+------------------------------------------------------------+\n | "\'g\'" | General format. For a given precision "p >= 1", this |\n | | rounds the number to "p" significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type "\'e\'" and precision "p-1" |\n | | would have exponent "exp". Then if "-4 <= exp < p", the |\n | | number is formatted with presentation type "\'f\'" and |\n | | precision "p-1-exp". Otherwise, the number is formatted |\n | | with presentation type "\'e\'" and precision "p-1". In both |\n | | cases insignificant trailing zeros are removed from the |\n | | significand, and the decimal point is also removed if |\n | | there are no remaining digits following it. Positive and |\n | | negative infinity, positive and negative zero, and nans, |\n | | are formatted as "inf", "-inf", "0", "-0" and "nan" |\n | | respectively, regardless of the precision. A precision of |\n | | "0" is treated as equivalent to a precision of "1". The |\n | | default precision is "6". |\n +-----------+------------------------------------------------------------+\n | "\'G\'" | General format. Same as "\'g\'" except switches to "\'E\'" if |\n | | the number gets too large. The representations of infinity |\n | | and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | "\'n\'" | Number. This is the same as "\'g\'", except that it uses the |\n | | current locale setting to insert the appropriate number |\n | | separator characters. |\n +-----------+------------------------------------------------------------+\n | "\'%\'" | Percentage. Multiplies the number by 100 and displays in |\n | | fixed ("\'f\'") format, followed by a percent sign. 
|\n +-----------+------------------------------------------------------------+\n | None | The same as "\'g\'". |\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the "str.format()" syntax and\ncomparison with the old "%"-formatting.\n\nIn most of the cases the syntax is similar to the old "%"-formatting,\nwith the addition of the "{}" and with ":" used instead of "%". For\nexample, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 2.7+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point(object):\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... 
return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing "%s" and "%r":\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing "%+f", "%-f", and "% f" and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing "%x" and "%o" and converting the value to different bases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19.5\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 88.64%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> \'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in 
zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print \'{0:{width}{base}}\'.format(num, base=base, width=width),\n ... print\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n',
- 'function': u'\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection The standard type hierarchy):\n\n decorated ::= decorators (classdef | funcdef)\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [argument_list [","]] ")"] NEWLINE\n funcdef ::= "def" funcname "(" [parameter_list] ")" ":" suite\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" identifier ["," "**" identifier]\n | "**" identifier\n | defparameter [","] )\n defparameter ::= parameter ["=" expression]\n sublist ::= parameter ("," parameter)* [","]\n parameter ::= identifier | "(" sublist ")"\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code:\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to:\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more top-level *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters must also have a default value --- this is a syntactic\nrestriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use "None" as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section Calls.\nA function call always assigns values to all parameters mentioned in\nthe parameter list, either from position arguments, from keyword\narguments, or from default values. If the form ""*identifier"" is\npresent, it is initialized to a tuple receiving any excess positional\nparameters, defaulting to the empty tuple. 
If the form\n""**identifier"" is present, it is initialized to a new dictionary\nreceiving any excess keyword arguments, defaulting to a new empty\ndictionary.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section Lambdas. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section Naming and binding for details.\n',
- 'global': u'\nThe "global" statement\n**********************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe "global" statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without "global", although free variables may refer to\nglobals without being declared global.\n\nNames listed in a "global" statement must not be used in the same code\nblock textually preceding that "global" statement.\n\nNames listed in a "global" statement must not be defined as formal\nparameters or in a "for" loop control target, "class" definition,\nfunction definition, or "import" statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the "global" is a directive to the parser. It\napplies only to code parsed at the same time as the "global"\nstatement. In particular, a "global" statement contained in an "exec"\nstatement does not affect the code block *containing* the "exec"\nstatement, and code contained in an "exec" statement is unaffected by\n"global" statements in the code containing the "exec" statement. The\nsame applies to the "eval()", "execfile()" and "compile()" functions.\n',
- 'id-classes': u'\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "__builtin__" module. When\n not in interactive mode, "_" has no special meaning and is not\n defined. See section The import statement.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the Special method names section and\n elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section Identifiers (Names).\n',
- 'identifiers': u'\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions:\n\n identifier ::= (letter|"_") (letter | digit | "_")*\n letter ::= lowercase | uppercase\n lowercase ::= "a"..."z"\n uppercase ::= "A"..."Z"\n digit ::= "0"..."9"\n\nIdentifiers are unlimited in length. Case is significant.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n and del from not while\n as elif global or with\n assert else if pass yield\n break except import print\n class exec in raise\n continue finally is return\n def for lambda try\n\nChanged in version 2.4: "None" became a constant and is now recognized\nby the compiler as a name for the built-in object "None". Although it\nis not a keyword, you cannot assign a different object to it.\n\nChanged in version 2.5: Using "as" and "with" as identifiers triggers\na warning. To use them as keywords, enable the "with_statement"\nfuture feature .\n\nChanged in version 2.6: "as" and "with" are full keywords.\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n"_*"\n Not imported by "from module import *". The special identifier "_"\n is used in the interactive interpreter to store the result of the\n last evaluation; it is stored in the "__builtin__" module. When\n not in interactive mode, "_" has no special meaning and is not\n defined. See section The import statement.\n\n Note: The name "_" is often used in conjunction with\n internationalization; refer to the documentation for the\n "gettext" module for more information on this convention.\n\n"__*__"\n System-defined names. 
These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the Special method names section and\n elsewhere. More will likely be defined in future versions of\n Python. *Any* use of "__*__" names, in any context, that does not\n follow explicitly documented use, is subject to breakage without\n warning.\n\n"__*"\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section Identifiers (Names).\n',
- 'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section Boolean operations\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n',
- 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n',
- 'import': u'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nImport statements are executed in two steps: (1) find a module, and\ninitialize it if necessary; (2) define a name or names in the local\nnamespace (of the scope where the "import" statement occurs). The\nstatement comes in two forms differing on whether it uses the "from"\nkeyword. The first form (without "from") repeats these steps for each\nidentifier in the list. The form with "from" performs step (1) once,\nand then performs step (2) repeatedly.\n\nTo understand how step (1) occurs, one must first understand how\nPython handles hierarchical naming of modules. To help organize\nmodules and provide a hierarchy in naming, Python has a concept of\npackages. A package can contain other packages and modules while\nmodules cannot contain other modules or packages. From a file system\nperspective, packages are directories and modules are files.\n\nOnce the name of the module is known (unless otherwise specified, the\nterm "module" will refer to both packages and modules), searching for\nthe module or package can begin. The first place checked is\n"sys.modules", the cache of all modules that have been imported\npreviously. If the module is found there then it is used in step (2)\nof import.\n\nIf the module is not found in the cache, then "sys.meta_path" is\nsearched (the specification for "sys.meta_path" can be found in **PEP\n302**). 
The object is a list of *finder* objects which are queried in\norder as to whether they know how to load the module by calling their\n"find_module()" method with the name of the module. If the module\nhappens to be contained within a package (as denoted by the existence\nof a dot in the name), then a second argument to "find_module()" is\ngiven as the value of the "__path__" attribute from the parent package\n(everything up to the last dot in the name of the module being\nimported). If a finder can find the module it returns a *loader*\n(discussed later) or returns "None".\n\nIf none of the finders on "sys.meta_path" are able to find the module\nthen some implicitly defined finders are queried. Implementations of\nPython vary in what implicit meta path finders are defined. The one\nthey all do define, though, is one that handles "sys.path_hooks",\n"sys.path_importer_cache", and "sys.path".\n\nThe implicit finder searches for the requested module in the "paths"\nspecified in one of two places ("paths" do not have to be file system\npaths). If the module being imported is supposed to be contained\nwithin a package then the second argument passed to "find_module()",\n"__path__" on the parent package, is used as the source of paths. If\nthe module is not contained in a package then "sys.path" is used as\nthe source of paths.\n\nOnce the source of paths is chosen it is iterated over to find a\nfinder that can handle that path. The dict at\n"sys.path_importer_cache" caches finders for paths and is checked for\na finder. If the path does not have a finder cached then\n"sys.path_hooks" is searched by calling each object in the list with a\nsingle argument of the path, returning a finder or raises\n"ImportError". If a finder is returned then it is cached in\n"sys.path_importer_cache" and then used for that path entry. 
If no\nfinder can be found but the path exists then a value of "None" is\nstored in "sys.path_importer_cache" to signify that an implicit, file-\nbased finder that handles modules stored as individual files should be\nused for that path. If the path does not exist then a finder which\nalways returns "None" is placed in the cache for the path.\n\nIf no finder can find the module then "ImportError" is raised.\nOtherwise some finder returned a loader whose "load_module()" method\nis called with the name of the module to load (see **PEP 302** for the\noriginal definition of loaders). A loader has several responsibilities\nto perform on a module it loads. First, if the module already exists\nin "sys.modules" (a possibility if the loader is called outside of the\nimport machinery) then it is to use that module for initialization and\nnot a new module. But if the module does not exist in "sys.modules"\nthen it is to be added to that dict before initialization begins. If\nan error occurs during loading of the module and it was added to\n"sys.modules" it is to be removed from the dict. If an error occurs\nbut the module was already in "sys.modules" it is left in the dict.\n\nThe loader must set several attributes on the module. "__name__" is to\nbe set to the name of the module. "__file__" is to be the "path" to\nthe file unless the module is built-in (and thus listed in\n"sys.builtin_module_names") in which case the attribute is not set. If\nwhat is being imported is a package then "__path__" is to be set to a\nlist of paths to be searched when looking for modules and packages\ncontained within the package being imported. "__package__" is optional\nbut should be set to the name of package that contains the module or\npackage (the empty string is used for module not contained in a\npackage). 
"__loader__" is also optional but should be set to the\nloader object that is loading the module.\n\nIf an error occurs during loading then the loader raises "ImportError"\nif some other exception is not already being propagated. Otherwise the\nloader returns the module that was loaded and initialized.\n\nWhen step (1) finishes without raising an exception, step (2) can\nbegin.\n\nThe first form of "import" statement binds the module name in the\nlocal namespace to the module object, and then goes on to import the\nnext identifier, if any. If the module name is followed by "as", the\nname following "as" is used as the local name for the module.\n\nThe "from" form does not bind the module name: it goes through the\nlist of identifiers, looks each one of them up in the module found in\nstep (1), and binds the name in the local namespace to the object thus\nfound. As with the first form of "import", an alternate local name\ncan be supplied by specifying ""as" localname". If a name is not\nfound, "ImportError" is raised. If the list of identifiers is\nreplaced by a star ("\'*\'"), all public names defined in the module are\nbound in the local namespace of the "import" statement..\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe "from" form with "*" may only occur in a module scope. 
If the\nwild card form of import --- "import *" --- is used in a function and\nthe function contains or is a nested block with free variables, the\ncompiler will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine which modules need to be loaded dynamically.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python. The future\nstatement is intended to ease migration to future versions of Python\nthat introduce incompatible changes to the language. 
It allows use of\nthe new features on a per-module basis before the release in which the\nfeature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 2.6 are "unicode_literals",\n"print_function", "absolute_import", "division", "generators",\n"nested_scopes" and "with_statement". "generators", "with_statement",\n"nested_scopes" are redundant in Python version 2.6 and above because\nthey are always enabled.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. 
Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by an "exec" statement or calls to the built-in\nfunctions "compile()" and "execfile()" that occur in a module "M"\ncontaining a future statement will, by default, use the new syntax or\nsemantics associated with the future statement. This can, starting\nwith Python 2.2 be controlled by optional arguments to "compile()" ---\nsee the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the "-i" option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also:\n\n **PEP 236** - Back to the __future__\n The original proposal for the __future__ mechanism.\n',
- 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe forms "<>" and "!=" are equivalent; for consistency with C, "!="\nis preferred; where "!=" is mentioned below "<>" is also accepted.\nThe "<>" spelling is considered obsolescent.\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, objects of\ndifferent types *always* compare unequal, and are ordered consistently\nbut arbitrarily. You can control comparison behavior of objects of\nnon-built-in types by defining a "__cmp__" method or rich comparison\nmethods like "__gt__", described in section Special method names.\n\n(This unusual definition of comparison was used to simplify the\ndefinition of operations like sorting and the "in" and "not in"\noperators. 
In the future, the comparison rules for objects of\ndifferent types are likely to change.)\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. Unicode and 8-bit strings are fully interoperable in\n this behavior. [4]\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "cmp([1,2,x], [1,2,y])" returns\n the same as "cmp(x,y)". If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, "[1,2] <\n [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if their sorted\n (key, value) lists compare equal. [5] Outcomes other than equality\n are resolved consistently, but are not otherwise defined. [6]\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nThe operators "in" and "not in" test for collection membership. "x in\ns" evaluates to true if *x* is a member of the collection *s*, and\nfalse otherwise. "x not in s" returns the negation of "x in s". The\ncollection membership test has traditionally been bound to sequences;\nan object is a member of a collection if the collection is a sequence\nand contains an element equal to that object. However, it make sense\nfor many other object types to support membership tests without being\na sequence. 
In particular, dictionaries (for keys) and sets support\nmembership testing.\n\nFor the list and tuple types, "x in y" is true if and only if there\nexists an index *i* such that either "x is y[i]" or "x == y[i]" is\ntrue.\n\nFor the Unicode and string types, "x in y" is true if and only if *x*\nis a substring of *y*. An equivalent test is "y.find(x) != -1".\nNote, *x* and *y* need not be the same type; consequently, "u\'ab\' in\n\'abc\'" will return "True". Empty strings are always considered to be a\nsubstring of any other string, so """ in "abc"" will return "True".\n\nChanged in version 2.3: Previously, *x* was required to be a string of\nlength "1".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [7]\n',
- 'integers': u'\nInteger and long integer literals\n*********************************\n\nInteger and long integer literals are described by the following\nlexical definitions:\n\n longinteger ::= integer ("l" | "L")\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"\n octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n nonzerodigit ::= "1"..."9"\n octdigit ::= "0"..."7"\n bindigit ::= "0" | "1"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n\nAlthough both lower case "\'l\'" and upper case "\'L\'" are allowed as\nsuffix for long integers, it is strongly recommended to always use\n"\'L\'", since the letter "\'l\'" looks too much like the digit "\'1\'".\n\nPlain integer literals that are above the largest representable plain\ninteger (e.g., 2147483647 when using 32-bit arithmetic) are accepted\nas if they were long integers instead. [1] There is no limit for long\ninteger literals apart from what can be stored in available memory.\n\nSome examples of plain integer literals (first row) and long integer\nliterals (second and third rows):\n\n 7 2147483647 0177\n 3L 79228162514264337593543950336L 0377L 0x100000000L\n 79228162514264337593543950336 0xdeadbeef\n',
- 'lambda': u'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n\nLambda expressions (sometimes called lambda forms) have the same\nsyntactic position as expressions. They are a shorthand to create\nanonymous functions; the expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def name(arguments):\n return expression\n\nSee section Function definitions for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements.\n',
- 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | list_comprehension] "]"\n list_comprehension ::= expression list_for\n list_for ::= "for" target_list "in" old_expression_list [list_iter]\n old_expression_list ::= old_expression [("," old_expression)+ [","]]\n old_expression ::= or_test | old_lambda_expr\n list_iter ::= list_for | list_if\n list_if ::= "if" old_expression [list_iter]\n\nA list display yields a new list object. Its contents are specified\nby providing either a list of expressions or a list comprehension.\nWhen a comma-separated list of expressions is supplied, its elements\nare evaluated from left to right and placed into the list object in\nthat order. When a list comprehension is supplied, it consists of a\nsingle expression followed by at least one "for" clause and zero or\nmore "for" or "if" clauses. In this case, the elements of the new\nlist are those that would be produced by considering each of the "for"\nor "if" clauses a block, nesting from left to right, and evaluating\nthe expression to produce a list element each time the innermost block\nis reached [1].\n',
- 'naming': u'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\non the interpreter command line the first argument) is a code block.\nA script command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The file read by the\nbuilt-in function "execfile()" is a code block. The string argument\npassed to the built-in function "eval()" and to the "exec" statement\nis a code block. The expression read and evaluated by the built-in\nfunction "input()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes generator expressions since\nthey are implemented using a function scope. This means that the\nfollowing will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. 
The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block.\nIf a name is bound at the module level, it is a global variable. (The\nvariables of the module code block are local and global.) If a\nvariable is used in a code block but not defined there, it is a *free\nvariable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, a\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, in the\nsecond position of an "except" clause header or after "as" in a "with"\nstatement. The "import" statement of the form "from ... import *"\nbinds all names defined in the imported module, except those beginning\nwith an underscore. This form may only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name). It\nis illegal to unbind a name that is referenced by an enclosing scope;\nthe compiler will report a "SyntaxError".\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. 
The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the global statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "__builtin__". The global namespace is searched first.\nIf the name is not found there, the builtins namespace is searched.\nThe global statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "__builtin__" (note: no\n\'s\'); when in any other module, "__builtins__" is an alias for the\ndictionary of the "__builtin__" module itself. "__builtins__" can be\nset to a user-created dictionary to create a weak form of restricted\nexecution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "__builtin__" (no \'s\') module and modify its attributes\nappropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. 
These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nIf "exec" is used in a function and the function contains or is a\nnested block with free variables, the compiler will raise a\n"SyntaxError" unless the exec explicitly specifies the local namespace\nfor the "exec". (In other words, "exec obj" would be illegal, but\n"exec obj in ns" would be legal.)\n\nThe "eval()", "execfile()", and "input()" functions and the "exec"\nstatement do not have access to the full environment for resolving\nnames. Names may be resolved in the local and global namespaces of\nthe caller. Free variables are not resolved in the nearest enclosing\nnamespace, but in the global namespace. [1] The "exec" statement and\nthe "eval()" and "execfile()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n',
- 'numbers': u'\nNumeric literals\n****************\n\nThere are four types of numeric literals: plain integers, long\nintegers, floating point numbers, and imaginary numbers. There are no\ncomplex literals (complex numbers can be formed by adding a real\nnumber and an imaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n',
- 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n',
- 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity (currently implemented as its address). An\nobject\'s *type* is also unchangeable. [1] An object\'s type determines\nthe operations that the object supports (e.g., "does it have a\nlength?") and also defines the possible values for objects of that\ntype. The "type()" function returns an object\'s type (which is an\nobject itself). The *value* of some objects can change. Objects\nwhose value can change are said to be *mutable*; objects whose value\nis unchangeable once they are created are called *immutable*. (The\nvalue of an immutable container object that contains a reference to a\nmutable object can change when the latter\'s value is changed; however\nthe container is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. 
An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. Do not depend\non immediate finalization of objects when they become unreachable (ex:\nalways close files).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement provides a convenient way to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. 
So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n',
- 'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedences in Python,\nfrom lowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for comparisons, including\ntests, which all have the same precedence and chain from left to right\n--- see section Comparisons --- and exponentiation, which groups from\nright to left).\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "<>", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| 
"<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [8] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [9] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "`expressions...`" | display, dictionary display, string |\n| | conversion |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] In Python 2.3 and later releases, a list comprehension "leaks"\n the control variables of each "for" it contains into the\n containing scope. However, this behavior is deprecated, and\n relying on it will not work in Python 3.\n\n[2] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". 
The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[3] If x is very close to an exact integer multiple of y, it\'s\n possible for "floor(x/y)" to be one larger than "(x-x%y)/y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[4] While comparisons between unicode strings make sense at the\n byte level, they may be counter-intuitive to users. For example,\n the strings "u"\\u00C7"" and "u"\\u0043\\u0327"" compare differently,\n even though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[5] The implementation computes this efficiently, without\n constructing lists or sorting.\n\n[6] Earlier versions of Python used lexicographic comparison of\n the sorted (key, value) lists, but this was very expensive for the\n common case of comparing for equality. An even earlier version of\n Python compared dictionaries by identity only, but this caused\n surprises because people expected to be able to test a dictionary\n for emptiness by comparing it to "{}".\n\n[7] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[8] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[9] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n',
- 'pass': u'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n',
- 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type. The result type is that of the\narguments after coercion.\n\nWith mixed operand types, the coercion rules for binary arithmetic\noperators apply. For int and long int operands, the result has the\nsame type as the operands (after coercion) unless the second argument\nis negative; in that case, all arguments are converted to float and a\nfloat result is delivered. For example, "10**2" returns "100", but\n"10**-2" returns "0.01". (This last feature was added in Python 2.2.\nIn Python 2.1 and before, if both arguments were of integer types and\nthe second argument was negative, an exception was raised).\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a\n"ValueError".\n',
- 'print': u'\nThe "print" statement\n*********************\n\n print_stmt ::= "print" ([expression ("," expression)* [","]]\n | ">>" expression [("," expression)+ [","]])\n\n"print" evaluates each expression in turn and writes the resulting\nobject to standard output (see below). If an object is not a string,\nit is first converted to a string using the rules for string\nconversions. The (resulting or original) string is then written. A\nspace is written before each object is (converted and) written, unless\nthe output system believes it is positioned at the beginning of a\nline. This is the case (1) when no characters have yet been written\nto standard output, (2) when the last character written to standard\noutput is a whitespace character except "\' \'", or (3) when the last\nwrite operation on standard output was not a "print" statement. (In\nsome cases it may be functional to write an empty string to standard\noutput for this reason.)\n\nNote: Objects which act like file objects but which are not the\n built-in file objects often do not properly emulate this aspect of\n the file object\'s behavior, so it is best not to rely on this.\n\nA "\'\\n\'" character is written at the end, unless the "print" statement\nends with a comma. This is the only action if the statement contains\njust the keyword "print".\n\nStandard output is defined as the file object named "stdout" in the\nbuilt-in module "sys". If no such object exists, or if it does not\nhave a "write()" method, a "RuntimeError" exception is raised.\n\n"print" also has an extended form, defined by the second portion of\nthe syntax described above. This form is sometimes referred to as\n""print" chevron." In this form, the first expression after the ">>"\nmust evaluate to a "file-like" object, specifically an object that has\na "write()" method as described above. With this extended form, the\nsubsequent expressions are printed to this file object. 
If the first\nexpression evaluates to "None", then "sys.stdout" is used as the file\nfor output.\n',
- 'raise': u'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["," expression ["," expression]]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "TypeError" exception is raised indicating that\nthis is an error (if running under IDLE, a "Queue.Empty" exception is\nraised instead).\n\nOtherwise, "raise" evaluates the expressions to get three objects,\nusing "None" as the value of omitted expressions. The first two\nobjects are used to determine the *type* and *value* of the exception.\n\nIf the first object is an instance, the type of the exception is the\nclass of the instance, the instance itself is the value, and the\nsecond object must be "None".\n\nIf the first object is a class, it becomes the type of the exception.\nThe second object is used to determine the exception value: If it is\nan instance of the class, the instance becomes the exception value. If\nthe second object is a tuple, it is used as the argument list for the\nclass constructor; if it is "None", an empty argument list is used,\nand any other object is treated as a single argument to the\nconstructor. The instance so created by calling the constructor is\nused as the exception value.\n\nIf a third object is present and not "None", it must be a traceback\nobject (see section The standard type hierarchy), and it is\nsubstituted instead of the current location as the place where the\nexception occurred. If the third object is present and not a\ntraceback object or "None", a "TypeError" exception is raised. 
The\nthree-expression form of "raise" is useful to re-raise an exception\ntransparently in an except clause, but "raise" with no expressions\nshould be preferred if the exception to be re-raised was the most\nrecently active exception in the current scope.\n\nAdditional information on exceptions can be found in section\nExceptions, and information about handling exceptions is in section\nThe try statement.\n',
- 'return': u'\nThe "return" statement\n**********************\n\n return_stmt ::= "return" [expression_list]\n\n"return" may only occur syntactically nested in a function definition,\nnot within a nested class definition.\n\nIf an expression list is present, it is evaluated, else "None" is\nsubstituted.\n\n"return" leaves the current function call with the expression list (or\n"None") as return value.\n\nWhen "return" passes control out of a "try" statement with a "finally"\nclause, that "finally" clause is executed before really leaving the\nfunction.\n\nIn a generator function, the "return" statement is not allowed to\ninclude an "expression_list". In that context, a bare "return"\nindicates that the generator is done and will cause "StopIteration" to\nbe raised.\n',
- 'sequence-types': u'\nEmulating container types\n*************************\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent of "has_key()"; for sequences,\nit should search through the values. 
It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". 
Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see this section in the\n language reference.\n',
- 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept plain or long integers as arguments. The\narguments are converted to a common type. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as division by "pow(2, n)". A\nleft shift by *n* bits is defined as multiplication with "pow(2, n)".\nNegative shift counts raise a "ValueError" exception.\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n',
- 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= simple_slicing | extended_slicing\n simple_slicing ::= primary "[" short_slice "]"\n extended_slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice | ellipsis\n proper_slice ::= short_slice | long_slice\n short_slice ::= [lower_bound] ":" [upper_bound]\n long_slice ::= short_slice ":" [stride]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n ellipsis ::= "..."\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice nor ellipses). Similarly, when the slice\nlist has exactly one short slice and no trailing comma, the\ninterpretation as a simple slicing takes priority over that as an\nextended slicing.\n\nThe semantics for a simple slicing are as follows. The primary must\nevaluate to a sequence object. The lower and upper bound expressions,\nif present, must evaluate to plain integers; defaults are zero and the\n"sys.maxint", respectively. If either bound is negative, the\nsequence\'s length is added to it. The slicing now selects all items\nwith index *k* such that "i <= k < j" where *i* and *j* are the\nspecified lower and upper bounds. This may be an empty sequence. 
It\nis not an error if *i* or *j* lie outside the range of valid indexes\n(such items don\'t exist so they aren\'t selected).\n\nThe semantics for an extended slicing are as follows. The primary\nmust evaluate to a mapping object, and it is indexed with a key that\nis constructed from the slice list, as follows. If the slice list\ncontains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of an ellipsis slice\nitem is the built-in "Ellipsis" object. The conversion of a proper\nslice is a slice object (see section The standard type hierarchy)\nwhose "start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n',
- 'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\nobject.__methods__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\nobject.__members__\n\n Deprecated since version 2.2: Use the built-in function "dir()" to\n get a list of an object\'s attributes. This attribute is no longer\n available.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\ndefinition.__name__\n\n The name of the class, type, function, method, descriptor, or\n generator instance.\n\nThe following attributes are only supported by *new-style class*es.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each new-style class keeps a list of weak references to its\n immediate subclasses. This method returns a list of all those\n references still alive. 
Example:\n\n >>> int.__subclasses__()\n [<type \'bool\'>]\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (Basic customization).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n\n[6] The advantage of leaving the newline on is that returning an\n empty string is then an unambiguous EOF indication. It is also\n possible (in cases where it might matter, for example, if you want\n to make an exact copy of a file while scanning its lines) to tell\n whether the last line of a file ended in a newline or not (yes\n this happens!).\n',
- 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "x.__getitem__(i)" for old-style\nclasses and "type(x).__getitem__(x, i)" for new-style classes. Except\nwhere mentioned, attempts to execute an operation raise an exception\nwhen no appropriate method is defined (typically "AttributeError" or\n"TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. 
If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_traceback" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing "None" in\n "sys.exc_traceback" or "sys.last_traceback". Circular references\n which are garbage are detected when the option cycle detector is\n enabled (it\'s on by default), but can only be cleaned up if there\n are no Python-level "__del__()" methods involved. 
Refer to the\n documentation for the "gc" module for more information about how\n "__del__()" methods are handled by the cycle detector,\n particularly the description of the "garbage" value.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\n See also the "-R" command-line option.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function and by string conversions\n (reverse quotes) to compute the "official" string representation of\n an object. If at all possible, this should look like a valid\n Python expression that could be used to recreate an object with the\n same value (given an appropriate environment). If this is not\n possible, a string of the form "<...some useful description...>"\n should be returned. The return value must be a string object. 
If a\n class defines "__repr__()" but not "__str__()", then "__repr__()"\n is also used when an "informal" string representation of instances\n of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the "str()" built-in function and by the "print"\n statement to compute the "informal" string representation of an\n object. This differs from "__repr__()" in that it does not have to\n be a valid Python expression: a more convenient or concise\n representation may be used instead. The return value must be a\n string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n New in version 2.1.\n\n These are the so-called "rich comparison" methods, and are called\n for comparison operators in preference to "__cmp__()" below. The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call "x.__ne__(y)",\n "x>y" calls "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. 
See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__cmp__(self, other)\n\n Called by comparison operations if rich comparison (see above) is\n not defined. Should return a negative integer if "self < other",\n zero if "self == other", a positive integer if "self > other". If\n no "__cmp__()", "__eq__()" or "__ne__()" operation is defined,\n class instances are compared by object identity ("address"). See\n also the description of "__hash__()" for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys. (Note: the\n restriction that exceptions are not propagated by "__cmp__()" has\n been removed since Python 1.5.)\n\nobject.__rcmp__(self, other)\n\n Changed in version 2.1: No longer supported.\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. 
using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n If a class does not define a "__cmp__()" or "__eq__()" method it\n should not define a "__hash__()" operation either; if it defines\n "__cmp__()" or "__eq__()" but not "__hash__()", its instances will\n not be usable in hashed collections. If a class defines mutable\n objects and implements a "__cmp__()" or "__eq__()" method, it\n should not implement "__hash__()", since hashable collection\n implementations require that an object\'s hash value is immutable\n (if the object\'s hash value changes, it will be in the wrong hash\n bucket).\n\n User-defined classes have "__cmp__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns a result derived from\n "id(x)".\n\n Classes which inherit a "__hash__()" method from a parent class but\n change the meaning of "__cmp__()" or "__eq__()" such that the hash\n value returned is no longer appropriate (e.g. by switching to a\n value-based concept of equality instead of the default identity\n based equality) can explicitly flag themselves as being unhashable\n by setting "__hash__ = None" in the class definition. 
Doing so\n means that not only will instances of the class raise an\n appropriate "TypeError" when a program attempts to retrieve their\n hash value, but they will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)"\n (unlike classes which define their own "__hash__()" to explicitly\n raise "TypeError").\n\n Changed in version 2.5: "__hash__()" may now also return a long\n integer object; the 32-bit integer is then derived from the hash of\n that object.\n\n Changed in version 2.6: "__hash__" may now be set to "None" to\n explicitly flag instances of a class as unhashable.\n\nobject.__nonzero__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True", or their integer\n equivalents "0" or "1". When this method is not defined,\n "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__nonzero__()", all its instances are\n considered true.\n\nobject.__unicode__(self)\n\n Called to implement "unicode()" built-in; should return a Unicode\n object. When this method is not defined, string conversion is\n attempted, and the result of string conversion is converted to\n Unicode using the system default encoding.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. 
(This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control in new-style classes.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should not simply execute "self.name = value" --- this would cause\n a recursive call to itself. Instead, it should insert the value in\n the dictionary of instance attributes, e.g., "self.__dict__[name] =\n value". For new-style classes, rather than accessing the instance\n dictionary, it should call the base class method with the same\n name, for example, "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\n\nMore attribute access for new-style classes\n-------------------------------------------\n\nThe following methods only apply to new-style classes.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". 
This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See Special method lookup for new-style\n classes.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". 
If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called. Note that descriptors are only invoked for new\nstyle objects or classes (ones that subclass "object()" or "type()").\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to a new-style object instance, "a.x" is transformed\n into the call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a new-style class, "A.x" is transformed into the\n call: "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. 
If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of both old and new-style classes have a\ndictionary for attribute storage. This wastes space for objects\nhaving very few instance variables. The space consumption can become\nacute when creating large numbers of instances.\n\nThe default can be overridden by defining *__slots__* in a new-style\nclass definition. The *__slots__* declaration takes a sequence of\ninstance variables and reserves just enough space in each instance to\nhold a value for each variable. Space is saved because *__dict__* is\nnot created for each instance.\n\n__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. 
If defined in a\n new-style class, *__slots__* reserves space for the declared\n variables and prevents the automatic creation of *__dict__* and\n *__weakref__* for each instance.\n\n New in version 2.2.\n\nNotes on using *__slots__*\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__dict__\'" to the\n *__slots__* declaration would not enable the assignment of new\n attributes not specifically listed in the sequence of instance\n variable names.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n Changed in version 2.3: Previously, adding "\'__weakref__\'" to the\n *__slots__* declaration would not enable support for weak\n references.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (Implementing Descriptors) for each variable name. As a\n result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. 
As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "long", "str" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n Changed in version 2.6: Previously, *__class__* assignment raised an\n error if either new or old class had *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, new-style classes are constructed using "type()". A class\ndefinition is read into a separate namespace and the value of class\nname is bound to the result of "type(name, bases, dict)".\n\nWhen the class definition is read, if *__metaclass__* is defined then\nthe callable assigned to it will be called instead of "type()". This\nallows classes or functions to be written which monitor or alter the\nclass creation process:\n\n* Modifying the class dictionary prior to the class being created.\n\n* Returning an instance of another class -- essentially performing\n the role of a factory function.\n\nThese steps will have to be performed in the metaclass\'s "__new__()"\nmethod -- "type.__new__()" can then be called from this method to\ncreate a class with different properties. 
This example adds a new\nelement to the class dictionary before creating the class:\n\n class metacls(type):\n def __new__(mcs, name, bases, dict):\n dict[\'foo\'] = \'metacls was here\'\n return type.__new__(mcs, name, bases, dict)\n\nYou can of course also override other class methods (or add new\nmethods); for example defining a custom "__call__()" method in the\nmetaclass allows custom behavior when the class is called, e.g. not\nalways creating a new instance.\n\n__metaclass__\n\n This variable can be any callable accepting arguments for "name",\n "bases", and "dict". Upon class creation, the callable is used\n instead of the built-in "type()".\n\n New in version 2.2.\n\nThe appropriate metaclass is determined by the following precedence\nrules:\n\n* If "dict[\'__metaclass__\']" exists, it is used.\n\n* Otherwise, if there is at least one base class, its metaclass is\n used (this looks for a *__class__* attribute first and if not found,\n uses its type).\n\n* Otherwise, if a global variable named __metaclass__ exists, it is\n used.\n\n* Otherwise, the old-style, classic metaclass (types.ClassType) is\n used.\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\n\nCustomizing instance and subclass checks\n========================================\n\nNew in version 2.6.\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. 
If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also:\n\n **PEP 3119** - Introducing Abstract Base Classes\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. (For backwards compatibility, the method\n"__getslice__()" (see below) can also be defined to handle simple, but\nnot extended slices.) 
It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "has_key()", "get()",\n"clear()", "setdefault()", "iterkeys()", "itervalues()",\n"iteritems()", "pop()", "popitem()", "copy()", and "update()" behaving\nsimilar to those for Python\'s standard dictionary objects. The\n"UserDict" module provides a "DictMixin" class to help create those\nmethods from a base set of "__getitem__()", "__setitem__()",\n"__delitem__()", and "keys()". Mutable sequences should provide\nmethods "append()", "count()", "index()", "extend()", "insert()",\n"pop()", "remove()", "reverse()" and "sort()", like Python standard\nlist objects. Finally, sequence types should implement addition\n(meaning concatenation) and multiplication (meaning repetition) by\ndefining the methods "__add__()", "__radd__()", "__iadd__()",\n"__mul__()", "__rmul__()" and "__imul__()" described below; they\nshould not define "__coerce__()" or other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should be equivalent to "has_key()"; for sequences,\nit should search through the values. It is further recommended that\nboth mappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "iterkeys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__nonzero__()" method and whose "__len__()"\n method returns zero is considered to be false in a Boolean context.\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. 
Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container, and should also be made\n available as the method "iterkeys()".\n\n Iterator objects also need to implement this method; they are\n required to return themselves. 
For more information on iterator\n objects, see Iterator Types.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\n New in version 2.6.\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see this section in the\n language reference.\n\n\nAdditional methods for emulation of sequence types\n==================================================\n\nThe following optional methods can be defined to further emulate\nsequence objects. Immutable sequences methods should at most only\ndefine "__getslice__()"; mutable sequences might define all three\nmethods.\n\nobject.__getslice__(self, i, j)\n\n Deprecated since version 2.0: Support slice objects as parameters\n to the "__getitem__()" method. (However, built-in types in CPython\n currently still implement "__getslice__()". 
Therefore, you have to\n override it in derived classes when implementing slicing.)\n\n Called to implement evaluation of "self[i:j]". The returned object\n should be of the same type as *self*. Note that missing *i* or *j*\n in the slice expression are replaced by zero or "sys.maxsize",\n respectively. If negative indexes are used in the slice, the\n length of the sequence is added to that index. If the instance does\n not implement the "__len__()" method, an "AttributeError" is\n raised. No guarantee is made that indexes adjusted this way are not\n still negative. Indexes which are greater than the length of the\n sequence are not modified. If no "__getslice__()" is found, a slice\n object is created, and passed to "__getitem__()" instead.\n\nobject.__setslice__(self, i, j, sequence)\n\n Called to implement assignment to "self[i:j]". Same notes for *i*\n and *j* as for "__getslice__()".\n\n This method is deprecated. If no "__setslice__()" is found, or for\n extended slicing of the form "self[i:j:k]", a slice object is\n created, and passed to "__setitem__()", instead of "__setslice__()"\n being called.\n\nobject.__delslice__(self, i, j)\n\n Called to implement deletion of "self[i:j]". Same notes for *i* and\n *j* as for "__getslice__()". This method is deprecated. If no\n "__delslice__()" is found, or for extended slicing of the form\n "self[i:j:k]", a slice object is created, and passed to\n "__delitem__()", instead of "__delslice__()" being called.\n\nNotice that these methods are only invoked when a single slice with a\nsingle colon is used, and the slice method is available. 
For slice\noperations involving extended slice notation, or in absence of the\nslice methods, "__getitem__()", "__setitem__()" or "__delitem__()" is\ncalled with a slice object as argument.\n\nThe following example demonstrate how to make your program or module\ncompatible with earlier versions of Python (assuming that methods\n"__getitem__()", "__setitem__()" and "__delitem__()" support slice\nobjects as arguments):\n\n class MyClass:\n ...\n def __getitem__(self, index):\n ...\n def __setitem__(self, index, value):\n ...\n def __delitem__(self, index):\n ...\n\n if sys.version_info < (2, 0):\n # They won\'t be defined if version is at least 2.0 final\n\n def __getslice__(self, i, j):\n return self[max(0, i):max(0, j):]\n def __setslice__(self, i, j, seq):\n self[max(0, i):max(0, j):] = seq\n def __delslice__(self, i, j):\n del self[max(0, i):max(0, j):]\n ...\n\nNote the calls to "max()"; these are necessary because of the handling\nof negative indices before the "__*slice__()" methods are called.\nWhen negative indexes are used, the "__*item__()" methods receive them\nas provided, but the "__*slice__()" methods get a "cooked" form of the\nindex values. For each negative index value, the length of the\nsequence is added to the index before calling the method (which may\nstill result in a negative index); this is the customary handling of\nnegative indexes by the built-in sequence types, and the "__*item__()"\nmethods are expected to do this as well. However, since they should\nalready be doing that, negative indexes cannot be passed in; they must\nbe constrained to the bounds of the sequence before being passed to\nthe "__*item__()" methods. 
Calling "max(0, i)" conveniently returns\nthe proper value.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "//", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()" (described\n below). Note that "__pow__()" should be defined to accept an\n optional third argument if the ternary version of the built-in\n "pow()" function is to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__div__(self, other)\nobject.__truediv__(self, other)\n\n The division operator ("/") is implemented by these methods. The\n "__truediv__()" method is used when "__future__.division" is in\n effect, otherwise "__div__()" is used. 
If only one of these two\n methods is defined, the object will not support division in the\n alternate context; "TypeError" will be raised instead.\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rdiv__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "%", "divmod()", "pow()", "**",\n "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__idiv__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, to execute the statement "x += y", where\n *x* is an instance of a class that has an "__iadd__()" method,\n "x.__iadd__(y)" is called. If *x* is an instance of a class that\n does not define a "__iadd__()" method, "x.__add__(y)" and\n "y.__radd__(x)" are considered, as with the evaluation of "x + y".\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__long__(self)\nobject.__float__(self)\n\n Called to implement the built-in functions "complex()", "int()",\n "long()", and "float()". Should return a value of the appropriate\n type.\n\nobject.__oct__(self)\nobject.__hex__(self)\n\n Called to implement the built-in functions "oct()" and "hex()".\n Should return a string value.\n\nobject.__index__(self)\n\n Called to implement "operator.index()". Also called whenever\n Python needs an integer object (such as in slicing). 
Must return\n an integer (int or long).\n\n New in version 2.5.\n\nobject.__coerce__(self, other)\n\n Called to implement "mixed-mode" numeric arithmetic. Should either\n return a 2-tuple containing *self* and *other* converted to a\n common numeric type, or "None" if conversion is impossible. When\n the common type would be the type of "other", it is sufficient to\n return "None", since the interpreter will also ask the other object\n to attempt a coercion (but sometimes, if the implementation of the\n other type cannot be changed, it is useful to do the conversion to\n the other type here). A return value of "NotImplemented" is\n equivalent to returning "None".\n\n\nCoercion rules\n==============\n\nThis section used to document the rules for coercion. As the language\nhas evolved, the coercion rules have become hard to document\nprecisely; documenting what one version of one particular\nimplementation does is undesirable. Instead, here are some informal\nguidelines regarding coercion. In Python 3, coercion will not be\nsupported.\n\n* If the left operand of a % operator is a string or Unicode object,\n no coercion takes place and the string formatting operation is\n invoked instead.\n\n* It is no longer recommended to define a coercion operation. Mixed-\n mode operations on types that don\'t define coercion pass the\n original arguments to the operation.\n\n* New-style classes (those derived from "object") never invoke the\n "__coerce__()" method in response to a binary operator; the only\n time "__coerce__()" is invoked is when the built-in function\n "coerce()" is called.\n\n* For most intents and purposes, an operator that returns\n "NotImplemented" is treated the same as one that is not implemented\n at all.\n\n* Below, "__op__()" and "__rop__()" are used to signify the generic\n method names corresponding to an operator; "__iop__()" is used for\n the corresponding in-place operator. 
For example, for the operator\n \'"+"\', "__add__()" and "__radd__()" are used for the left and right\n variant of the binary operator, and "__iadd__()" for the in-place\n variant.\n\n* For objects *x* and *y*, first "x.__op__(y)" is tried. If this is\n not implemented or returns "NotImplemented", "y.__rop__(x)" is\n tried. If this is also not implemented or returns "NotImplemented",\n a "TypeError" exception is raised. But see the following exception:\n\n* Exception to the previous item: if the left operand is an instance\n of a built-in type or a new-style class, and the right operand is an\n instance of a proper subclass of that type or class and overrides\n the base\'s "__rop__()" method, the right operand\'s "__rop__()"\n method is tried *before* the left operand\'s "__op__()" method.\n\n This is done so that a subclass can completely override binary\n operators. Otherwise, the left operand\'s "__op__()" method would\n always accept the right operand: when an instance of a given class\n is expected, an instance of a subclass of that class is always\n acceptable.\n\n* When either operand type defines a coercion, this coercion is\n called before that type\'s "__op__()" or "__rop__()" method is\n called, but no sooner. 
If the coercion returns an object of a\n different type for the operand whose coercion is invoked, part of\n the process is redone using the new object.\n\n* When an in-place operator (like \'"+="\') is used, if the left\n operand implements "__iop__()", it is invoked without any coercion.\n When the operation falls back to "__op__()" and/or "__rop__()", the\n normal coercion rules apply.\n\n* In "x + y", if *x* is a sequence that implements sequence\n concatenation, sequence concatenation is invoked.\n\n* In "x * y", if one operand is a sequence that implements sequence\n repetition, and the other is an integer ("int" or "long"), sequence\n repetition is invoked.\n\n* Rich comparisons (implemented by methods "__eq__()" and so on)\n never use coercion. Three-way comparison (implemented by\n "__cmp__()") does use coercion under the same conditions as other\n binary operations use it.\n\n* In the current implementation, the built-in numeric types "int",\n "long", "float", and "complex" do not use coercion. All these types\n implement a "__coerce__()" method, for use by the built-in\n "coerce()" function.\n\n Changed in version 2.7: The complex type no longer makes implicit\n calls to the "__coerce__()" method for mixed-type binary arithmetic\n operations.\n\n\nWith Statement Context Managers\n===============================\n\nNew in version 2.5.\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. 
Context managers are normally\ninvoked using the "with" statement (described in section The with\nstatement), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see Context Manager Types.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup for old-style classes\n===========================================\n\nFor old-style classes, special methods are always looked up in exactly\nthe same way as any other method or attribute. This is the case\nregardless of whether the method is being looked up explicitly as in\n"x.__getitem__(i)" or implicitly as in "x[i]".\n\nThis behaviour means that special methods may exhibit different\nbehaviour for different instances of a single old-style class if the\nappropriate special attributes are set differently:\n\n >>> class C:\n ... 
pass\n ...\n >>> c1 = C()\n >>> c2 = C()\n >>> c1.__len__ = lambda: 5\n >>> c2.__len__ = lambda: 9\n >>> len(c1)\n 5\n >>> len(c2)\n 9\n\n\nSpecial method lookup for new-style classes\n===========================================\n\nFor new-style classes, implicit invocations of special methods are\nonly guaranteed to work correctly if defined on an object\'s type, not\nin the object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception (unlike the equivalent example\nwith old-style classes):\n\n >>> class C(object):\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print "Metaclass getattribute invoked"\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object):\n ... 
__metaclass__ = Meta\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print "Class getattribute invoked"\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n',
- 'string-methods': u'\nString Methods\n**************\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section Codec\n Base Classes.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. 
*errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section Codec Base\n Classes. For a list of possible encodings, see section Standard\n Encodings.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. 
Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found within the slice "s[start:end]". Optional arguments *start*\n and *end* are interpreted as in slice notation. Return "-1" if\n *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. 
Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See Format String Syntax for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in String\n Formatting Operations in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are 
uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. 
If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. 
Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n Python recognizes ""\\r"", ""\\n"", and ""\\r\\n"" as line boundaries\n for 8-bit strings.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', \'\']\n\nunicode.splitlines([keepends])\n\n Return a list of the lines in the string, like "str.splitlines()".\n However, the Unicode method splits on the following line\n boundaries, which are a superset of the *universal newlines*\n recognized for 8-bit strings.\n\n +-------------------------+-------------------------------+\n | Representation | Description |\n +=========================+===============================+\n | "\\n" | Line Feed |\n +-------------------------+-------------------------------+\n | "\\r" | Carriage Return |\n +-------------------------+-------------------------------+\n | "\\r\\n" | Carriage Return + Line Feed |\n +-------------------------+-------------------------------+\n | "\\v" or "\\x0b" | Line Tabulation |\n +-------------------------+-------------------------------+\n | "\\f" or "\\x0c" | Form Feed |\n +-------------------------+-------------------------------+\n | "\\x1c" | File Separator |\n +-------------------------+-------------------------------+\n | "\\x1d" | Group Separator |\n +-------------------------+-------------------------------+\n | "\\x1e" | Record Separator |\n +-------------------------+-------------------------------+\n | 
"\\x85" | Next Line (C1 Control Code) |\n +-------------------------+-------------------------------+\n | "\\u2028" | Line Separator |\n +-------------------------+-------------------------------+\n | "\\u2029" | Paragraph Separator |\n +-------------------------+-------------------------------+\n\n Changed in version 2.7: "\\v" and "\\f" added to list of line\n boundaries.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. 
Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n',
- 'strings': u'\nString literals\n***************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" | "uR"\n | "b" | "B" | "br" | "Br" | "bR" | "BR"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | escapeseq\n longstringitem ::= longstringchar | escapeseq\n shortstringchar ::= <any source character except "\\" or newline or the quote>\n longstringchar ::= <any source character except "\\">\n escapeseq ::= "\\" <any ASCII character>\n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" and the rest of\nthe string literal. The source character set is defined by the\nencoding declaration; it is ASCII if no encoding declaration is given\nin the source file; see section Encoding declarations.\n\nIn plain English: String literals can be enclosed in matching single\nquotes ("\'") or double quotes ("""). They can also be enclosed in\nmatching groups of three single or double quotes (these are generally\nreferred to as *triple-quoted strings*). The backslash ("\\")\ncharacter is used to escape characters that otherwise have a special\nmeaning, such as newline, backslash itself, or the quote character.\nString literals may optionally be prefixed with a letter "\'r\'" or\n"\'R\'"; such strings are called *raw strings* and use different rules\nfor interpreting backslash escape sequences. A prefix of "\'u\'" or\n"\'U\'" makes the string a Unicode string. Unicode strings use the\nUnicode character set as defined by the Unicode Consortium and ISO\n10646. Some additional escape sequences, described below, are\navailable in Unicode strings. 
A prefix of "\'b\'" or "\'B\'" is ignored in\nPython 2; it indicates that the literal should become a bytes literal\nin Python 3 (e.g. when code is automatically converted with 2to3). A\n"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n\nIn triple-quoted strings, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the string. (A "quote" is the character used to open the\nstring, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in\nstrings are interpreted according to rules similar to those used by\nStandard C. The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\N{name}" | Character named *name* in the | |\n| | Unicode database (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | 
|\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (1) |\n| | *xxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (2) |\n| | *xxxxxxxx* (Unicode only) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (3,5) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (4,5) |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence.\n\n2. Any Unicode character can be encoded this way, but characters\n outside the Basic Multilingual Plane (BMP) will be encoded using a\n surrogate pair if Python is compiled to use 16-bit code units (the\n default).\n\n3. As in Standard C, up to three octal digits are accepted.\n\n4. Unlike in Standard C, exactly two hex digits are required.\n\n5. In a string literal, hexadecimal and octal escapes denote the\n byte with the given value; it is not necessary that the byte\n encodes a character in the source character set. In a Unicode\n literal, these escapes denote a Unicode character with the given\n value.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the string*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
It is also\nimportant to note that the escape sequences marked as "(Unicode only)"\nin the table above fall into the category of unrecognized escapes for\nnon-Unicode string literals.\n\nWhen an "\'r\'" or "\'R\'" prefix is present, a character following a\nbackslash is included in the string without change, and *all\nbackslashes are left in the string*. For example, the string literal\n"r"\\n"" consists of two characters: a backslash and a lowercase "\'n\'".\nString quotes can be escaped with a backslash, but the backslash\nremains in the string; for example, "r"\\""" is a valid string literal\nconsisting of two characters: a backslash and a double quote; "r"\\""\nis not a valid string literal (even a raw string cannot end in an odd\nnumber of backslashes). Specifically, *a raw string cannot end in a\nsingle backslash* (since the backslash would escape the following\nquote character). Note also that a single backslash followed by a\nnewline is interpreted as those two characters as part of the string,\n*not* as a line continuation.\n\nWhen an "\'r\'" or "\'R\'" prefix is used in conjunction with a "\'u\'" or\n"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape sequences are\nprocessed while *all other backslashes are left in the string*. For\nexample, the string literal "ur"\\u0062\\n"" consists of three Unicode\ncharacters: \'LATIN SMALL LETTER B\', \'REVERSE SOLIDUS\', and \'LATIN\nSMALL LETTER N\'. Backslashes can be escaped with a preceding\nbackslash; however, both remain in the string. As a result, "\\uXXXX"\nescape sequences are only recognized when there are an odd number of\nbackslashes.\n',
- 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object of a sequence or mapping type.\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to a\nplain integer. If this value is negative, the length of the sequence\nis added to it (so that, e.g., "x[-1]" selects the last item of "x".)\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero).\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n',
- 'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__nonzero__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. (Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n',
- 'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression [("as" | ",") identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nChanged in version 2.5: In previous versions of Python,\n"try"..."except"..."finally" did not work. "try"..."except" had to be\nnested in "try"..."finally".\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject, or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified in that except clause, if present, and the except\nclause\'s suite is executed. All except clauses must have an\nexecutable block. 
When the end of this block is reached, execution\ncontinues normally after the entire try statement. (This means that\nif two nested handlers exist for the same exception, and the exception\noccurs in the try clause of the inner handler, the outer handler will\nnot handle the exception.)\n\nBefore an except clause\'s suite is executed, details about the\nexception are assigned to three variables in the "sys" module:\n"sys.exc_type" receives the object identifying the exception;\n"sys.exc_value" receives the exception\'s parameter;\n"sys.exc_traceback" receives a traceback object (see section The\nstandard type hierarchy) identifying the point in the program where\nthe exception occurred. These details are also available through the\n"sys.exc_info()" function, which returns a tuple "(exc_type,\nexc_value, exc_traceback)". Use of the corresponding variables is\ndeprecated in favor of this function, since their use is unsafe in a\nthreaded program. As of Python 1.5, the variables are restored to\ntheir previous values (before the call) when returning from a function\nthat handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception, it is re-raised at the end of the\n"finally" clause. If the "finally" clause raises another exception or\nexecutes a "return" or "break" statement, the saved exception is\ndiscarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... 
return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\nExceptions, and information on using the "raise" statement to generate\nexceptions may be found in section The raise statement.\n',
- 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.).\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. (The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "Ellipsis". It is used to indicate the presence of the "..." syntax\n in a slice. Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are three types of integers:\n\n Plain integers\n These represent numbers in the range -2147483648 through\n 2147483647. (The range may be larger on machines with a\n larger natural word size, but not smaller.) When the result\n of an operation would fall outside this range, the result is\n normally returned as a long integer (in some cases, the\n exception "OverflowError" is raised instead). For the\n purpose of shift and mask operations, integers are assumed to\n have a binary, 2\'s complement notation using 32 or more bits,\n and hiding no bits from the user (i.e., all 4294967296\n different bit patterns correspond to different values).\n\n Long integers\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. 
The Boolean type is a subtype of plain\n integers, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers and the least surprises when\n switching between the plain and long integer domains. Any\n operation, if it yields a result in the plain integer domain,\n will yield the same result in the long integer domain or when\n using mixed operands. The switch between domains is transparent\n to the programmer.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex"\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. 
Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n The items of a string are characters. There is no separate\n character type; a character is represented by a string of one\n item. Characters represent (at least) 8-bit bytes. The\n built-in functions "chr()" and "ord()" convert between\n characters and nonnegative integers representing the byte\n values. Bytes with the values 0--127 usually represent the\n corresponding ASCII values, but the interpretation of values\n is up to the program. The string data type is also used to\n represent arrays of bytes, e.g., to hold data read from a\n file.\n\n (On systems whose native character set is not ASCII, strings\n may use EBCDIC in their internal representation, provided the\n functions "chr()" and "ord()" implement a mapping between\n ASCII and EBCDIC, and string comparison preserves the ASCII\n order. Or perhaps someone can propose a better rule?)\n\n Unicode\n The items of a Unicode object are Unicode code units. 
A\n Unicode code unit is represented by a Unicode object of one\n item and can hold either a 16-bit or 32-bit value\n representing a Unicode ordinal (the maximum value for the\n ordinal is given in "sys.maxunicode", and depends on how\n Python is configured at compile time). Surrogate pairs may\n be present in the Unicode object, and will be reported as two\n separate items. The built-in functions "unichr()" and\n "ord()" convert between code units and nonnegative integers\n representing the Unicode ordinals as defined in the Unicode\n Standard 3.0. Conversion from and to other encodings are\n possible through the Unicode method "encode()" and the built-\n in function "unicode()".\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. 
Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. 
The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section Dictionary displays).\n\n The extension modules "dbm", "gdbm", and "bsddb" provide\n additional examples of mapping types.\n\nCallable types\n These are the types to which the function call operation (see\n section Calls) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section Function definitions). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +-------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +=========================+=================================+=============+\n | "__doc__" "func_doc" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable. | |\n +-------------------------+---------------------------------+-------------+\n | "__name__" "func_name" | The function\'s name | Writable |\n +-------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. 
| |\n +-------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | "func_defaults" | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value. | |\n +-------------------------+---------------------------------+-------------+\n | "__code__" "func_code" | The code object representing | Writable |\n | | the compiled function body. | |\n +-------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | "func_globals" | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +-------------------------+---------------------------------+-------------+\n | "__dict__" "func_dict" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +-------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | "func_closure" | contain bindings for the | |\n | | function\'s free variables. | |\n +-------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Changed in version 2.4: "func_name" is now writable.\n\n Changed in version 2.6: The double-underscore attributes\n "__closure__", "__code__", "__defaults__", and "__globals__"\n were introduced as aliases for the corresponding "func_*"\n attributes for forwards compatibility with Python 3.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. 
*Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n User-defined methods\n A user-defined method object combines a class, a class instance\n (or "None") and any callable object (normally a user-defined\n function).\n\n Special read-only attributes: "im_self" is the class instance\n object, "im_func" is the function object; "im_class" is the\n class of "im_self" for bound methods or the class that asked for\n the method for unbound methods; "__doc__" is the method\'s\n documentation (same as "im_func.__doc__"); "__name__" is the\n method name (same as "im_func.__name__"); "__module__" is the\n name of the module the method was defined in, or "None" if\n unavailable.\n\n Changed in version 2.2: "im_self" used to refer to the class\n that defined the method.\n\n Changed in version 2.6: For Python 3 forward-compatibility,\n "im_func" is also available as "__func__", and "im_self" as\n "__self__".\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object, an unbound\n user-defined method object, or a class method object. 
When the\n attribute is a user-defined method object, a new method object\n is only created if the class from which it is being retrieved is\n the same as, or a derived class of, the class stored in the\n original method object; otherwise, the original method object is\n used as it is.\n\n When a user-defined method object is created by retrieving a\n user-defined function object from a class, its "im_self"\n attribute is "None" and the method object is said to be unbound.\n When one is created by retrieving a user-defined function object\n from a class via one of its instances, its "im_self" attribute\n is the instance, and the method object is said to be bound. In\n either case, the new method\'s "im_class" attribute is the class\n from which the retrieval takes place, and its "im_func"\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "im_func"\n attribute of the new instance is not the original method object\n but its "im_func" attribute.\n\n When a user-defined method object is created by retrieving a\n class method object from a class or instance, its "im_self"\n attribute is the class itself, and its "im_func" attribute is\n the function object underlying the class method.\n\n When an unbound user-defined method object is called, the\n underlying function ("im_func") is called, with the restriction\n that the first argument must be an instance of the proper class\n ("im_class") or of a derived class thereof.\n\n When a bound user-defined method object is called, the\n underlying function ("im_func") is called, inserting the class\n instance ("im_self") in front of the argument list. 
For\n instance, when "C" is a class which contains a definition for a\n function "f()", and "x" is an instance of "C", calling "x.f(1)"\n is equivalent to calling "C.f(x, 1)".\n\n When a user-defined method object is derived from a class method\n object, the "class instance" stored in "im_self" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to (unbound or\n bound) method object happens each time the attribute is\n retrieved from the class or instance. In some cases, a fruitful\n optimization is to assign the attribute to a local variable and\n call that local variable. Also notice that this transformation\n only happens for user-defined functions; other callable objects\n (and all non-callable objects) are retrieved without\n transformation. It is also important to note that user-defined\n functions which are attributes of a class instance are not\n converted to bound methods; this *only* happens when the\n function is an attribute of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section The yield statement) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "next()" method will cause the function to\n execute until it provides a value using the "yield" statement.\n When the function executes a "return" statement or falls off the\n end, a "StopIteration" exception is raised and the iterator will\n have reached the end of the set of values to be returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. 
Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Class Types\n Class types, or "new-style classes," are callable. These\n objects normally act as factories for new instances of\n themselves, but variations are possible for class types that\n override "__new__()". The arguments of the call are passed to\n "__new__()" and, in the typical case, to "__init__()" to\n initialize the new instance.\n\n Classic Classes\n Class objects are described below. When a class object is\n called, a new class instance (also described below) is created\n and returned. This implies a call to the class\'s "__init__()"\n method if it has one. Any arguments are passed on to the\n "__init__()" method. If there is no "__init__()" method, the\n class must be called without arguments.\n\n Class instances\n Class instances are described below. Class instances are\n callable only when the class has a "__call__()" method;\n "x(arguments)" is a shorthand for "x.__call__(arguments)".\n\nModules\n Modules are imported by the "import" statement (see section The\n import statement). A module object has a namespace implemented by a\n dictionary object (this is the dictionary referenced by the\n func_globals attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., "m.x" is equivalent to "m.__dict__["x"]". 
A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute is not present for C modules that are statically linked\n into the interpreter; for extension modules loaded dynamically from\n a shared library, it is the pathname of the shared library file.\n\nClasses\n Both class types (new-style classes) and class objects (old-\n style/classic classes) are typically created by class definitions\n (see section Class definitions). A class has a namespace\n implemented by a dictionary object. Class attribute references are\n translated to lookups in this dictionary, e.g., "C.x" is translated\n to "C.__dict__["x"]" (although for new-style classes in particular\n there are a number of hooks which allow for other means of locating\n attributes). When the attribute name is not found there, the\n attribute search continues in the base classes. For old-style\n classes, the search is depth-first, left-to-right in the order of\n occurrence in the base class list. 
New-style classes use the more\n complex C3 method resolution order which behaves correctly even in\n the presence of \'diamond\' inheritance structures where there are\n multiple inheritance paths leading back to a common ancestor.\n Additional details on the C3 MRO used by new-style classes can be\n found in the documentation accompanying the 2.3 release at\n https://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n user-defined function object or an unbound user-defined method\n object whose associated class is either "C" or one of its base\n classes, it is transformed into an unbound user-defined method\n object whose "im_class" attribute is "C". When it would yield a\n class method object, it is transformed into a bound user-defined\n method object whose "im_self" attribute is "C". When it would\n yield a static method object, it is transformed into the object\n wrapped by the static method object. See section Implementing\n Descriptors for another way in which attributes retrieved from a\n class may differ from those actually contained in its "__dict__"\n (note that only new-style classes support descriptors).\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or "None" if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute 
references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object or an unbound user-defined method object whose\n associated class is the class (call it "C") of the instance for\n which the attribute reference was initiated or one of its bases, it\n is transformed into a bound user-defined method object whose\n "im_class" attribute is "C" and whose "im_self" attribute is the\n instance. Static method and class method objects are also\n transformed, as if they had been retrieved from class "C"; see\n above under "Classes". See section Implementing Descriptors for\n another way in which attributes of a class retrieved via its\n instances may differ from the objects actually stored in the\n class\'s "__dict__". If no class attribute is found, and the\n object\'s class has a "__getattr__()" method, that is called to\n satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n Special method names.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nFiles\n A file object represents an open file. File objects are created by\n the "open()" built-in function, and also by "os.popen()",\n "os.fdopen()", and the "makefile()" method of socket objects (and\n perhaps by other functions or methods provided by extension\n modules). The objects "sys.stdin", "sys.stdout" and "sys.stderr"\n are initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams. 
See File Objects for\n complete documentation of file objects.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" 
is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. 
They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names;\n "f_restricted" is a flag indicating whether the function is\n executing in restricted execution mode; "f_lasti" gives the\n precise instruction (this is an index into the bytecode string\n of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_exc_type", "f_exc_value",\n "f_exc_traceback" represent the last exception raised in the\n parent frame provided another exception was ever raised in the\n current frame (in all other cases they are "None"); "f_lineno"\n is the current line number of the frame --- writing to this from\n within a trace function jumps to the given line (only for the\n bottom-most frame). A debugger can implement a Jump command\n (aka Set Next Statement) by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n The try statement.) It is accessible as "sys.exc_traceback", and\n also as the third item of the tuple returned by\n "sys.exc_info()". 
The latter is the preferred interface, since\n it works correctly when the program is using multiple threads.\n When the program contains no suitable handler, the stack trace\n is written (nicely formatted) to the standard error stream; if\n the interpreter is interactive, it is also made available to the\n user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices when *extended slice\n syntax* is used. This is a slice using two colons, or multiple\n slices or ellipses separated by commas, e.g., "a[i:j:step]",\n "a[i:j, k:l]", or "a[..., i:j]". They are also created by the\n built-in "slice()" function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the extended slice that the slice\n object would describe if applied to a sequence of *length*\n items. It returns a tuple of three integers; respectively\n these are the *start* and *stop* indices and the *step* or\n stride length of the slice. 
Missing or out-of-bounds indices\n are handled in a manner consistent with regular slices.\n\n New in version 2.3.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n',
- 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee Function definitions for more information.\n',
- 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass dict(**kwarg)\nclass dict(mapping, **kwarg)\nclass dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. 
If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for building a dictionary from\n keyword arguments added.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()" and *key*\n is not present, the "d[key]" operation calls that method with\n the key *key* as argument. The "d[key]" operation then returns\n or raises whatever is returned or raised by the\n "__missing__(key)" call. No other operations or methods invoke\n "__missing__()". If "__missing__()" is not defined, "KeyError"\n is raised. "__missing__()" must be a method; it cannot be an\n instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... 
return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n The example above shows part of the implementation of\n "collections.Counter". A different "__missing__" method is used\n by "collections.defaultdict".\n\n New in version 2.5: Recognition of __missing__ methods of dict\n subclasses.\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n New in version 2.2.\n\n key not in d\n\n Equivalent to "not key in d".\n\n New in version 2.2.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iterkeys()".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n New in version 2.3.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n has_key(key)\n\n Test for the presence of *key* in the dictionary. "has_key()"\n is deprecated in favor of "key in d".\n\n items()\n\n Return a copy of the dictionary\'s list of "(key, value)" pairs.\n\n **CPython implementation detail:** Keys and values are listed in\n an arbitrary order which is non-random, varies across Python\n implementations, and depends on the dictionary\'s history of\n insertions and deletions.\n\n If "items()", "keys()", "values()", "iteritems()", "iterkeys()",\n and "itervalues()" are called with no intervening modifications\n to the dictionary, the lists will directly correspond. 
This\n allows the creation of "(value, key)" pairs using "zip()":\n "pairs = zip(d.values(), d.keys())". The same relationship\n holds for the "iterkeys()" and "itervalues()" methods: "pairs =\n zip(d.itervalues(), d.iterkeys())" provides the same value for\n "pairs". Another way to create the same list is "pairs = [(v, k)\n for (k, v) in d.iteritems()]".\n\n iteritems()\n\n Return an iterator over the dictionary\'s "(key, value)" pairs.\n See the note for "dict.items()".\n\n Using "iteritems()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n iterkeys()\n\n Return an iterator over the dictionary\'s keys. See the note for\n "dict.items()".\n\n Using "iterkeys()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n itervalues()\n\n Return an iterator over the dictionary\'s values. See the note\n for "dict.items()".\n\n Using "itervalues()" while adding or deleting entries in the\n dictionary may raise a "RuntimeError" or fail to iterate over\n all entries.\n\n New in version 2.2.\n\n keys()\n\n Return a copy of the dictionary\'s list of keys. See the note\n for "dict.items()".\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n New in version 2.3.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. 
*default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n Changed in version 2.4: Allowed the argument to be an iterable\n of key/value pairs and allowed keyword arguments.\n\n values()\n\n Return a copy of the dictionary\'s list of values. See the note\n for "dict.items()".\n\n viewitems()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See below for documentation of view objects.\n\n New in version 2.7.\n\n viewkeys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n viewvalues()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n New in version 2.7.\n\n Dictionaries compare equal if and only if they have the same "(key,\n value)" pairs.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.viewkeys()", "dict.viewvalues()" and\n"dict.viewitems()" are *view objects*. They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. 
If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that (key, value) pairs are unique and\nhashable, then the items view is also set-like. (Values views are not\ntreated as set-like since the entries are generally not unique.) Then\nthese set operations are available ("other" refers either to another\nview or a set):\n\ndictview & other\n\n Return the intersection of the dictview and the other object as a\n new set.\n\ndictview | other\n\n Return the union of the dictview and the other object as a new set.\n\ndictview - other\n\n Return the difference between the dictview and the other object\n (all elements in *dictview* that aren\'t in *other*) as a new set.\n\ndictview ^ other\n\n Return the symmetric difference (all elements either in *dictview*\n or *other*, but not in both) of the dictview and the other object\n as a new set.\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.viewkeys()\n >>> values = dishes.viewvalues()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... 
n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n',
- 'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nThe implementation adds two special read-only attributes to class\ninstance methods: "m.im_self" is the object on which the method\noperates, and "m.im_func" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n\nClass instance methods are either *bound* or *unbound*, referring to\nwhether the method was accessed through an instance or a class,\nrespectively. When a method is unbound, its "im_self" attribute will\nbe "None" and if called, an explicit "self" object must be passed as\nthe first argument. In this case, "self" must be an instance of the\nunbound method\'s class (or a subclass of that class), otherwise a\n"TypeError" is raised.\n\nLike function objects, methods objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.im_func"), setting method\nattributes on either bound or unbound methods is disallowed.\nAttempting to set an attribute on a method results in an\n"AttributeError" being raised. In order to set a method attribute,\nyou need to explicitly set it on the underlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n AttributeError: \'instancemethod\' object has no attribute \'whoami\'\n >>> c.method.im_func.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee The standard type hierarchy for more information.\n',
- 'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. (Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "<module\n\'sys\' (built-in)>". If loaded from a file, they are written as\n"<module \'os\' from \'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
- 'typesseq': u'\nSequence Types --- "str", "unicode", "list", "tuple", "bytearray", "buffer", "xrange"\n*************************************************************************************\n\nThere are seven sequence types: strings, Unicode strings, lists,\ntuples, bytearrays, buffers, and xrange objects.\n\nFor other containers see the built in "dict" and "set" classes, and\nthe "collections" module.\n\nString literals are written in single or double quotes: "\'xyzzy\'",\n""frobozz"". See String literals for more about string literals.\nUnicode strings are much like strings, but are specified in the syntax\nusing a preceding "\'u\'" character: "u\'abc\'", "u"def"". In addition to\nthe functionality described here, there are also string-specific\nmethods described in the String Methods section. Lists are constructed\nwith square brackets, separating items with commas: "[a, b, c]".\nTuples are constructed by the comma operator (not within square\nbrackets), with or without enclosing parentheses, but an empty tuple\nmust have the enclosing parentheses, such as "a, b, c" or "()". A\nsingle item tuple must have a trailing comma, such as "(d,)".\n\nBytearray objects are created with the built-in function\n"bytearray()".\n\nBuffer objects are not directly supported by Python syntax, but can be\ncreated by calling the built-in function "buffer()". They don\'t\nsupport concatenation or repetition.\n\nObjects of type xrange are similar to buffers in that there is no\nspecific syntax to create them, but they are created using the\n"xrange()" function. They don\'t support slicing, concatenation or\nrepetition, and using "in", "not in", "min()" or "max()" on them is\ninefficient.\n\nMost sequence types support the following operations. The "in" and\n"not in" operations have the same priorities as the comparison\noperations. The "+" and "*" operations have the same priority as the\ncorresponding numeric operations. 
[3] Additional methods are provided\nfor Mutable Sequence Types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type; *n*, *i* and\n*j* are integers:\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+--------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+--------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| "s * n, n * s" | equivalent to adding *s* to | (2) |\n| | itself *n* times | |\n+--------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+--------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| "s.index(x)" | index of the first occurrence of | |\n| | *x* in *s* | |\n+--------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | 
|\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must compare\nequal and the two sequences must be of the same type and have the same\nlength. (For full details see Comparisons in the language reference.)\n\nNotes:\n\n1. When *s* is a string or Unicode string object the "in" and "not\n in" operations act like a substring test. In Python versions\n before 2.3, *x* had to be a string of length 1. In Python 2.3 and\n beyond, *x* may be a string of any length.\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note that items in the\n sequence *s* are not copied; they are referenced multiple times.\n This often haunts new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are references\n to this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n Further explanation is available in the FAQ entry How do I create a\n multidimensional list?.\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". 
If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. **CPython implementation detail:** If *s* and *t* are both\n strings, some Python implementations such as CPython can usually\n perform an in-place optimization for assignments of the form "s = s\n + t" or "s += t". When applicable, this optimization makes\n quadratic run-time much less likely. This optimization is both\n version and implementation dependent. For performance sensitive\n code, it is preferable to use the "str.join()" method which assures\n consistent linear concatenation performance across versions and\n implementations.\n\n Changed in version 2.4: Formerly, string concatenation never\n occurred in-place.\n\n\nString Methods\n==============\n\nBelow are listed the string methods which both 8-bit strings and\nUnicode objects support. Some of them are also available on\n"bytearray" objects.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the Sequence Types --- str, unicode, list, tuple,\nbytearray, buffer, xrange section. To output formatted strings use\ntemplate strings or the "%" operator described in the String\nFormatting Operations section. 
Also, see the "re" module for string\nfunctions based on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.decode([encoding[, errors]])\n\n Decodes the string using the codec registered for *encoding*.\n *encoding* defaults to the default string encoding. *errors* may\n be given to set a different error handling scheme. The default is\n "\'strict\'", meaning that encoding errors raise "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'" and any other\n name registered via "codecs.register_error()", see section Codec\n Base Classes.\n\n New in version 2.2.\n\n Changed in version 2.3: Support for other error handling schemes\n added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.encode([encoding[, errors]])\n\n Return an encoded version of the string. Default encoding is the\n current default string encoding. *errors* may be given to set a\n different error handling scheme. The default for *errors* is\n "\'strict\'", meaning that encoding errors raise a "UnicodeError".\n Other possible values are "\'ignore\'", "\'replace\'",\n "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other name\n registered via "codecs.register_error()", see section Codec Base\n Classes. 
For a list of possible encodings, see section Standard\n Encodings.\n\n New in version 2.0.\n\n Changed in version 2.3: Support for "\'xmlcharrefreplace\'" and\n "\'backslashreplace\'" and other error handling schemes added.\n\n Changed in version 2.7: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\n Changed in version 2.5: Accept tuples as *suffix*.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found within the slice "s[start:end]". Optional arguments *start*\n and *end* are interpreted as in slice notation. 
Return "-1" if\n *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See Format String Syntax for a description of the various\n formatting options that can be specified in format strings.\n\n This method of string formatting is the new standard in Python 3,\n and should be preferred to the "%" formatting described in String\n Formatting Operations in new code.\n\n New in version 2.6.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isspace()\n\n Return true 
if there are only whitespace characters in the string\n and there is at least one character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. The separator between elements is the\n string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\n New in version 2.5.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\n Changed in version 2.4: Support for the *fillchar* argument.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\n New in version 2.5.\n\nstr.rsplit([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. 
If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\n New in version 2.4.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.split([sep[, maxsplit]])\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. 
Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example, "\' 1 2 3 \'.split()" returns "[\'1\', \'2\', \'3\']", and\n "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 \']".\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n Python recognizes ""\\r"", ""\\n"", and ""\\r\\n"" as line boundaries\n for 8-bit strings.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', \'\']\n\nunicode.splitlines([keepends])\n\n Return a list of the lines in the string, like "str.splitlines()".\n However, the Unicode method splits on the following line\n boundaries, which are a superset of the *universal newlines*\n recognized for 8-bit strings.\n\n +-------------------------+-------------------------------+\n | Representation | Description |\n +=========================+===============================+\n | "\\n" | Line Feed |\n +-------------------------+-------------------------------+\n | "\\r" | Carriage Return |\n +-------------------------+-------------------------------+\n | "\\r\\n" | Carriage Return + Line Feed |\n +-------------------------+-------------------------------+\n | "\\v" or "\\x0b" | Line Tabulation |\n 
+-------------------------+-------------------------------+\n | "\\f" or "\\x0c" | Form Feed |\n +-------------------------+-------------------------------+\n | "\\x1c" | File Separator |\n +-------------------------+-------------------------------+\n | "\\x1d" | Group Separator |\n +-------------------------+-------------------------------+\n | "\\x1e" | Record Separator |\n +-------------------------+-------------------------------+\n | "\\x85" | Next Line (C1 Control Code) |\n +-------------------------+-------------------------------+\n | "\\u2028" | Line Separator |\n +-------------------------+-------------------------------+\n | "\\u2029" | Paragraph Separator |\n +-------------------------+-------------------------------+\n\n Changed in version 2.7: "\\v" and "\\f" added to list of line\n boundaries.\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\n Changed in version 2.5: Accept tuples as *prefix*.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. 
The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\n Changed in version 2.2.2: Support for the *chars* argument.\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa.\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.translate(table[, deletechars])\n\n Return a copy of the string where all characters occurring in the\n optional argument *deletechars* are removed, and the remaining\n characters have been mapped through the given translation table,\n which must be a string of length 256.\n\n You can use the "maketrans()" helper function in the "string"\n module to create a translation table. 
For string objects, set the\n *table* argument to "None" for translations that only delete\n characters:\n\n >>> \'read this short text\'.translate(None, \'aeiou\')\n \'rd ths shrt txt\'\n\n New in version 2.6: Support for a "None" *table* argument.\n\n For Unicode objects, the "translate()" method does not accept the\n optional *deletechars* argument. Instead, it returns a copy of the\n *s* where all characters have been mapped through the given\n translation table which must be a mapping of Unicode ordinals to\n Unicode ordinals, Unicode strings or "None". Unmapped characters\n are left untouched. Characters mapped to "None" are deleted. Note,\n a more flexible approach is to create a custom character mapping\n codec using the "codecs" module (see "encodings.cp1251" for an\n example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n For 8-bit strings, this method is locale-dependent.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to "len(s)".\n\n New in version 2.2.2.\n\nThe following methods are present only on unicode objects:\n\nunicode.isnumeric()\n\n Return "True" if there are only numeric characters in S, "False"\n otherwise. Numeric characters include digit characters, and all\n characters that have the Unicode numeric value property, e.g.\n U+2155, VULGAR FRACTION ONE FIFTH.\n\nunicode.isdecimal()\n\n Return "True" if there are only decimal characters in S, "False"\n otherwise. 
Decimal characters include digit characters, and all\n characters that can be used to form decimal-radix numbers, e.g.\n U+0660, ARABIC-INDIC DIGIT ZERO.\n\n\nString Formatting Operations\n============================\n\nString and Unicode objects have one unique built-in operation: the "%"\noperator (modulo). This is also known as the string *formatting* or\n*interpolation* operator. Given "format % values" (where *format* is\na string or Unicode object), "%" conversion specifications in *format*\nare replaced with zero or more elements of *values*. The effect is\nsimilar to the using "sprintf()" in the C language. If *format* is a\nUnicode object, or if any of the objects being converted using the\n"%s" conversion are Unicode objects, the result will also be a Unicode\nobject.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The "\'%\'" character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence\n of characters (for example, "(somename)").\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an "\'*\'"\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a "\'.\'" (dot) followed by the\n precision. If specified as "\'*\'" (an asterisk), the actual width\n is read from the next element of the tuple in *values*, and the\n value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. 
Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the "\'%\'" character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print \'%(language)s has %(number)03d quote types.\' % \\\n... {"language": "Python", "number": 2}\nPython has 002 quote types.\n\nIn this case no "*" specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| "\'#\'" | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| "\'0\'" | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| "\'-\'" | The converted value is left adjusted (overrides the "\'0\'" conversion |\n| | if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| "\' \'" | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| "\'+\'" | A sign character ("\'+\'" or "\'-\'") will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier ("h", "l", or "L") may be present, but is ignored as\nit is not necessary for Python -- so e.g. 
"%ld" is identical to "%d".\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| "\'d\'" | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'i\'" | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'o\'" | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| "\'u\'" | Obsolete type -- it is identical to "\'d\'". | (7) |\n+--------------+-------------------------------------------------------+---------+\n| "\'x\'" | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'X\'" | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| "\'e\'" | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'E\'" | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'f\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'F\'" | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| "\'g\'" | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'G\'" | Floating point format. 
Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| "\'c\'" | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| "\'r\'" | String (converts any Python object using repr()). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| "\'s\'" | String (converts any Python object using "str()"). | (6) |\n+--------------+-------------------------------------------------------+---------+\n| "\'%\'" | No argument is converted, results in a "\'%\'" | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero ("\'0\'") to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading "\'0x\'" or "\'0X\'" (depending\n on whether the "\'x\'" or "\'X\'" format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. The "%r" conversion was added in Python 2.0.\n\n The precision determines the maximal number of characters used.\n\n6. 
If the object or format provided is a "unicode" string, the\n resulting string will also be "unicode".\n\n The precision determines the maximal number of characters used.\n\n7. See **PEP 237**.\n\nSince Python strings have an explicit length, "%s" conversions do not\nassume that "\'\\0\'" is the end of the string.\n\nChanged in version 2.7: "%f" conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by "%g" conversions.\n\nAdditional string operations are defined in standard modules "string"\nand "re".\n\n\nXRange Type\n===========\n\nThe "xrange" type is an immutable sequence which is commonly used for\nlooping. The advantage of the "xrange" type is that an "xrange"\nobject will always take the same amount of memory, no matter the size\nof the range it represents. There are no consistent performance\nadvantages.\n\nXRange objects have very little behavior: they only support indexing,\niteration, and the "len()" function.\n\n\nMutable Sequence Types\n======================\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. 
The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" or "s += t" | for the most part the same as | (3) |\n| | "s[len(s):len(s)] = t" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (11) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | 
|\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *t* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. 
The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. 
**CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n\n11. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the\n sequence. Items in the sequence are not copied; they are\n referenced multiple times, as explained for "s * n" under Sequence\n Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n',
- 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nList and "bytearray" objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object):\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | same as "s[len(s):len(s)] = [x]" | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" or "s += t" | for the most part the same as | (3) |\n| | "s[len(s):len(s)] = t" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (11) |\n| | repeated *n* 
times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.count(x)" | return number of *i*\'s for which | |\n| | "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.index(x[, i[, j]])" | return smallest *k* such that | (4) |\n| | "s[k] == x" and "i <= k < j" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | same as "s[i:i] = [x]" | (5) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | same as "x = s[i]; del s[i]; | (6) |\n| | return x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | same as "del s[s.index(x)]" | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (7) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.sort([cmp[, key[, | sort the items of *s* in place | (7)(8)(9)(10) |\n| reverse]]])" | | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The C implementation of Python has historically accepted\n multiple parameters and implicitly joined them into a tuple; this\n no longer works in Python 2.0. Use of this misfeature has been\n deprecated since Python 1.4.\n\n3. *t* can be any iterable object.\n\n4. Raises "ValueError" when *x* is not found in *s*. 
When a\n negative index is passed as the second or third parameter to the\n "index()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, "index()" didn\'t have arguments\n for specifying start and stop positions.\n\n5. When a negative index is passed as the first parameter to the\n "insert()" method, the list length is added, as for slice indices.\n If it is still negative, it is truncated to zero, as for slice\n indices.\n\n Changed in version 2.3: Previously, all negative indices were\n truncated to zero.\n\n6. The "pop()" method\'s optional argument *i* defaults to "-1", so\n that by default the last item is removed and returned.\n\n7. The "sort()" and "reverse()" methods modify the list in place\n for economy of space when sorting or reversing a large list. To\n remind you that they operate by side effect, they don\'t return the\n sorted or reversed list.\n\n8. The "sort()" method takes optional arguments for controlling the\n comparisons.\n\n *cmp* specifies a custom comparison function of two arguments (list\n items) which should return a negative, zero or positive number\n depending on whether the first argument is considered smaller than,\n equal to, or larger than the second argument: "cmp=lambda x,y:\n cmp(x.lower(), y.lower())". The default value is "None".\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: "key=str.lower". The\n default value is "None".\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n In general, the *key* and *reverse* conversion processes are much\n faster than specifying an equivalent *cmp* function. This is\n because *cmp* is called multiple times for each list element while\n *key* and *reverse* touch each element only once. 
Use\n "functools.cmp_to_key()" to convert an old-style *cmp* function to\n a *key* function.\n\n Changed in version 2.3: Support for "None" as an equivalent to\n omitting *cmp* was added.\n\n Changed in version 2.4: Support for *key* and *reverse* was added.\n\n9. Starting with Python 2.3, the "sort()" method is guaranteed to\n be stable. A sort is stable if it guarantees not to change the\n relative order of elements that compare equal --- this is helpful\n for sorting in multiple passes (for example, sort by department,\n then by salary grade).\n\n10. **CPython implementation detail:** While a list is being\n sorted, the effect of attempting to mutate, or even inspect, the\n list is undefined. The C implementation of Python 2.3 and newer\n makes the list appear empty for the duration, and raises\n "ValueError" if it can detect that the list has been mutated\n during a sort.\n\n11. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the\n sequence. Items in the sequence are not copied; they are\n referenced multiple times, as explained for "s * n" under Sequence\n Types --- str, unicode, list, tuple, bytearray, buffer, xrange.\n',
- 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\nplain or long integer argument. The bitwise inversion of "x" is\ndefined as "-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n',
- 'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n',
- 'with': u'\nThe "with" statement\n********************\n\nNew in version 2.5.\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section With Statement\nContext Managers). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nNote: In Python 2.5, the "with" statement is only allowed when the\n "with_statement" feature has been enabled. It is always enabled in\n Python 2.6.\n\nChanged in version 2.7: Support for multiple context expressions.\n\nSee also:\n\n **PEP 343** - The "with" statement\n The specification, background, and examples for the Python "with"\n statement.\n',
- 'yield': u'\nThe "yield" statement\n*********************\n\n yield_stmt ::= yield_expression\n\nThe "yield" statement is only used when defining a generator function,\nand is only used in the body of the generator function. Using a\n"yield" statement in a function definition is sufficient to cause that\ndefinition to create a generator function instead of a normal\nfunction.\n\nWhen a generator function is called, it returns an iterator known as a\ngenerator iterator, or more commonly, a generator. The body of the\ngenerator function is executed by calling the generator\'s "next()"\nmethod repeatedly until it raises an exception.\n\nWhen a "yield" statement is executed, the state of the generator is\nfrozen and the value of "expression_list" is returned to "next()"\'s\ncaller. By "frozen" we mean that all local state is retained,\nincluding the current bindings of local variables, the instruction\npointer, and the internal evaluation stack: enough information is\nsaved so that the next time "next()" is invoked, the function can\nproceed exactly as if the "yield" statement were just another external\ncall.\n\nAs of Python version 2.5, the "yield" statement is now allowed in the\n"try" clause of a "try" ... "finally" construct. If the generator is\nnot resumed before it is finalized (by reaching a zero reference count\nor by being garbage collected), the generator-iterator\'s "close()"\nmethod will be called, allowing any pending "finally" clauses to\nexecute.\n\nFor full details of "yield" semantics, refer to the Yield expressions\nsection.\n\nNote: In Python 2.2, the "yield" statement was only allowed when the\n "generators" feature has been enabled. 
This "__future__" import\n statement was used to enable the feature:\n\n from __future__ import generators\n\nSee also:\n\n **PEP 255** - Simple Generators\n The proposal for adding generators and the "yield" statement to\n Python.\n\n **PEP 342** - Coroutines via Enhanced Generators\n The proposal that, among other generator enhancements, proposed\n allowing "yield" to appear inside a "try" ... "finally" block.\n'}
+# Autogenerated by Sphinx on Sat Aug 26 11:16:28 2017
+topics = {'assert': '\n'
+ 'The "assert" statement\n'
+ '**********************\n'
+ '\n'
+ 'Assert statements are a convenient way to insert debugging '
+ 'assertions\n'
+ 'into a program:\n'
+ '\n'
+ ' assert_stmt ::= "assert" expression ["," expression]\n'
+ '\n'
+ 'The simple form, "assert expression", is equivalent to\n'
+ '\n'
+ ' if __debug__:\n'
+ ' if not expression: raise AssertionError\n'
+ '\n'
+ 'The extended form, "assert expression1, expression2", is '
+ 'equivalent to\n'
+ '\n'
+ ' if __debug__:\n'
+ ' if not expression1: raise AssertionError(expression2)\n'
+ '\n'
+ 'These equivalences assume that "__debug__" and "AssertionError" '
+ 'refer\n'
+ 'to the built-in variables with those names. In the current\n'
+ 'implementation, the built-in variable "__debug__" is "True" under\n'
+ 'normal circumstances, "False" when optimization is requested '
+ '(command\n'
+ 'line option -O). The current code generator emits no code for an\n'
+ 'assert statement when optimization is requested at compile time. '
+ 'Note\n'
+ 'that it is unnecessary to include the source code for the '
+ 'expression\n'
+ 'that failed in the error message; it will be displayed as part of '
+ 'the\n'
+ 'stack trace.\n'
+ '\n'
+ 'Assignments to "__debug__" are illegal. The value for the '
+ 'built-in\n'
+ 'variable is determined when the interpreter starts.\n',
+ 'assignment': '\n'
+ 'Assignment statements\n'
+ '*********************\n'
+ '\n'
+ 'Assignment statements are used to (re)bind names to values and '
+ 'to\n'
+ 'modify attributes or items of mutable objects:\n'
+ '\n'
+ ' assignment_stmt ::= (target_list "=")+ (expression_list | '
+ 'yield_expression)\n'
+ ' target_list ::= target ("," target)* [","]\n'
+ ' target ::= identifier\n'
+ ' | "(" target_list ")"\n'
+ ' | "[" [target_list] "]"\n'
+ ' | attributeref\n'
+ ' | subscription\n'
+ ' | slicing\n'
+ '\n'
+ '(See section Primaries for the syntax definitions for the last '
+ 'three\n'
+ 'symbols.)\n'
+ '\n'
+ 'An assignment statement evaluates the expression list '
+ '(remember that\n'
+ 'this can be a single expression or a comma-separated list, the '
+ 'latter\n'
+ 'yielding a tuple) and assigns the single resulting object to '
+ 'each of\n'
+ 'the target lists, from left to right.\n'
+ '\n'
+ 'Assignment is defined recursively depending on the form of the '
+ 'target\n'
+ '(list). When a target is part of a mutable object (an '
+ 'attribute\n'
+ 'reference, subscription or slicing), the mutable object must\n'
+ 'ultimately perform the assignment and decide about its '
+ 'validity, and\n'
+ 'may raise an exception if the assignment is unacceptable. The '
+ 'rules\n'
+ 'observed by various types and the exceptions raised are given '
+ 'with the\n'
+ 'definition of the object types (see section The standard type\n'
+ 'hierarchy).\n'
+ '\n'
+ 'Assignment of an object to a target list is recursively '
+ 'defined as\n'
+ 'follows.\n'
+ '\n'
+ '* If the target list is a single target: The object is '
+ 'assigned to\n'
+ ' that target.\n'
+ '\n'
+ '* If the target list is a comma-separated list of targets: '
+ 'The\n'
+ ' object must be an iterable with the same number of items as '
+ 'there\n'
+ ' are targets in the target list, and the items are assigned, '
+ 'from\n'
+ ' left to right, to the corresponding targets.\n'
+ '\n'
+ 'Assignment of an object to a single target is recursively '
+ 'defined as\n'
+ 'follows.\n'
+ '\n'
+ '* If the target is an identifier (name):\n'
+ '\n'
+ ' * If the name does not occur in a "global" statement in the\n'
+ ' current code block: the name is bound to the object in the '
+ 'current\n'
+ ' local namespace.\n'
+ '\n'
+ ' * Otherwise: the name is bound to the object in the current '
+ 'global\n'
+ ' namespace.\n'
+ '\n'
+ ' The name is rebound if it was already bound. This may cause '
+ 'the\n'
+ ' reference count for the object previously bound to the name '
+ 'to reach\n'
+ ' zero, causing the object to be deallocated and its '
+ 'destructor (if it\n'
+ ' has one) to be called.\n'
+ '\n'
+ '* If the target is a target list enclosed in parentheses or '
+ 'in\n'
+ ' square brackets: The object must be an iterable with the '
+ 'same number\n'
+ ' of items as there are targets in the target list, and its '
+ 'items are\n'
+ ' assigned, from left to right, to the corresponding targets.\n'
+ '\n'
+ '* If the target is an attribute reference: The primary '
+ 'expression in\n'
+ ' the reference is evaluated. It should yield an object with\n'
+ ' assignable attributes; if this is not the case, "TypeError" '
+ 'is\n'
+ ' raised. That object is then asked to assign the assigned '
+ 'object to\n'
+ ' the given attribute; if it cannot perform the assignment, it '
+ 'raises\n'
+ ' an exception (usually but not necessarily '
+ '"AttributeError").\n'
+ '\n'
+ ' Note: If the object is a class instance and the attribute '
+ 'reference\n'
+ ' occurs on both sides of the assignment operator, the RHS '
+ 'expression,\n'
+ ' "a.x" can access either an instance attribute or (if no '
+ 'instance\n'
+ ' attribute exists) a class attribute. The LHS target "a.x" '
+ 'is always\n'
+ ' set as an instance attribute, creating it if necessary. '
+ 'Thus, the\n'
+ ' two occurrences of "a.x" do not necessarily refer to the '
+ 'same\n'
+ ' attribute: if the RHS expression refers to a class '
+ 'attribute, the\n'
+ ' LHS creates a new instance attribute as the target of the\n'
+ ' assignment:\n'
+ '\n'
+ ' class Cls:\n'
+ ' x = 3 # class variable\n'
+ ' inst = Cls()\n'
+ ' inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x '
+ 'as 3\n'
+ '\n'
+ ' This description does not necessarily apply to descriptor\n'
+ ' attributes, such as properties created with "property()".\n'
+ '\n'
+ '* If the target is a subscription: The primary expression in '
+ 'the\n'
+ ' reference is evaluated. It should yield either a mutable '
+ 'sequence\n'
+ ' object (such as a list) or a mapping object (such as a '
+ 'dictionary).\n'
+ ' Next, the subscript expression is evaluated.\n'
+ '\n'
+ ' If the primary is a mutable sequence object (such as a '
+ 'list), the\n'
+ ' subscript must yield a plain integer. If it is negative, '
+ 'the\n'
+ " sequence's length is added to it. The resulting value must "
+ 'be a\n'
+ " nonnegative integer less than the sequence's length, and "
+ 'the\n'
+ ' sequence is asked to assign the assigned object to its item '
+ 'with\n'
+ ' that index. If the index is out of range, "IndexError" is '
+ 'raised\n'
+ ' (assignment to a subscripted sequence cannot add new items '
+ 'to a\n'
+ ' list).\n'
+ '\n'
+ ' If the primary is a mapping object (such as a dictionary), '
+ 'the\n'
+ " subscript must have a type compatible with the mapping's key "
+ 'type,\n'
+ ' and the mapping is then asked to create a key/datum pair '
+ 'which maps\n'
+ ' the subscript to the assigned object. This can either '
+ 'replace an\n'
+ ' existing key/value pair with the same key value, or insert a '
+ 'new\n'
+ ' key/value pair (if no key with the same value existed).\n'
+ '\n'
+ '* If the target is a slicing: The primary expression in the\n'
+ ' reference is evaluated. It should yield a mutable sequence '
+ 'object\n'
+ ' (such as a list). The assigned object should be a sequence '
+ 'object\n'
+ ' of the same type. Next, the lower and upper bound '
+ 'expressions are\n'
+ ' evaluated, insofar they are present; defaults are zero and '
+ 'the\n'
+ " sequence's length. The bounds should evaluate to (small) "
+ 'integers.\n'
+ " If either bound is negative, the sequence's length is added "
+ 'to it.\n'
+ ' The resulting bounds are clipped to lie between zero and '
+ 'the\n'
+ " sequence's length, inclusive. Finally, the sequence object "
+ 'is asked\n'
+ ' to replace the slice with the items of the assigned '
+ 'sequence. The\n'
+ ' length of the slice may be different from the length of the '
+ 'assigned\n'
+ ' sequence, thus changing the length of the target sequence, '
+ 'if the\n'
+ ' object allows it.\n'
+ '\n'
+ '**CPython implementation detail:** In the current '
+ 'implementation, the\n'
+ 'syntax for targets is taken to be the same as for expressions, '
+ 'and\n'
+ 'invalid syntax is rejected during the code generation phase, '
+ 'causing\n'
+ 'less detailed error messages.\n'
+ '\n'
+ 'WARNING: Although the definition of assignment implies that '
+ 'overlaps\n'
+ "between the left-hand side and the right-hand side are 'safe' "
+ '(for\n'
+ 'example "a, b = b, a" swaps two variables), overlaps *within* '
+ 'the\n'
+ 'collection of assigned-to variables are not safe! For '
+ 'instance, the\n'
+ 'following program prints "[0, 2]":\n'
+ '\n'
+ ' x = [0, 1]\n'
+ ' i = 0\n'
+ ' i, x[i] = 1, 2\n'
+ ' print x\n'
+ '\n'
+ '\n'
+ 'Augmented assignment statements\n'
+ '===============================\n'
+ '\n'
+ 'Augmented assignment is the combination, in a single '
+ 'statement, of a\n'
+ 'binary operation and an assignment statement:\n'
+ '\n'
+ ' augmented_assignment_stmt ::= augtarget augop '
+ '(expression_list | yield_expression)\n'
+ ' augtarget ::= identifier | attributeref | '
+ 'subscription | slicing\n'
+ ' augop ::= "+=" | "-=" | "*=" | "/=" | '
+ '"//=" | "%=" | "**="\n'
+ ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n'
+ '\n'
+ '(See section Primaries for the syntax definitions for the last '
+ 'three\n'
+ 'symbols.)\n'
+ '\n'
+ 'An augmented assignment evaluates the target (which, unlike '
+ 'normal\n'
+ 'assignment statements, cannot be an unpacking) and the '
+ 'expression\n'
+ 'list, performs the binary operation specific to the type of '
+ 'assignment\n'
+ 'on the two operands, and assigns the result to the original '
+ 'target.\n'
+ 'The target is only evaluated once.\n'
+ '\n'
+ 'An augmented assignment expression like "x += 1" can be '
+ 'rewritten as\n'
+ '"x = x + 1" to achieve a similar, but not exactly equal '
+ 'effect. In the\n'
+ 'augmented version, "x" is only evaluated once. Also, when '
+ 'possible,\n'
+ 'the actual operation is performed *in-place*, meaning that '
+ 'rather than\n'
+ 'creating a new object and assigning that to the target, the '
+ 'old object\n'
+ 'is modified instead.\n'
+ '\n'
+ 'With the exception of assigning to tuples and multiple targets '
+ 'in a\n'
+ 'single statement, the assignment done by augmented assignment\n'
+ 'statements is handled the same way as normal assignments. '
+ 'Similarly,\n'
+ 'with the exception of the possible *in-place* behavior, the '
+ 'binary\n'
+ 'operation performed by augmented assignment is the same as the '
+ 'normal\n'
+ 'binary operations.\n'
+ '\n'
+ 'For targets which are attribute references, the same caveat '
+ 'about\n'
+ 'class and instance attributes applies as for regular '
+ 'assignments.\n',
+ 'atom-identifiers': '\n'
+ 'Identifiers (Names)\n'
+ '*******************\n'
+ '\n'
+ 'An identifier occurring as an atom is a name. See '
+ 'section Identifiers\n'
+ 'and keywords for lexical definition and section Naming '
+ 'and binding for\n'
+ 'documentation of naming and binding.\n'
+ '\n'
+ 'When the name is bound to an object, evaluation of the '
+ 'atom yields\n'
+ 'that object. When a name is not bound, an attempt to '
+ 'evaluate it\n'
+ 'raises a "NameError" exception.\n'
+ '\n'
+ '**Private name mangling:** When an identifier that '
+ 'textually occurs in\n'
+ 'a class definition begins with two or more underscore '
+ 'characters and\n'
+ 'does not end in two or more underscores, it is '
+ 'considered a *private\n'
+ 'name* of that class. Private names are transformed to a '
+ 'longer form\n'
+ 'before code is generated for them. The transformation '
+ 'inserts the\n'
+ 'class name, with leading underscores removed and a '
+ 'single underscore\n'
+ 'inserted, in front of the name. For example, the '
+ 'identifier "__spam"\n'
+ 'occurring in a class named "Ham" will be transformed to '
+ '"_Ham__spam".\n'
+ 'This transformation is independent of the syntactical '
+ 'context in which\n'
+ 'the identifier is used. If the transformed name is '
+ 'extremely long\n'
+ '(longer than 255 characters), implementation defined '
+ 'truncation may\n'
+ 'happen. If the class name consists only of underscores, '
+ 'no\n'
+ 'transformation is done.\n',
+ 'atom-literals': '\n'
+ 'Literals\n'
+ '********\n'
+ '\n'
+ 'Python supports string literals and various numeric '
+ 'literals:\n'
+ '\n'
+ ' literal ::= stringliteral | integer | longinteger\n'
+ ' | floatnumber | imagnumber\n'
+ '\n'
+ 'Evaluation of a literal yields an object of the given type '
+ '(string,\n'
+ 'integer, long integer, floating point number, complex '
+ 'number) with the\n'
+ 'given value. The value may be approximated in the case of '
+ 'floating\n'
+ 'point and imaginary (complex) literals. See section '
+ 'Literals for\n'
+ 'details.\n'
+ '\n'
+ 'All literals correspond to immutable data types, and hence '
+ 'the\n'
+ "object's identity is less important than its value. "
+ 'Multiple\n'
+ 'evaluations of literals with the same value (either the '
+ 'same\n'
+ 'occurrence in the program text or a different occurrence) '
+ 'may obtain\n'
+ 'the same object or a different object with the same '
+ 'value.\n',
+ 'attribute-access': '\n'
+ 'Customizing attribute access\n'
+ '****************************\n'
+ '\n'
+ 'The following methods can be defined to customize the '
+ 'meaning of\n'
+ 'attribute access (use of, assignment to, or deletion of '
+ '"x.name") for\n'
+ 'class instances.\n'
+ '\n'
+ 'object.__getattr__(self, name)\n'
+ '\n'
+ ' Called when an attribute lookup has not found the '
+ 'attribute in the\n'
+ ' usual places (i.e. it is not an instance attribute '
+ 'nor is it found\n'
+ ' in the class tree for "self"). "name" is the '
+ 'attribute name. This\n'
+ ' method should return the (computed) attribute value '
+ 'or raise an\n'
+ ' "AttributeError" exception.\n'
+ '\n'
+ ' Note that if the attribute is found through the '
+ 'normal mechanism,\n'
+ ' "__getattr__()" is not called. (This is an '
+ 'intentional asymmetry\n'
+ ' between "__getattr__()" and "__setattr__()".) This is '
+ 'done both for\n'
+ ' efficiency reasons and because otherwise '
+ '"__getattr__()" would have\n'
+ ' no way to access other attributes of the instance. '
+ 'Note that at\n'
+ ' least for instance variables, you can fake total '
+ 'control by not\n'
+ ' inserting any values in the instance attribute '
+ 'dictionary (but\n'
+ ' instead inserting them in another object). See the\n'
+ ' "__getattribute__()" method below for a way to '
+ 'actually get total\n'
+ ' control in new-style classes.\n'
+ '\n'
+ 'object.__setattr__(self, name, value)\n'
+ '\n'
+ ' Called when an attribute assignment is attempted. '
+ 'This is called\n'
+ ' instead of the normal mechanism (i.e. store the value '
+ 'in the\n'
+ ' instance dictionary). *name* is the attribute name, '
+ '*value* is the\n'
+ ' value to be assigned to it.\n'
+ '\n'
+ ' If "__setattr__()" wants to assign to an instance '
+ 'attribute, it\n'
+ ' should not simply execute "self.name = value" --- '
+ 'this would cause\n'
+ ' a recursive call to itself. Instead, it should '
+ 'insert the value in\n'
+ ' the dictionary of instance attributes, e.g., '
+ '"self.__dict__[name] =\n'
+ ' value". For new-style classes, rather than accessing '
+ 'the instance\n'
+ ' dictionary, it should call the base class method with '
+ 'the same\n'
+ ' name, for example, "object.__setattr__(self, name, '
+ 'value)".\n'
+ '\n'
+ 'object.__delattr__(self, name)\n'
+ '\n'
+ ' Like "__setattr__()" but for attribute deletion '
+ 'instead of\n'
+ ' assignment. This should only be implemented if "del '
+ 'obj.name" is\n'
+ ' meaningful for the object.\n'
+ '\n'
+ '\n'
+ 'More attribute access for new-style classes\n'
+ '===========================================\n'
+ '\n'
+ 'The following methods only apply to new-style classes.\n'
+ '\n'
+ 'object.__getattribute__(self, name)\n'
+ '\n'
+ ' Called unconditionally to implement attribute '
+ 'accesses for\n'
+ ' instances of the class. If the class also defines '
+ '"__getattr__()",\n'
+ ' the latter will not be called unless '
+ '"__getattribute__()" either\n'
+ ' calls it explicitly or raises an "AttributeError". '
+ 'This method\n'
+ ' should return the (computed) attribute value or raise '
+ 'an\n'
+ ' "AttributeError" exception. In order to avoid '
+ 'infinite recursion in\n'
+ ' this method, its implementation should always call '
+ 'the base class\n'
+ ' method with the same name to access any attributes it '
+ 'needs, for\n'
+ ' example, "object.__getattribute__(self, name)".\n'
+ '\n'
+ ' Note: This method may still be bypassed when looking '
+ 'up special\n'
+ ' methods as the result of implicit invocation via '
+ 'language syntax\n'
+ ' or built-in functions. See Special method lookup '
+ 'for new-style\n'
+ ' classes.\n'
+ '\n'
+ '\n'
+ 'Implementing Descriptors\n'
+ '========================\n'
+ '\n'
+ 'The following methods only apply when an instance of the '
+ 'class\n'
+ 'containing the method (a so-called *descriptor* class) '
+ 'appears in an\n'
+ '*owner* class (the descriptor must be in either the '
+ "owner's class\n"
+ 'dictionary or in the class dictionary for one of its '
+ 'parents). In the\n'
+ 'examples below, "the attribute" refers to the attribute '
+ 'whose name is\n'
+ "the key of the property in the owner class' "
+ '"__dict__".\n'
+ '\n'
+ 'object.__get__(self, instance, owner)\n'
+ '\n'
+ ' Called to get the attribute of the owner class (class '
+ 'attribute\n'
+ ' access) or of an instance of that class (instance '
+ 'attribute\n'
+ ' access). *owner* is always the owner class, while '
+ '*instance* is the\n'
+ ' instance that the attribute was accessed through, or '
+ '"None" when\n'
+ ' the attribute is accessed through the *owner*. This '
+ 'method should\n'
+ ' return the (computed) attribute value or raise an '
+ '"AttributeError"\n'
+ ' exception.\n'
+ '\n'
+ 'object.__set__(self, instance, value)\n'
+ '\n'
+ ' Called to set the attribute on an instance *instance* '
+ 'of the owner\n'
+ ' class to a new value, *value*.\n'
+ '\n'
+ 'object.__delete__(self, instance)\n'
+ '\n'
+ ' Called to delete the attribute on an instance '
+ '*instance* of the\n'
+ ' owner class.\n'
+ '\n'
+ '\n'
+ 'Invoking Descriptors\n'
+ '====================\n'
+ '\n'
+ 'In general, a descriptor is an object attribute with '
+ '"binding\n'
+ 'behavior", one whose attribute access has been '
+ 'overridden by methods\n'
+ 'in the descriptor protocol: "__get__()", "__set__()", '
+ 'and\n'
+ '"__delete__()". If any of those methods are defined for '
+ 'an object, it\n'
+ 'is said to be a descriptor.\n'
+ '\n'
+ 'The default behavior for attribute access is to get, '
+ 'set, or delete\n'
+ "the attribute from an object's dictionary. For instance, "
+ '"a.x" has a\n'
+ 'lookup chain starting with "a.__dict__[\'x\']", then\n'
+ '"type(a).__dict__[\'x\']", and continuing through the '
+ 'base classes of\n'
+ '"type(a)" excluding metaclasses.\n'
+ '\n'
+ 'However, if the looked-up value is an object defining '
+ 'one of the\n'
+ 'descriptor methods, then Python may override the default '
+ 'behavior and\n'
+ 'invoke the descriptor method instead. Where this occurs '
+ 'in the\n'
+ 'precedence chain depends on which descriptor methods '
+ 'were defined and\n'
+ 'how they were called. Note that descriptors are only '
+ 'invoked for new\n'
+ 'style objects or classes (ones that subclass "object()" '
+ 'or "type()").\n'
+ '\n'
+ 'The starting point for descriptor invocation is a '
+ 'binding, "a.x". How\n'
+ 'the arguments are assembled depends on "a":\n'
+ '\n'
+ 'Direct Call\n'
+ ' The simplest and least common call is when user code '
+ 'directly\n'
+ ' invokes a descriptor method: "x.__get__(a)".\n'
+ '\n'
+ 'Instance Binding\n'
+ ' If binding to a new-style object instance, "a.x" is '
+ 'transformed\n'
+ ' into the call: "type(a).__dict__[\'x\'].__get__(a, '
+ 'type(a))".\n'
+ '\n'
+ 'Class Binding\n'
+ ' If binding to a new-style class, "A.x" is transformed '
+ 'into the\n'
+ ' call: "A.__dict__[\'x\'].__get__(None, A)".\n'
+ '\n'
+ 'Super Binding\n'
+ ' If "a" is an instance of "super", then the binding '
+ '"super(B,\n'
+ ' obj).m()" searches "obj.__class__.__mro__" for the '
+ 'base class "A"\n'
+ ' immediately preceding "B" and then invokes the '
+ 'descriptor with the\n'
+ ' call: "A.__dict__[\'m\'].__get__(obj, '
+ 'obj.__class__)".\n'
+ '\n'
+ 'For instance bindings, the precedence of descriptor '
+ 'invocation depends\n'
+ 'on the which descriptor methods are defined. A '
+ 'descriptor can define\n'
+ 'any combination of "__get__()", "__set__()" and '
+ '"__delete__()". If it\n'
+ 'does not define "__get__()", then accessing the '
+ 'attribute will return\n'
+ 'the descriptor object itself unless there is a value in '
+ "the object's\n"
+ 'instance dictionary. If the descriptor defines '
+ '"__set__()" and/or\n'
+ '"__delete__()", it is a data descriptor; if it defines '
+ 'neither, it is\n'
+ 'a non-data descriptor. Normally, data descriptors '
+ 'define both\n'
+ '"__get__()" and "__set__()", while non-data descriptors '
+ 'have just the\n'
+ '"__get__()" method. Data descriptors with "__set__()" '
+ 'and "__get__()"\n'
+ 'defined always override a redefinition in an instance '
+ 'dictionary. In\n'
+ 'contrast, non-data descriptors can be overridden by '
+ 'instances.\n'
+ '\n'
+ 'Python methods (including "staticmethod()" and '
+ '"classmethod()") are\n'
+ 'implemented as non-data descriptors. Accordingly, '
+ 'instances can\n'
+ 'redefine and override methods. This allows individual '
+ 'instances to\n'
+ 'acquire behaviors that differ from other instances of '
+ 'the same class.\n'
+ '\n'
+ 'The "property()" function is implemented as a data '
+ 'descriptor.\n'
+ 'Accordingly, instances cannot override the behavior of a '
+ 'property.\n'
+ '\n'
+ '\n'
+ '__slots__\n'
+ '=========\n'
+ '\n'
+ 'By default, instances of both old and new-style classes '
+ 'have a\n'
+ 'dictionary for attribute storage. This wastes space for '
+ 'objects\n'
+ 'having very few instance variables. The space '
+ 'consumption can become\n'
+ 'acute when creating large numbers of instances.\n'
+ '\n'
+ 'The default can be overridden by defining *__slots__* in '
+ 'a new-style\n'
+ 'class definition. The *__slots__* declaration takes a '
+ 'sequence of\n'
+ 'instance variables and reserves just enough space in '
+ 'each instance to\n'
+ 'hold a value for each variable. Space is saved because '
+ '*__dict__* is\n'
+ 'not created for each instance.\n'
+ '\n'
+ '__slots__\n'
+ '\n'
+ ' This class variable can be assigned a string, '
+ 'iterable, or sequence\n'
+ ' of strings with variable names used by instances. If '
+ 'defined in a\n'
+ ' new-style class, *__slots__* reserves space for the '
+ 'declared\n'
+ ' variables and prevents the automatic creation of '
+ '*__dict__* and\n'
+ ' *__weakref__* for each instance.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ 'Notes on using *__slots__*\n'
+ '\n'
+ '* When inheriting from a class without *__slots__*, the '
+ '*__dict__*\n'
+ ' attribute of that class will always be accessible, so '
+ 'a *__slots__*\n'
+ ' definition in the subclass is meaningless.\n'
+ '\n'
+ '* Without a *__dict__* variable, instances cannot be '
+ 'assigned new\n'
+ ' variables not listed in the *__slots__* definition. '
+ 'Attempts to\n'
+ ' assign to an unlisted variable name raises '
+ '"AttributeError". If\n'
+ ' dynamic assignment of new variables is desired, then '
+ 'add\n'
+ ' "\'__dict__\'" to the sequence of strings in the '
+ '*__slots__*\n'
+ ' declaration.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, adding '
+ '"\'__dict__\'" to the\n'
+ ' *__slots__* declaration would not enable the '
+ 'assignment of new\n'
+ ' attributes not specifically listed in the sequence of '
+ 'instance\n'
+ ' variable names.\n'
+ '\n'
+ '* Without a *__weakref__* variable for each instance, '
+ 'classes\n'
+ ' defining *__slots__* do not support weak references to '
+ 'its\n'
+ ' instances. If weak reference support is needed, then '
+ 'add\n'
+ ' "\'__weakref__\'" to the sequence of strings in the '
+ '*__slots__*\n'
+ ' declaration.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, adding '
+ '"\'__weakref__\'" to the\n'
+ ' *__slots__* declaration would not enable support for '
+ 'weak\n'
+ ' references.\n'
+ '\n'
+ '* *__slots__* are implemented at the class level by '
+ 'creating\n'
+ ' descriptors (Implementing Descriptors) for each '
+ 'variable name. As a\n'
+ ' result, class attributes cannot be used to set default '
+ 'values for\n'
+ ' instance variables defined by *__slots__*; otherwise, '
+ 'the class\n'
+ ' attribute would overwrite the descriptor assignment.\n'
+ '\n'
+ '* The action of a *__slots__* declaration is limited to '
+ 'the class\n'
+ ' where it is defined. As a result, subclasses will '
+ 'have a *__dict__*\n'
+ ' unless they also define *__slots__* (which must only '
+ 'contain names\n'
+ ' of any *additional* slots).\n'
+ '\n'
+ '* If a class defines a slot also defined in a base '
+ 'class, the\n'
+ ' instance variable defined by the base class slot is '
+ 'inaccessible\n'
+ ' (except by retrieving its descriptor directly from the '
+ 'base class).\n'
+ ' This renders the meaning of the program undefined. In '
+ 'the future, a\n'
+ ' check may be added to prevent this.\n'
+ '\n'
+ '* Nonempty *__slots__* does not work for classes derived '
+ 'from\n'
+ ' "variable-length" built-in types such as "long", "str" '
+ 'and "tuple".\n'
+ '\n'
+ '* Any non-string iterable may be assigned to '
+ '*__slots__*. Mappings\n'
+ ' may also be used; however, in the future, special '
+ 'meaning may be\n'
+ ' assigned to the values corresponding to each key.\n'
+ '\n'
+ '* *__class__* assignment works only if both classes have '
+ 'the same\n'
+ ' *__slots__*.\n'
+ '\n'
+ ' Changed in version 2.6: Previously, *__class__* '
+ 'assignment raised an\n'
+ ' error if either new or old class had *__slots__*.\n',
+ 'attribute-references': '\n'
+ 'Attribute references\n'
+ '********************\n'
+ '\n'
+ 'An attribute reference is a primary followed by a '
+ 'period and a name:\n'
+ '\n'
+ ' attributeref ::= primary "." identifier\n'
+ '\n'
+ 'The primary must evaluate to an object of a type '
+ 'that supports\n'
+ 'attribute references, e.g., a module, list, or an '
+ 'instance. This\n'
+ 'object is then asked to produce the attribute whose '
+ 'name is the\n'
+ 'identifier. If this attribute is not available, the '
+ 'exception\n'
+ '"AttributeError" is raised. Otherwise, the type and '
+ 'value of the\n'
+ 'object produced is determined by the object. '
+ 'Multiple evaluations of\n'
+ 'the same attribute reference may yield different '
+ 'objects.\n',
+ 'augassign': '\n'
+ 'Augmented assignment statements\n'
+ '*******************************\n'
+ '\n'
+ 'Augmented assignment is the combination, in a single statement, '
+ 'of a\n'
+ 'binary operation and an assignment statement:\n'
+ '\n'
+ ' augmented_assignment_stmt ::= augtarget augop '
+ '(expression_list | yield_expression)\n'
+ ' augtarget ::= identifier | attributeref | '
+ 'subscription | slicing\n'
+ ' augop ::= "+=" | "-=" | "*=" | "/=" | '
+ '"//=" | "%=" | "**="\n'
+ ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n'
+ '\n'
+ '(See section Primaries for the syntax definitions for the last '
+ 'three\n'
+ 'symbols.)\n'
+ '\n'
+ 'An augmented assignment evaluates the target (which, unlike '
+ 'normal\n'
+ 'assignment statements, cannot be an unpacking) and the '
+ 'expression\n'
+ 'list, performs the binary operation specific to the type of '
+ 'assignment\n'
+ 'on the two operands, and assigns the result to the original '
+ 'target.\n'
+ 'The target is only evaluated once.\n'
+ '\n'
+ 'An augmented assignment expression like "x += 1" can be '
+ 'rewritten as\n'
+ '"x = x + 1" to achieve a similar, but not exactly equal effect. '
+ 'In the\n'
+ 'augmented version, "x" is only evaluated once. Also, when '
+ 'possible,\n'
+ 'the actual operation is performed *in-place*, meaning that '
+ 'rather than\n'
+ 'creating a new object and assigning that to the target, the old '
+ 'object\n'
+ 'is modified instead.\n'
+ '\n'
+ 'With the exception of assigning to tuples and multiple targets '
+ 'in a\n'
+ 'single statement, the assignment done by augmented assignment\n'
+ 'statements is handled the same way as normal assignments. '
+ 'Similarly,\n'
+ 'with the exception of the possible *in-place* behavior, the '
+ 'binary\n'
+ 'operation performed by augmented assignment is the same as the '
+ 'normal\n'
+ 'binary operations.\n'
+ '\n'
+ 'For targets which are attribute references, the same caveat '
+ 'about\n'
+ 'class and instance attributes applies as for regular '
+ 'assignments.\n',
+ 'binary': '\n'
+ 'Binary arithmetic operations\n'
+ '****************************\n'
+ '\n'
+ 'The binary arithmetic operations have the conventional priority\n'
+ 'levels. Note that some of these operations also apply to certain '
+ 'non-\n'
+ 'numeric types. Apart from the power operator, there are only two\n'
+ 'levels, one for multiplicative operators and one for additive\n'
+ 'operators:\n'
+ '\n'
+ ' m_expr ::= u_expr | m_expr "*" u_expr | m_expr "//" u_expr | '
+ 'm_expr "/" u_expr\n'
+ ' | m_expr "%" u_expr\n'
+ ' a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n'
+ '\n'
+ 'The "*" (multiplication) operator yields the product of its '
+ 'arguments.\n'
+ 'The arguments must either both be numbers, or one argument must be '
+ 'an\n'
+ 'integer (plain or long) and the other must be a sequence. In the\n'
+ 'former case, the numbers are converted to a common type and then\n'
+ 'multiplied together. In the latter case, sequence repetition is\n'
+ 'performed; a negative repetition factor yields an empty sequence.\n'
+ '\n'
+ 'The "/" (division) and "//" (floor division) operators yield the\n'
+ 'quotient of their arguments. The numeric arguments are first\n'
+ 'converted to a common type. Plain or long integer division yields '
+ 'an\n'
+ 'integer of the same type; the result is that of mathematical '
+ 'division\n'
+ "with the 'floor' function applied to the result. Division by zero\n"
+ 'raises the "ZeroDivisionError" exception.\n'
+ '\n'
+ 'The "%" (modulo) operator yields the remainder from the division '
+ 'of\n'
+ 'the first argument by the second. The numeric arguments are '
+ 'first\n'
+ 'converted to a common type. A zero right argument raises the\n'
+ '"ZeroDivisionError" exception. The arguments may be floating '
+ 'point\n'
+ 'numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals '
+ '"4*0.7 +\n'
+ '0.34".) The modulo operator always yields a result with the same '
+ 'sign\n'
+ 'as its second operand (or zero); the absolute value of the result '
+ 'is\n'
+ 'strictly smaller than the absolute value of the second operand '
+ '[2].\n'
+ '\n'
+ 'The integer division and modulo operators are connected by the\n'
+ 'following identity: "x == (x/y)*y + (x%y)". Integer division and\n'
+ 'modulo are also connected with the built-in function "divmod()":\n'
+ '"divmod(x, y) == (x/y, x%y)". These identities don\'t hold for\n'
+ 'floating point numbers; there similar identities hold '
+ 'approximately\n'
+ 'where "x/y" is replaced by "floor(x/y)" or "floor(x/y) - 1" [3].\n'
+ '\n'
+ 'In addition to performing the modulo operation on numbers, the '
+ '"%"\n'
+ 'operator is also overloaded by string and unicode objects to '
+ 'perform\n'
+ 'string formatting (also known as interpolation). The syntax for '
+ 'string\n'
+ 'formatting is described in the Python Library Reference, section\n'
+ 'String Formatting Operations.\n'
+ '\n'
+ 'Deprecated since version 2.3: The floor division operator, the '
+ 'modulo\n'
+ 'operator, and the "divmod()" function are no longer defined for\n'
+ 'complex numbers. Instead, convert to a floating point number '
+ 'using\n'
+ 'the "abs()" function if appropriate.\n'
+ '\n'
+ 'The "+" (addition) operator yields the sum of its arguments. The\n'
+ 'arguments must either both be numbers or both sequences of the '
+ 'same\n'
+ 'type. In the former case, the numbers are converted to a common '
+ 'type\n'
+ 'and then added together. In the latter case, the sequences are\n'
+ 'concatenated.\n'
+ '\n'
+ 'The "-" (subtraction) operator yields the difference of its '
+ 'arguments.\n'
+ 'The numeric arguments are first converted to a common type.\n',
+ 'bitwise': '\n'
+ 'Binary bitwise operations\n'
+ '*************************\n'
+ '\n'
+ 'Each of the three bitwise operations has a different priority '
+ 'level:\n'
+ '\n'
+ ' and_expr ::= shift_expr | and_expr "&" shift_expr\n'
+ ' xor_expr ::= and_expr | xor_expr "^" and_expr\n'
+ ' or_expr ::= xor_expr | or_expr "|" xor_expr\n'
+ '\n'
+ 'The "&" operator yields the bitwise AND of its arguments, which '
+ 'must\n'
+ 'be plain or long integers. The arguments are converted to a '
+ 'common\n'
+ 'type.\n'
+ '\n'
+ 'The "^" operator yields the bitwise XOR (exclusive OR) of its\n'
+ 'arguments, which must be plain or long integers. The arguments '
+ 'are\n'
+ 'converted to a common type.\n'
+ '\n'
+ 'The "|" operator yields the bitwise (inclusive) OR of its '
+ 'arguments,\n'
+ 'which must be plain or long integers. The arguments are '
+ 'converted to\n'
+ 'a common type.\n',
+ 'bltin-code-objects': '\n'
+ 'Code Objects\n'
+ '************\n'
+ '\n'
+ 'Code objects are used by the implementation to '
+ 'represent "pseudo-\n'
+ 'compiled" executable Python code such as a function '
+ 'body. They differ\n'
+ "from function objects because they don't contain a "
+ 'reference to their\n'
+ 'global execution environment. Code objects are '
+ 'returned by the built-\n'
+ 'in "compile()" function and can be extracted from '
+ 'function objects\n'
+ 'through their "func_code" attribute. See also the '
+ '"code" module.\n'
+ '\n'
+ 'A code object can be executed or evaluated by passing '
+ 'it (instead of a\n'
+ 'source string) to the "exec" statement or the built-in '
+ '"eval()"\n'
+ 'function.\n'
+ '\n'
+ 'See The standard type hierarchy for more '
+ 'information.\n',
+ 'bltin-ellipsis-object': '\n'
+ 'The Ellipsis Object\n'
+ '*******************\n'
+ '\n'
+ 'This object is used by extended slice notation (see '
+ 'Slicings). It\n'
+ 'supports no special operations. There is exactly '
+ 'one ellipsis object,\n'
+ 'named "Ellipsis" (a built-in name).\n'
+ '\n'
+ 'It is written as "Ellipsis". When in a subscript, '
+ 'it can also be\n'
+ 'written as "...", for example "seq[...]".\n',
+ 'bltin-file-objects': '\n'
+ 'File Objects\n'
+ '************\n'
+ '\n'
+ 'File objects are implemented using C\'s "stdio" '
+ 'package and can be\n'
+ 'created with the built-in "open()" function. File '
+ 'objects are also\n'
+ 'returned by some other built-in functions and methods, '
+ 'such as\n'
+ '"os.popen()" and "os.fdopen()" and the "makefile()" '
+ 'method of socket\n'
+ 'objects. Temporary files can be created using the '
+ '"tempfile" module,\n'
+ 'and high-level file operations such as copying, '
+ 'moving, and deleting\n'
+ 'files and directories can be achieved with the '
+ '"shutil" module.\n'
+ '\n'
+ 'When a file operation fails for an I/O-related reason, '
+ 'the exception\n'
+ '"IOError" is raised. This includes situations where '
+ 'the operation is\n'
+ 'not defined for some reason, like "seek()" on a tty '
+ 'device or writing\n'
+ 'a file opened for reading.\n'
+ '\n'
+ 'Files have the following methods:\n'
+ '\n'
+ 'file.close()\n'
+ '\n'
+ ' Close the file. A closed file cannot be read or '
+ 'written any more.\n'
+ ' Any operation which requires that the file be open '
+ 'will raise a\n'
+ ' "ValueError" after the file has been closed. '
+ 'Calling "close()"\n'
+ ' more than once is allowed.\n'
+ '\n'
+ ' As of Python 2.5, you can avoid having to call this '
+ 'method\n'
+ ' explicitly if you use the "with" statement. For '
+ 'example, the\n'
+ ' following code will automatically close *f* when '
+ 'the "with" block\n'
+ ' is exited:\n'
+ '\n'
+ ' from __future__ import with_statement # This '
+ "isn't required in Python 2.6\n"
+ '\n'
+ ' with open("hello.txt") as f:\n'
+ ' for line in f:\n'
+ ' print line,\n'
+ '\n'
+ ' In older versions of Python, you would have needed '
+ 'to do this to\n'
+ ' get the same effect:\n'
+ '\n'
+ ' f = open("hello.txt")\n'
+ ' try:\n'
+ ' for line in f:\n'
+ ' print line,\n'
+ ' finally:\n'
+ ' f.close()\n'
+ '\n'
+ ' Note: Not all "file-like" types in Python support '
+ 'use as a\n'
+ ' context manager for the "with" statement. If '
+ 'your code is\n'
+ ' intended to work with any file-like object, you '
+ 'can use the\n'
+ ' function "contextlib.closing()" instead of using '
+ 'the object\n'
+ ' directly.\n'
+ '\n'
+ 'file.flush()\n'
+ '\n'
+ ' Flush the internal buffer, like "stdio"\'s '
+ '"fflush()". This may be\n'
+ ' a no-op on some file-like objects.\n'
+ '\n'
+ ' Note: "flush()" does not necessarily write the '
+ "file's data to\n"
+ ' disk. Use "flush()" followed by "os.fsync()" to '
+ 'ensure this\n'
+ ' behavior.\n'
+ '\n'
+ 'file.fileno()\n'
+ '\n'
+ ' Return the integer "file descriptor" that is used '
+ 'by the underlying\n'
+ ' implementation to request I/O operations from the '
+ 'operating system.\n'
+ ' This can be useful for other, lower level '
+ 'interfaces that use file\n'
+ ' descriptors, such as the "fcntl" module or '
+ '"os.read()" and friends.\n'
+ '\n'
+ ' Note: File-like objects which do not have a real '
+ 'file descriptor\n'
+ ' should *not* provide this method!\n'
+ '\n'
+ 'file.isatty()\n'
+ '\n'
+ ' Return "True" if the file is connected to a '
+ 'tty(-like) device, else\n'
+ ' "False".\n'
+ '\n'
+ ' Note: If a file-like object is not associated with '
+ 'a real file,\n'
+ ' this method should *not* be implemented.\n'
+ '\n'
+ 'file.next()\n'
+ '\n'
+ ' A file object is its own iterator, for example '
+ '"iter(f)" returns\n'
+ ' *f* (unless *f* is closed). When a file is used as '
+ 'an iterator,\n'
+ ' typically in a "for" loop (for example, "for line '
+ 'in f: print\n'
+ ' line.strip()"), the "next()" method is called '
+ 'repeatedly. This\n'
+ ' method returns the next input line, or raises '
+ '"StopIteration" when\n'
+ ' EOF is hit when the file is open for reading '
+ '(behavior is undefined\n'
+ ' when the file is open for writing). In order to '
+ 'make a "for" loop\n'
+ ' the most efficient way of looping over the lines of '
+ 'a file (a very\n'
+ ' common operation), the "next()" method uses a '
+ 'hidden read-ahead\n'
+ ' buffer. As a consequence of using a read-ahead '
+ 'buffer, combining\n'
+ ' "next()" with other file methods (like '
+ '"readline()") does not work\n'
+ ' right. However, using "seek()" to reposition the '
+ 'file to an\n'
+ ' absolute position will flush the read-ahead '
+ 'buffer.\n'
+ '\n'
+ ' New in version 2.3.\n'
+ '\n'
+ 'file.read([size])\n'
+ '\n'
+ ' Read at most *size* bytes from the file (less if '
+ 'the read hits EOF\n'
+ ' before obtaining *size* bytes). If the *size* '
+ 'argument is negative\n'
+ ' or omitted, read all data until EOF is reached. '
+ 'The bytes are\n'
+ ' returned as a string object. An empty string is '
+ 'returned when EOF\n'
+ ' is encountered immediately. (For certain files, '
+ 'like ttys, it\n'
+ ' makes sense to continue reading after an EOF is '
+ 'hit.) Note that\n'
+ ' this method may call the underlying C function '
+ '"fread()" more than\n'
+ ' once in an effort to acquire as close to *size* '
+ 'bytes as possible.\n'
+ ' Also note that when in non-blocking mode, less data '
+ 'than was\n'
+ ' requested may be returned, even if no *size* '
+ 'parameter was given.\n'
+ '\n'
+ ' Note: This function is simply a wrapper for the '
+ 'underlying\n'
+ ' "fread()" C function, and will behave the same in '
+ 'corner cases,\n'
+ ' such as whether the EOF value is cached.\n'
+ '\n'
+ 'file.readline([size])\n'
+ '\n'
+ ' Read one entire line from the file. A trailing '
+ 'newline character\n'
+ ' is kept in the string (but may be absent when a '
+ 'file ends with an\n'
+ ' incomplete line). [6] If the *size* argument is '
+ 'present and non-\n'
+ ' negative, it is a maximum byte count (including the '
+ 'trailing\n'
+ ' newline) and an incomplete line may be returned. '
+ 'When *size* is not\n'
+ ' 0, an empty string is returned *only* when EOF is '
+ 'encountered\n'
+ ' immediately.\n'
+ '\n'
+ ' Note: Unlike "stdio"\'s "fgets()", the returned '
+ 'string contains\n'
+ ' null characters ("\'\\0\'") if they occurred in '
+ 'the input.\n'
+ '\n'
+ 'file.readlines([sizehint])\n'
+ '\n'
+ ' Read until EOF using "readline()" and return a list '
+ 'containing the\n'
+ ' lines thus read. If the optional *sizehint* '
+ 'argument is present,\n'
+ ' instead of reading up to EOF, whole lines totalling '
+ 'approximately\n'
+ ' *sizehint* bytes (possibly after rounding up to an '
+ 'internal buffer\n'
+ ' size) are read. Objects implementing a file-like '
+ 'interface may\n'
+ ' choose to ignore *sizehint* if it cannot be '
+ 'implemented, or cannot\n'
+ ' be implemented efficiently.\n'
+ '\n'
+ 'file.xreadlines()\n'
+ '\n'
+ ' This method returns the same thing as "iter(f)".\n'
+ '\n'
+ ' New in version 2.1.\n'
+ '\n'
+ ' Deprecated since version 2.3: Use "for line in '
+ 'file" instead.\n'
+ '\n'
+ 'file.seek(offset[, whence])\n'
+ '\n'
+ ' Set the file\'s current position, like "stdio"\'s '
+ '"fseek()". The\n'
+ ' *whence* argument is optional and defaults to '
+ '"os.SEEK_SET" or "0"\n'
+ ' (absolute file positioning); other values are '
+ '"os.SEEK_CUR" or "1"\n'
+ ' (seek relative to the current position) and '
+ '"os.SEEK_END" or "2"\n'
+ " (seek relative to the file's end). There is no "
+ 'return value.\n'
+ '\n'
+ ' For example, "f.seek(2, os.SEEK_CUR)" advances the '
+ 'position by two\n'
+ ' and "f.seek(-3, os.SEEK_END)" sets the position to '
+ 'the third to\n'
+ ' last.\n'
+ '\n'
+ ' Note that if the file is opened for appending (mode '
+ '"\'a\'" or\n'
+ ' "\'a+\'"), any "seek()" operations will be undone '
+ 'at the next write.\n'
+ ' If the file is only opened for writing in append '
+ 'mode (mode "\'a\'"),\n'
+ ' this method is essentially a no-op, but it remains '
+ 'useful for files\n'
+ ' opened in append mode with reading enabled (mode '
+ '"\'a+\'"). If the\n'
+ ' file is opened in text mode (without "\'b\'"), only '
+ 'offsets returned\n'
+ ' by "tell()" are legal. Use of other offsets causes '
+ 'undefined\n'
+ ' behavior.\n'
+ '\n'
+ ' Note that not all file objects are seekable.\n'
+ '\n'
+ ' Changed in version 2.6: Passing float values as '
+ 'offset has been\n'
+ ' deprecated.\n'
+ '\n'
+ 'file.tell()\n'
+ '\n'
+ " Return the file's current position, like "
+ '"stdio"\'s "ftell()".\n'
+ '\n'
+ ' Note: On Windows, "tell()" can return illegal '
+ 'values (after an\n'
+ ' "fgets()") when reading files with Unix-style '
+ 'line-endings. Use\n'
+ ' binary mode ("\'rb\'") to circumvent this '
+ 'problem.\n'
+ '\n'
+ 'file.truncate([size])\n'
+ '\n'
+ " Truncate the file's size. If the optional *size* "
+ 'argument is\n'
+ ' present, the file is truncated to (at most) that '
+ 'size. The size\n'
+ ' defaults to the current position. The current file '
+ 'position is not\n'
+ ' changed. Note that if a specified size exceeds the '
+ "file's current\n"
+ ' size, the result is platform-dependent: '
+ 'possibilities include that\n'
+ ' the file may remain unchanged, increase to the '
+ 'specified size as if\n'
+ ' zero-filled, or increase to the specified size with '
+ 'undefined new\n'
+ ' content. Availability: Windows, many Unix '
+ 'variants.\n'
+ '\n'
+ 'file.write(str)\n'
+ '\n'
+ ' Write a string to the file. There is no return '
+ 'value. Due to\n'
+ ' buffering, the string may not actually show up in '
+ 'the file until\n'
+ ' the "flush()" or "close()" method is called.\n'
+ '\n'
+ 'file.writelines(sequence)\n'
+ '\n'
+ ' Write a sequence of strings to the file. The '
+ 'sequence can be any\n'
+ ' iterable object producing strings, typically a list '
+ 'of strings.\n'
+ ' There is no return value. (The name is intended to '
+ 'match\n'
+ ' "readlines()"; "writelines()" does not add line '
+ 'separators.)\n'
+ '\n'
+ 'Files support the iterator protocol. Each iteration '
+ 'returns the same\n'
+ 'result as "readline()", and iteration ends when the '
+ '"readline()"\n'
+ 'method returns an empty string.\n'
+ '\n'
+ 'File objects also offer a number of other interesting '
+ 'attributes.\n'
+ 'These are not required for file-like objects, but '
+ 'should be\n'
+ 'implemented if they make sense for the particular '
+ 'object.\n'
+ '\n'
+ 'file.closed\n'
+ '\n'
+ ' bool indicating the current state of the file '
+ 'object. This is a\n'
+ ' read-only attribute; the "close()" method changes '
+ 'the value. It may\n'
+ ' not be available on all file-like objects.\n'
+ '\n'
+ 'file.encoding\n'
+ '\n'
+ ' The encoding that this file uses. When Unicode '
+ 'strings are written\n'
+ ' to a file, they will be converted to byte strings '
+ 'using this\n'
+ ' encoding. In addition, when the file is connected '
+ 'to a terminal,\n'
+ ' the attribute gives the encoding that the terminal '
+ 'is likely to use\n'
+ ' (that information might be incorrect if the user '
+ 'has misconfigured\n'
+ ' the terminal). The attribute is read-only and may '
+ 'not be present\n'
+ ' on all file-like objects. It may also be "None", in '
+ 'which case the\n'
+ ' file uses the system default encoding for '
+ 'converting Unicode\n'
+ ' strings.\n'
+ '\n'
+ ' New in version 2.3.\n'
+ '\n'
+ 'file.errors\n'
+ '\n'
+ ' The Unicode error handler used along with the '
+ 'encoding.\n'
+ '\n'
+ ' New in version 2.6.\n'
+ '\n'
+ 'file.mode\n'
+ '\n'
+ ' The I/O mode for the file. If the file was created '
+ 'using the\n'
+ ' "open()" built-in function, this will be the value '
+ 'of the *mode*\n'
+ ' parameter. This is a read-only attribute and may '
+ 'not be present on\n'
+ ' all file-like objects.\n'
+ '\n'
+ 'file.name\n'
+ '\n'
+ ' If the file object was created using "open()", the '
+ 'name of the\n'
+ ' file. Otherwise, some string that indicates the '
+ 'source of the file\n'
+ ' object, of the form "<...>". This is a read-only '
+ 'attribute and may\n'
+ ' not be present on all file-like objects.\n'
+ '\n'
+ 'file.newlines\n'
+ '\n'
+ ' If Python was built with *universal newlines* '
+ 'enabled (the default)\n'
+ ' this read-only attribute exists, and for files '
+ 'opened in universal\n'
+ ' newline read mode it keeps track of the types of '
+ 'newlines\n'
+ ' encountered while reading the file. The values it '
+ 'can take are\n'
+ ' "\'\\r\'", "\'\\n\'", "\'\\r\\n\'", "None" '
+ '(unknown, no newlines read yet) or\n'
+ ' a tuple containing all the newline types seen, to '
+ 'indicate that\n'
+ ' multiple newline conventions were encountered. For '
+ 'files not opened\n'
+ ' in universal newlines read mode the value of this '
+ 'attribute will be\n'
+ ' "None".\n'
+ '\n'
+ 'file.softspace\n'
+ '\n'
+ ' Boolean that indicates whether a space character '
+ 'needs to be\n'
+ ' printed before another value when using the "print" '
+ 'statement.\n'
+ ' Classes that are trying to simulate a file object '
+ 'should also have\n'
+ ' a writable "softspace" attribute, which should be '
+ 'initialized to\n'
+ ' zero. This will be automatic for most classes '
+ 'implemented in\n'
+ ' Python (care may be needed for objects that '
+ 'override attribute\n'
+ ' access); types implemented in C will have to '
+ 'provide a writable\n'
+ ' "softspace" attribute.\n'
+ '\n'
+ ' Note: This attribute is not used to control the '
+ '"print"\n'
+ ' statement, but to allow the implementation of '
+ '"print" to keep\n'
+ ' track of its internal state.\n',
+ 'bltin-null-object': '\n'
+ 'The Null Object\n'
+ '***************\n'
+ '\n'
+ "This object is returned by functions that don't "
+ 'explicitly return a\n'
+ 'value. It supports no special operations. There is '
+ 'exactly one null\n'
+ 'object, named "None" (a built-in name).\n'
+ '\n'
+ 'It is written as "None".\n',
+ 'bltin-type-objects': '\n'
+ 'Type Objects\n'
+ '************\n'
+ '\n'
+ 'Type objects represent the various object types. An '
+ "object's type is\n"
+ 'accessed by the built-in function "type()". There are '
+ 'no special\n'
+ 'operations on types. The standard module "types" '
+ 'defines names for\n'
+ 'all standard built-in types.\n'
+ '\n'
+ 'Types are written like this: "<type \'int\'>".\n',
+ 'booleans': '\n'
+ 'Boolean operations\n'
+ '******************\n'
+ '\n'
+ ' or_test ::= and_test | or_test "or" and_test\n'
+ ' and_test ::= not_test | and_test "and" not_test\n'
+ ' not_test ::= comparison | "not" not_test\n'
+ '\n'
+ 'In the context of Boolean operations, and also when expressions '
+ 'are\n'
+ 'used by control flow statements, the following values are '
+ 'interpreted\n'
+ 'as false: "False", "None", numeric zero of all types, and empty\n'
+ 'strings and containers (including strings, tuples, lists,\n'
+ 'dictionaries, sets and frozensets). All other values are '
+ 'interpreted\n'
+ 'as true. (See the "__nonzero__()" special method for a way to '
+ 'change\n'
+ 'this.)\n'
+ '\n'
+ 'The operator "not" yields "True" if its argument is false, '
+ '"False"\n'
+ 'otherwise.\n'
+ '\n'
+ 'The expression "x and y" first evaluates *x*; if *x* is false, '
+ 'its\n'
+ 'value is returned; otherwise, *y* is evaluated and the resulting '
+ 'value\n'
+ 'is returned.\n'
+ '\n'
+ 'The expression "x or y" first evaluates *x*; if *x* is true, its '
+ 'value\n'
+ 'is returned; otherwise, *y* is evaluated and the resulting value '
+ 'is\n'
+ 'returned.\n'
+ '\n'
+ '(Note that neither "and" nor "or" restrict the value and type '
+ 'they\n'
+ 'return to "False" and "True", but rather return the last '
+ 'evaluated\n'
+ 'argument. This is sometimes useful, e.g., if "s" is a string '
+ 'that\n'
+ 'should be replaced by a default value if it is empty, the '
+ 'expression\n'
+ '"s or \'foo\'" yields the desired value. Because "not" has to '
+ 'invent a\n'
+ 'value anyway, it does not bother to return a value of the same '
+ 'type as\n'
+ 'its argument, so e.g., "not \'foo\'" yields "False", not '
+ '"\'\'".)\n',
+ 'break': '\n'
+ 'The "break" statement\n'
+ '*********************\n'
+ '\n'
+ ' break_stmt ::= "break"\n'
+ '\n'
+ '"break" may only occur syntactically nested in a "for" or "while"\n'
+ 'loop, but not nested in a function or class definition within that\n'
+ 'loop.\n'
+ '\n'
+ 'It terminates the nearest enclosing loop, skipping the optional '
+ '"else"\n'
+ 'clause if the loop has one.\n'
+ '\n'
+ 'If a "for" loop is terminated by "break", the loop control target\n'
+ 'keeps its current value.\n'
+ '\n'
+ 'When "break" passes control out of a "try" statement with a '
+ '"finally"\n'
+ 'clause, that "finally" clause is executed before really leaving '
+ 'the\n'
+ 'loop.\n',
+ 'callable-types': '\n'
+ 'Emulating callable objects\n'
+ '**************************\n'
+ '\n'
+ 'object.__call__(self[, args...])\n'
+ '\n'
+ ' Called when the instance is "called" as a function; if '
+ 'this method\n'
+ ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
+ ' "x.__call__(arg1, arg2, ...)".\n',
+ 'calls': '\n'
+ 'Calls\n'
+ '*****\n'
+ '\n'
+ 'A call calls a callable object (e.g., a *function*) with a '
+ 'possibly\n'
+ 'empty series of *arguments*:\n'
+ '\n'
+ ' call ::= primary "(" [argument_list [","]\n'
+ ' | expression genexpr_for] ")"\n'
+ ' argument_list ::= positional_arguments ["," '
+ 'keyword_arguments]\n'
+ ' ["," "*" expression] ["," '
+ 'keyword_arguments]\n'
+ ' ["," "**" expression]\n'
+ ' | keyword_arguments ["," "*" expression]\n'
+ ' ["," "**" expression]\n'
+ ' | "*" expression ["," keyword_arguments] ["," '
+ '"**" expression]\n'
+ ' | "**" expression\n'
+ ' positional_arguments ::= expression ("," expression)*\n'
+ ' keyword_arguments ::= keyword_item ("," keyword_item)*\n'
+ ' keyword_item ::= identifier "=" expression\n'
+ '\n'
+ 'A trailing comma may be present after the positional and keyword\n'
+ 'arguments but does not affect the semantics.\n'
+ '\n'
+ 'The primary must evaluate to a callable object (user-defined\n'
+ 'functions, built-in functions, methods of built-in objects, class\n'
+ 'objects, methods of class instances, and certain class instances\n'
+ 'themselves are callable; extensions may define additional callable\n'
+ 'object types). All argument expressions are evaluated before the '
+ 'call\n'
+ 'is attempted. Please refer to section Function definitions for '
+ 'the\n'
+ 'syntax of formal *parameter* lists.\n'
+ '\n'
+ 'If keyword arguments are present, they are first converted to\n'
+ 'positional arguments, as follows. First, a list of unfilled slots '
+ 'is\n'
+ 'created for the formal parameters. If there are N positional\n'
+ 'arguments, they are placed in the first N slots. Next, for each\n'
+ 'keyword argument, the identifier is used to determine the\n'
+ 'corresponding slot (if the identifier is the same as the first '
+ 'formal\n'
+ 'parameter name, the first slot is used, and so on). If the slot '
+ 'is\n'
+ 'already filled, a "TypeError" exception is raised. Otherwise, the\n'
+ 'value of the argument is placed in the slot, filling it (even if '
+ 'the\n'
+ 'expression is "None", it fills the slot). When all arguments have\n'
+ 'been processed, the slots that are still unfilled are filled with '
+ 'the\n'
+ 'corresponding default value from the function definition. '
+ '(Default\n'
+ 'values are calculated, once, when the function is defined; thus, a\n'
+ 'mutable object such as a list or dictionary used as default value '
+ 'will\n'
+ "be shared by all calls that don't specify an argument value for "
+ 'the\n'
+ 'corresponding slot; this should usually be avoided.) If there are '
+ 'any\n'
+ 'unfilled slots for which no default value is specified, a '
+ '"TypeError"\n'
+ 'exception is raised. Otherwise, the list of filled slots is used '
+ 'as\n'
+ 'the argument list for the call.\n'
+ '\n'
+ '**CPython implementation detail:** An implementation may provide\n'
+ 'built-in functions whose positional parameters do not have names, '
+ 'even\n'
+ "if they are 'named' for the purpose of documentation, and which\n"
+ 'therefore cannot be supplied by keyword. In CPython, this is the '
+ 'case\n'
+ 'for functions implemented in C that use "PyArg_ParseTuple()" to '
+ 'parse\n'
+ 'their arguments.\n'
+ '\n'
+ 'If there are more positional arguments than there are formal '
+ 'parameter\n'
+ 'slots, a "TypeError" exception is raised, unless a formal '
+ 'parameter\n'
+ 'using the syntax "*identifier" is present; in this case, that '
+ 'formal\n'
+ 'parameter receives a tuple containing the excess positional '
+ 'arguments\n'
+ '(or an empty tuple if there were no excess positional arguments).\n'
+ '\n'
+ 'If any keyword argument does not correspond to a formal parameter\n'
+ 'name, a "TypeError" exception is raised, unless a formal parameter\n'
+ 'using the syntax "**identifier" is present; in this case, that '
+ 'formal\n'
+ 'parameter receives a dictionary containing the excess keyword\n'
+ 'arguments (using the keywords as keys and the argument values as\n'
+ 'corresponding values), or a (new) empty dictionary if there were '
+ 'no\n'
+ 'excess keyword arguments.\n'
+ '\n'
+ 'If the syntax "*expression" appears in the function call, '
+ '"expression"\n'
+ 'must evaluate to an iterable. Elements from this iterable are '
+ 'treated\n'
+ 'as if they were additional positional arguments; if there are\n'
+ 'positional arguments *x1*, ..., *xN*, and "expression" evaluates to '
+ 'a\n'
+ 'sequence *y1*, ..., *yM*, this is equivalent to a call with M+N\n'
+ 'positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n'
+ '\n'
+ 'A consequence of this is that although the "*expression" syntax '
+ 'may\n'
+ 'appear *after* some keyword arguments, it is processed *before* '
+ 'the\n'
+ 'keyword arguments (and the "**expression" argument, if any -- see\n'
+ 'below). So:\n'
+ '\n'
+ ' >>> def f(a, b):\n'
+ ' ... print a, b\n'
+ ' ...\n'
+ ' >>> f(b=1, *(2,))\n'
+ ' 2 1\n'
+ ' >>> f(a=1, *(2,))\n'
+ ' Traceback (most recent call last):\n'
+ ' File "<stdin>", line 1, in <module>\n'
+ " TypeError: f() got multiple values for keyword argument 'a'\n"
+ ' >>> f(1, *(2,))\n'
+ ' 1 2\n'
+ '\n'
+ 'It is unusual for both keyword arguments and the "*expression" '
+ 'syntax\n'
+ 'to be used in the same call, so in practice this confusion does '
+ 'not\n'
+ 'arise.\n'
+ '\n'
+ 'If the syntax "**expression" appears in the function call,\n'
+ '"expression" must evaluate to a mapping, the contents of which are\n'
+ 'treated as additional keyword arguments. In the case of a keyword\n'
+ 'appearing in both "expression" and as an explicit keyword argument, '
+ 'a\n'
+ '"TypeError" exception is raised.\n'
+ '\n'
+ 'Formal parameters using the syntax "*identifier" or "**identifier"\n'
+ 'cannot be used as positional argument slots or as keyword argument\n'
+ 'names. Formal parameters using the syntax "(sublist)" cannot be '
+ 'used\n'
+ 'as keyword argument names; the outermost sublist corresponds to a\n'
+ 'single unnamed argument slot, and the argument value is assigned '
+ 'to\n'
+ 'the sublist using the usual tuple assignment rules after all other\n'
+ 'parameter processing is done.\n'
+ '\n'
+ 'A call always returns some value, possibly "None", unless it raises '
+ 'an\n'
+ 'exception. How this value is computed depends on the type of the\n'
+ 'callable object.\n'
+ '\n'
+ 'If it is---\n'
+ '\n'
+ 'a user-defined function:\n'
+ ' The code block for the function is executed, passing it the\n'
+ ' argument list. The first thing the code block will do is bind '
+ 'the\n'
+ ' formal parameters to the arguments; this is described in '
+ 'section\n'
+ ' Function definitions. When the code block executes a "return"\n'
+ ' statement, this specifies the return value of the function '
+ 'call.\n'
+ '\n'
+ 'a built-in function or method:\n'
+ ' The result is up to the interpreter; see Built-in Functions for '
+ 'the\n'
+ ' descriptions of built-in functions and methods.\n'
+ '\n'
+ 'a class object:\n'
+ ' A new instance of that class is returned.\n'
+ '\n'
+ 'a class instance method:\n'
+ ' The corresponding user-defined function is called, with an '
+ 'argument\n'
+ ' list that is one longer than the argument list of the call: the\n'
+ ' instance becomes the first argument.\n'
+ '\n'
+ 'a class instance:\n'
+ ' The class must define a "__call__()" method; the effect is then '
+ 'the\n'
+ ' same as if that method was called.\n',
+ 'class': '\n'
+ 'Class definitions\n'
+ '*****************\n'
+ '\n'
+ 'A class definition defines a class object (see section The '
+ 'standard\n'
+ 'type hierarchy):\n'
+ '\n'
+ ' classdef ::= "class" classname [inheritance] ":" suite\n'
+ ' inheritance ::= "(" [expression_list] ")"\n'
+ ' classname ::= identifier\n'
+ '\n'
+ 'A class definition is an executable statement. It first evaluates '
+ 'the\n'
+ 'inheritance list, if present. Each item in the inheritance list\n'
+ 'should evaluate to a class object or class type which allows\n'
+ "subclassing. The class's suite is then executed in a new "
+ 'execution\n'
+ 'frame (see section Naming and binding), using a newly created '
+ 'local\n'
+ 'namespace and the original global namespace. (Usually, the suite\n'
+ "contains only function definitions.) When the class's suite "
+ 'finishes\n'
+ 'execution, its execution frame is discarded but its local namespace '
+ 'is\n'
+ 'saved. [4] A class object is then created using the inheritance '
+ 'list\n'
+ 'for the base classes and the saved local namespace for the '
+ 'attribute\n'
+ 'dictionary. The class name is bound to this class object in the\n'
+ 'original local namespace.\n'
+ '\n'
+ "**Programmer's note:** Variables defined in the class definition "
+ 'are\n'
+ 'class variables; they are shared by all instances. To create '
+ 'instance\n'
+ 'variables, they can be set in a method with "self.name = value". '
+ 'Both\n'
+ 'class and instance variables are accessible through the notation\n'
+ '""self.name"", and an instance variable hides a class variable '
+ 'with\n'
+ 'the same name when accessed in this way. Class variables can be '
+ 'used\n'
+ 'as defaults for instance variables, but using mutable values there '
+ 'can\n'
+ 'lead to unexpected results. For *new-style class*es, descriptors '
+ 'can\n'
+ 'be used to create instance variables with different implementation\n'
+ 'details.\n'
+ '\n'
+ 'Class definitions, like function definitions, may be wrapped by one '
+ 'or\n'
+ 'more *decorator* expressions. The evaluation rules for the '
+ 'decorator\n'
+ 'expressions are the same as for functions. The result must be a '
+ 'class\n'
+ 'object, which is then bound to the class name.\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] The exception is propagated to the invocation stack unless\n'
+ ' there is a "finally" clause which happens to raise another\n'
+ ' exception. That new exception causes the old one to be lost.\n'
+ '\n'
+ '[2] Currently, control "flows off the end" except in the case of\n'
+ ' an exception or the execution of a "return", "continue", or\n'
+ ' "break" statement.\n'
+ '\n'
+ '[3] A string literal appearing as the first statement in the\n'
+ ' function body is transformed into the function\'s "__doc__"\n'
+ " attribute and therefore the function's *docstring*.\n"
+ '\n'
+ '[4] A string literal appearing as the first statement in the class\n'
+ ' body is transformed into the namespace\'s "__doc__" item and\n'
+ " therefore the class's *docstring*.\n",
+ 'comparisons': '\n'
+ 'Comparisons\n'
+ '***********\n'
+ '\n'
+ 'Unlike C, all comparison operations in Python have the same '
+ 'priority,\n'
+ 'which is lower than that of any arithmetic, shifting or '
+ 'bitwise\n'
+ 'operation. Also unlike C, expressions like "a < b < c" have '
+ 'the\n'
+ 'interpretation that is conventional in mathematics:\n'
+ '\n'
+ ' comparison ::= or_expr ( comp_operator or_expr )*\n'
+ ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "<>" | '
+ '"!="\n'
+ ' | "is" ["not"] | ["not"] "in"\n'
+ '\n'
+ 'Comparisons yield boolean values: "True" or "False".\n'
+ '\n'
+ 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" '
+ 'is\n'
+ 'equivalent to "x < y and y <= z", except that "y" is '
+ 'evaluated only\n'
+ 'once (but in both cases "z" is not evaluated at all when "x < '
+ 'y" is\n'
+ 'found to be false).\n'
+ '\n'
+ 'Formally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and '
+ '*op1*,\n'
+ '*op2*, ..., *opN* are comparison operators, then "a op1 b op2 '
+ 'c ... y\n'
+ 'opN z" is equivalent to "a op1 b and b op2 c and ... y opN '
+ 'z", except\n'
+ 'that each expression is evaluated at most once.\n'
+ '\n'
+ 'Note that "a op1 b op2 c" doesn\'t imply any kind of '
+ 'comparison between\n'
+ '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal '
+ '(though\n'
+ 'perhaps not pretty).\n'
+ '\n'
+ 'The forms "<>" and "!=" are equivalent; for consistency with '
+ 'C, "!="\n'
+ 'is preferred; where "!=" is mentioned below "<>" is also '
+ 'accepted.\n'
+ 'The "<>" spelling is considered obsolescent.\n'
+ '\n'
+ '\n'
+ 'Value comparisons\n'
+ '=================\n'
+ '\n'
+ 'The operators "<", ">", "==", ">=", "<=", and "!=" compare '
+ 'the values\n'
+ 'of two objects. The objects do not need to have the same '
+ 'type.\n'
+ '\n'
+ 'Chapter Objects, values and types states that objects have a '
+ 'value (in\n'
+ 'addition to type and identity). The value of an object is a '
+ 'rather\n'
+ 'abstract notion in Python: For example, there is no canonical '
+ 'access\n'
+ "method for an object's value. Also, there is no requirement "
+ 'that the\n'
+ 'value of an object should be constructed in a particular way, '
+ 'e.g.\n'
+ 'comprised of all its data attributes. Comparison operators '
+ 'implement a\n'
+ 'particular notion of what the value of an object is. One can '
+ 'think of\n'
+ 'them as defining the value of an object indirectly, by means '
+ 'of their\n'
+ 'comparison implementation.\n'
+ '\n'
+ 'Types can customize their comparison behavior by implementing '
+ 'a\n'
+ '"__cmp__()" method or *rich comparison methods* like '
+ '"__lt__()",\n'
+ 'described in Basic customization.\n'
+ '\n'
+ 'The default behavior for equality comparison ("==" and "!=") '
+ 'is based\n'
+ 'on the identity of the objects. Hence, equality comparison '
+ 'of\n'
+ 'instances with the same identity results in equality, and '
+ 'equality\n'
+ 'comparison of instances with different identities results in\n'
+ 'inequality. A motivation for this default behavior is the '
+ 'desire that\n'
+ 'all objects should be reflexive (i.e. "x is y" implies "x == '
+ 'y").\n'
+ '\n'
+ 'The default order comparison ("<", ">", "<=", and ">=") gives '
+ 'a\n'
+ 'consistent but arbitrary order.\n'
+ '\n'
+ '(This unusual definition of comparison was used to simplify '
+ 'the\n'
+ 'definition of operations like sorting and the "in" and "not '
+ 'in"\n'
+ 'operators. In the future, the comparison rules for objects '
+ 'of\n'
+ 'different types are likely to change.)\n'
+ '\n'
+ 'The behavior of the default equality comparison, that '
+ 'instances with\n'
+ 'different identities are always unequal, may be in contrast '
+ 'to what\n'
+ 'types will need that have a sensible definition of object '
+ 'value and\n'
+ 'value-based equality. Such types will need to customize '
+ 'their\n'
+ 'comparison behavior, and in fact, a number of built-in types '
+ 'have done\n'
+ 'that.\n'
+ '\n'
+ 'The following list describes the comparison behavior of the '
+ 'most\n'
+ 'important built-in types.\n'
+ '\n'
+ '* Numbers of built-in numeric types (Numeric Types --- int, '
+ 'float,\n'
+ ' long, complex) and of the standard library types\n'
+ ' "fractions.Fraction" and "decimal.Decimal" can be compared '
+ 'within\n'
+ ' and across their types, with the restriction that complex '
+ 'numbers do\n'
+ ' not support order comparison. Within the limits of the '
+ 'types\n'
+ ' involved, they compare mathematically (algorithmically) '
+ 'correct\n'
+ ' without loss of precision.\n'
+ '\n'
+ '* Strings (instances of "str" or "unicode") compare\n'
+ ' lexicographically using the numeric equivalents (the result '
+ 'of the\n'
+ ' built-in function "ord()") of their characters. [4] When '
+ 'comparing\n'
+ ' an 8-bit string and a Unicode string, the 8-bit string is '
+ 'converted\n'
+ ' to Unicode. If the conversion fails, the strings are '
+ 'considered\n'
+ ' unequal.\n'
+ '\n'
+ '* Instances of "tuple" or "list" can be compared only within '
+ 'each of\n'
+ ' their types. Equality comparison across these types '
+ 'results in\n'
+ ' unequality, and ordering comparison across these types '
+ 'gives an\n'
+ ' arbitrary order.\n'
+ '\n'
+ ' These sequences compare lexicographically using comparison '
+ 'of\n'
+ ' corresponding elements, whereby reflexivity of the elements '
+ 'is\n'
+ ' enforced.\n'
+ '\n'
+ ' In enforcing reflexivity of elements, the comparison of '
+ 'collections\n'
+ ' assumes that for a collection element "x", "x == x" is '
+ 'always true.\n'
+ ' Based on that assumption, element identity is compared '
+ 'first, and\n'
+ ' element comparison is performed only for distinct '
+ 'elements. This\n'
+ ' approach yields the same result as a strict element '
+ 'comparison\n'
+ ' would, if the compared elements are reflexive. For '
+ 'non-reflexive\n'
+ ' elements, the result is different than for strict element\n'
+ ' comparison.\n'
+ '\n'
+ ' Lexicographical comparison between built-in collections '
+ 'works as\n'
+ ' follows:\n'
+ '\n'
+ ' * For two collections to compare equal, they must be of the '
+ 'same\n'
+ ' type, have the same length, and each pair of '
+ 'corresponding\n'
+ ' elements must compare equal (for example, "[1,2] == '
+ '(1,2)" is\n'
+ ' false because the type is not the same).\n'
+ '\n'
+ ' * Collections are ordered the same as their first unequal '
+ 'elements\n'
+ ' (for example, "cmp([1,2,x], [1,2,y])" returns the same '
+ 'as\n'
+ ' "cmp(x,y)"). If a corresponding element does not exist, '
+ 'the\n'
+ ' shorter collection is ordered first (for example, "[1,2] '
+ '<\n'
+ ' [1,2,3]" is true).\n'
+ '\n'
+ '* Mappings (instances of "dict") compare equal if and only if '
+ 'they\n'
+ ' have equal *(key, value)* pairs. Equality comparison of the '
+ 'keys and\n'
+ ' values enforces reflexivity.\n'
+ '\n'
+ ' Outcomes other than equality are resolved consistently, but '
+ 'are not\n'
+ ' otherwise defined. [5]\n'
+ '\n'
+ '* Most other objects of built-in types compare unequal unless '
+ 'they\n'
+ ' are the same object; the choice whether one object is '
+ 'considered\n'
+ ' smaller or larger than another one is made arbitrarily but\n'
+ ' consistently within one execution of a program.\n'
+ '\n'
+ 'User-defined classes that customize their comparison behavior '
+ 'should\n'
+ 'follow some consistency rules, if possible:\n'
+ '\n'
+ '* Equality comparison should be reflexive. In other words, '
+ 'identical\n'
+ ' objects should compare equal:\n'
+ '\n'
+ ' "x is y" implies "x == y"\n'
+ '\n'
+ '* Comparison should be symmetric. In other words, the '
+ 'following\n'
+ ' expressions should have the same result:\n'
+ '\n'
+ ' "x == y" and "y == x"\n'
+ '\n'
+ ' "x != y" and "y != x"\n'
+ '\n'
+ ' "x < y" and "y > x"\n'
+ '\n'
+ ' "x <= y" and "y >= x"\n'
+ '\n'
+ '* Comparison should be transitive. The following '
+ '(non-exhaustive)\n'
+ ' examples illustrate that:\n'
+ '\n'
+ ' "x > y and y > z" implies "x > z"\n'
+ '\n'
+ ' "x < y and y <= z" implies "x < z"\n'
+ '\n'
+ '* Inverse comparison should result in the boolean negation. '
+ 'In other\n'
+ ' words, the following expressions should have the same '
+ 'result:\n'
+ '\n'
+ ' "x == y" and "not x != y"\n'
+ '\n'
+ ' "x < y" and "not x >= y" (for total ordering)\n'
+ '\n'
+ ' "x > y" and "not x <= y" (for total ordering)\n'
+ '\n'
+ ' The last two expressions apply to totally ordered '
+ 'collections (e.g.\n'
+ ' to sequences, but not to sets or mappings). See also the\n'
+ ' "total_ordering()" decorator.\n'
+ '\n'
+ '* The "hash()" result should be consistent with equality. '
+ 'Objects\n'
+ ' that are equal should either have the same hash value, or '
+ 'be marked\n'
+ ' as unhashable.\n'
+ '\n'
+ 'Python does not enforce these consistency rules.\n'
+ '\n'
+ '\n'
+ 'Membership test operations\n'
+ '==========================\n'
+ '\n'
+ 'The operators "in" and "not in" test for membership. "x in '
+ 's"\n'
+ 'evaluates to "True" if *x* is a member of *s*, and "False" '
+ 'otherwise.\n'
+ '"x not in s" returns the negation of "x in s". All built-in '
+ 'sequences\n'
+ 'and set types support this as well as dictionary, for which '
+ '"in" tests\n'
+ 'whether the dictionary has a given key. For container types '
+ 'such as\n'
+ 'list, tuple, set, frozenset, dict, or collections.deque, the\n'
+ 'expression "x in y" is equivalent to "any(x is e or x == e '
+ 'for e in\n'
+ 'y)".\n'
+ '\n'
+ 'For the string and bytes types, "x in y" is "True" if and '
+ 'only if *x*\n'
+ 'is a substring of *y*. An equivalent test is "y.find(x) != '
+ '-1".\n'
+ 'Empty strings are always considered to be a substring of any '
+ 'other\n'
+ 'string, so """ in "abc"" will return "True".\n'
+ '\n'
+ 'For user-defined classes which define the "__contains__()" '
+ 'method, "x\n'
+ 'in y" returns "True" if "y.__contains__(x)" returns a true '
+ 'value, and\n'
+ '"False" otherwise.\n'
+ '\n'
+ 'For user-defined classes which do not define "__contains__()" '
+ 'but do\n'
+ 'define "__iter__()", "x in y" is "True" if some value "z" '
+ 'with "x ==\n'
+ 'z" is produced while iterating over "y". If an exception is '
+ 'raised\n'
+ 'during the iteration, it is as if "in" raised that '
+ 'exception.\n'
+ '\n'
+ 'Lastly, the old-style iteration protocol is tried: if a class '
+ 'defines\n'
+ '"__getitem__()", "x in y" is "True" if and only if there is a '
+ 'non-\n'
+ 'negative integer index *i* such that "x == y[i]", and all '
+ 'lower\n'
+ 'integer indices do not raise "IndexError" exception. (If any '
+ 'other\n'
+ 'exception is raised, it is as if "in" raised that '
+ 'exception).\n'
+ '\n'
+ 'The operator "not in" is defined to have the inverse true '
+ 'value of\n'
+ '"in".\n'
+ '\n'
+ '\n'
+ 'Identity comparisons\n'
+ '====================\n'
+ '\n'
+ 'The operators "is" and "is not" test for object identity: "x '
+ 'is y" is\n'
+ 'true if and only if *x* and *y* are the same object. "x is '
+ 'not y"\n'
+ 'yields the inverse truth value. [6]\n',
+ 'compound': '\n'
+ 'Compound statements\n'
+ '*******************\n'
+ '\n'
+ 'Compound statements contain (groups of) other statements; they '
+ 'affect\n'
+ 'or control the execution of those other statements in some way. '
+ 'In\n'
+ 'general, compound statements span multiple lines, although in '
+ 'simple\n'
+ 'incarnations a whole compound statement may be contained in one '
+ 'line.\n'
+ '\n'
+ 'The "if", "while" and "for" statements implement traditional '
+ 'control\n'
+ 'flow constructs. "try" specifies exception handlers and/or '
+ 'cleanup\n'
+ 'code for a group of statements. Function and class definitions '
+ 'are\n'
+ 'also syntactically compound statements.\n'
+ '\n'
+ "Compound statements consist of one or more 'clauses.' A clause\n"
+ "consists of a header and a 'suite.' The clause headers of a\n"
+ 'particular compound statement are all at the same indentation '
+ 'level.\n'
+ 'Each clause header begins with a uniquely identifying keyword '
+ 'and ends\n'
+ 'with a colon. A suite is a group of statements controlled by a\n'
+ 'clause. A suite can be one or more semicolon-separated simple\n'
+ 'statements on the same line as the header, following the '
+ "header's\n"
+ 'colon, or it can be one or more indented statements on '
+ 'subsequent\n'
+ 'lines. Only the latter form of suite can contain nested '
+ 'compound\n'
+ "statements; the following is illegal, mostly because it wouldn't "
+ 'be\n'
+ 'clear to which "if" clause a following "else" clause would '
+ 'belong:\n'
+ '\n'
+ ' if test1: if test2: print x\n'
+ '\n'
+ 'Also note that the semicolon binds tighter than the colon in '
+ 'this\n'
+ 'context, so that in the following example, either all or none of '
+ 'the\n'
+ '"print" statements are executed:\n'
+ '\n'
+ ' if x < y < z: print x; print y; print z\n'
+ '\n'
+ 'Summarizing:\n'
+ '\n'
+ ' compound_stmt ::= if_stmt\n'
+ ' | while_stmt\n'
+ ' | for_stmt\n'
+ ' | try_stmt\n'
+ ' | with_stmt\n'
+ ' | funcdef\n'
+ ' | classdef\n'
+ ' | decorated\n'
+ ' suite ::= stmt_list NEWLINE | NEWLINE INDENT '
+ 'statement+ DEDENT\n'
+ ' statement ::= stmt_list NEWLINE | compound_stmt\n'
+ ' stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n'
+ '\n'
+ 'Note that statements always end in a "NEWLINE" possibly followed '
+ 'by a\n'
+ '"DEDENT". Also note that optional continuation clauses always '
+ 'begin\n'
+ 'with a keyword that cannot start a statement, thus there are no\n'
+ 'ambiguities (the \'dangling "else"\' problem is solved in Python '
+ 'by\n'
+ 'requiring nested "if" statements to be indented).\n'
+ '\n'
+ 'The formatting of the grammar rules in the following sections '
+ 'places\n'
+ 'each clause on a separate line for clarity.\n'
+ '\n'
+ '\n'
+ 'The "if" statement\n'
+ '==================\n'
+ '\n'
+ 'The "if" statement is used for conditional execution:\n'
+ '\n'
+ ' if_stmt ::= "if" expression ":" suite\n'
+ ' ( "elif" expression ":" suite )*\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'It selects exactly one of the suites by evaluating the '
+ 'expressions one\n'
+ 'by one until one is found to be true (see section Boolean '
+ 'operations\n'
+ 'for the definition of true and false); then that suite is '
+ 'executed\n'
+ '(and no other part of the "if" statement is executed or '
+ 'evaluated).\n'
+ 'If all expressions are false, the suite of the "else" clause, '
+ 'if\n'
+ 'present, is executed.\n'
+ '\n'
+ '\n'
+ 'The "while" statement\n'
+ '=====================\n'
+ '\n'
+ 'The "while" statement is used for repeated execution as long as '
+ 'an\n'
+ 'expression is true:\n'
+ '\n'
+ ' while_stmt ::= "while" expression ":" suite\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'This repeatedly tests the expression and, if it is true, '
+ 'executes the\n'
+ 'first suite; if the expression is false (which may be the first '
+ 'time\n'
+ 'it is tested) the suite of the "else" clause, if present, is '
+ 'executed\n'
+ 'and the loop terminates.\n'
+ '\n'
+ 'A "break" statement executed in the first suite terminates the '
+ 'loop\n'
+ 'without executing the "else" clause\'s suite. A "continue" '
+ 'statement\n'
+ 'executed in the first suite skips the rest of the suite and goes '
+ 'back\n'
+ 'to testing the expression.\n'
+ '\n'
+ '\n'
+ 'The "for" statement\n'
+ '===================\n'
+ '\n'
+ 'The "for" statement is used to iterate over the elements of a '
+ 'sequence\n'
+ '(such as a string, tuple or list) or other iterable object:\n'
+ '\n'
+ ' for_stmt ::= "for" target_list "in" expression_list ":" '
+ 'suite\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'The expression list is evaluated once; it should yield an '
+ 'iterable\n'
+ 'object. An iterator is created for the result of the\n'
+ '"expression_list". The suite is then executed once for each '
+ 'item\n'
+ 'provided by the iterator, in the order of ascending indices. '
+ 'Each\n'
+ 'item in turn is assigned to the target list using the standard '
+ 'rules\n'
+ 'for assignments, and then the suite is executed. When the items '
+ 'are\n'
+ 'exhausted (which is immediately when the sequence is empty), the '
+ 'suite\n'
+ 'in the "else" clause, if present, is executed, and the loop\n'
+ 'terminates.\n'
+ '\n'
+ 'A "break" statement executed in the first suite terminates the '
+ 'loop\n'
+ 'without executing the "else" clause\'s suite. A "continue" '
+ 'statement\n'
+ 'executed in the first suite skips the rest of the suite and '
+ 'continues\n'
+ 'with the next item, or with the "else" clause if there was no '
+ 'next\n'
+ 'item.\n'
+ '\n'
+ 'The suite may assign to the variable(s) in the target list; this '
+ 'does\n'
+ 'not affect the next item assigned to it.\n'
+ '\n'
+ 'The target list is not deleted when the loop is finished, but if '
+ 'the\n'
+ 'sequence is empty, it will not have been assigned to at all by '
+ 'the\n'
+ 'loop. Hint: the built-in function "range()" returns a sequence '
+ 'of\n'
+ 'integers suitable to emulate the effect of Pascal\'s "for i := a '
+ 'to b\n'
+ 'do"; e.g., "range(3)" returns the list "[0, 1, 2]".\n'
+ '\n'
+ 'Note: There is a subtlety when the sequence is being modified by '
+ 'the\n'
+ ' loop (this can only occur for mutable sequences, i.e. lists). '
+ 'An\n'
+ ' internal counter is used to keep track of which item is used '
+ 'next,\n'
+ ' and this is incremented on each iteration. When this counter '
+ 'has\n'
+ ' reached the length of the sequence the loop terminates. This '
+ 'means\n'
+ ' that if the suite deletes the current (or a previous) item '
+ 'from the\n'
+ ' sequence, the next item will be skipped (since it gets the '
+ 'index of\n'
+ ' the current item which has already been treated). Likewise, '
+ 'if the\n'
+ ' suite inserts an item in the sequence before the current item, '
+ 'the\n'
+ ' current item will be treated again the next time through the '
+ 'loop.\n'
+ ' This can lead to nasty bugs that can be avoided by making a\n'
+ ' temporary copy using a slice of the whole sequence, e.g.,\n'
+ '\n'
+ ' for x in a[:]:\n'
+ ' if x < 0: a.remove(x)\n'
+ '\n'
+ '\n'
+ 'The "try" statement\n'
+ '===================\n'
+ '\n'
+ 'The "try" statement specifies exception handlers and/or cleanup '
+ 'code\n'
+ 'for a group of statements:\n'
+ '\n'
+ ' try_stmt ::= try1_stmt | try2_stmt\n'
+ ' try1_stmt ::= "try" ":" suite\n'
+ ' ("except" [expression [("as" | ",") '
+ 'identifier]] ":" suite)+\n'
+ ' ["else" ":" suite]\n'
+ ' ["finally" ":" suite]\n'
+ ' try2_stmt ::= "try" ":" suite\n'
+ ' "finally" ":" suite\n'
+ '\n'
+ 'Changed in version 2.5: In previous versions of Python,\n'
+ '"try"..."except"..."finally" did not work. "try"..."except" had '
+ 'to be\n'
+ 'nested in "try"..."finally".\n'
+ '\n'
+ 'The "except" clause(s) specify one or more exception handlers. '
+ 'When no\n'
+ 'exception occurs in the "try" clause, no exception handler is\n'
+ 'executed. When an exception occurs in the "try" suite, a search '
+ 'for an\n'
+ 'exception handler is started. This search inspects the except '
+ 'clauses\n'
+ 'in turn until one is found that matches the exception. An '
+ 'expression-\n'
+ 'less except clause, if present, must be last; it matches any\n'
+ 'exception. For an except clause with an expression, that '
+ 'expression\n'
+ 'is evaluated, and the clause matches the exception if the '
+ 'resulting\n'
+ 'object is "compatible" with the exception. An object is '
+ 'compatible\n'
+ 'with an exception if it is the class or a base class of the '
+ 'exception\n'
+ 'object, or a tuple containing an item compatible with the '
+ 'exception.\n'
+ '\n'
+ 'If no except clause matches the exception, the search for an '
+ 'exception\n'
+ 'handler continues in the surrounding code and on the invocation '
+ 'stack.\n'
+ '[1]\n'
+ '\n'
+ 'If the evaluation of an expression in the header of an except '
+ 'clause\n'
+ 'raises an exception, the original search for a handler is '
+ 'canceled and\n'
+ 'a search starts for the new exception in the surrounding code '
+ 'and on\n'
+ 'the call stack (it is treated as if the entire "try" statement '
+ 'raised\n'
+ 'the exception).\n'
+ '\n'
+ 'When a matching except clause is found, the exception is '
+ 'assigned to\n'
+ 'the target specified in that except clause, if present, and the '
+ 'except\n'
+ "clause's suite is executed. All except clauses must have an\n"
+ 'executable block. When the end of this block is reached, '
+ 'execution\n'
+ 'continues normally after the entire try statement. (This means '
+ 'that\n'
+ 'if two nested handlers exist for the same exception, and the '
+ 'exception\n'
+ 'occurs in the try clause of the inner handler, the outer handler '
+ 'will\n'
+ 'not handle the exception.)\n'
+ '\n'
+ "Before an except clause's suite is executed, details about the\n"
+ 'exception are assigned to three variables in the "sys" module:\n'
+ '"sys.exc_type" receives the object identifying the exception;\n'
+ '"sys.exc_value" receives the exception\'s parameter;\n'
+ '"sys.exc_traceback" receives a traceback object (see section '
+ 'The\n'
+ 'standard type hierarchy) identifying the point in the program '
+ 'where\n'
+ 'the exception occurred. These details are also available through '
+ 'the\n'
+ '"sys.exc_info()" function, which returns a tuple "(exc_type,\n'
+ 'exc_value, exc_traceback)". Use of the corresponding variables '
+ 'is\n'
+ 'deprecated in favor of this function, since their use is unsafe '
+ 'in a\n'
+ 'threaded program. As of Python 1.5, the variables are restored '
+ 'to\n'
+ 'their previous values (before the call) when returning from a '
+ 'function\n'
+ 'that handled an exception.\n'
+ '\n'
+ 'The optional "else" clause is executed if and when control flows '
+ 'off\n'
+ 'the end of the "try" clause. [2] Exceptions in the "else" clause '
+ 'are\n'
+ 'not handled by the preceding "except" clauses.\n'
+ '\n'
+ 'If "finally" is present, it specifies a \'cleanup\' handler. '
+ 'The "try"\n'
+ 'clause is executed, including any "except" and "else" clauses. '
+ 'If an\n'
+ 'exception occurs in any of the clauses and is not handled, the\n'
+ 'exception is temporarily saved. The "finally" clause is '
+ 'executed. If\n'
+ 'there is a saved exception, it is re-raised at the end of the\n'
+ '"finally" clause. If the "finally" clause raises another '
+ 'exception or\n'
+ 'executes a "return" or "break" statement, the saved exception '
+ 'is\n'
+ 'discarded:\n'
+ '\n'
+ ' >>> def f():\n'
+ ' ... try:\n'
+ ' ... 1/0\n'
+ ' ... finally:\n'
+ ' ... return 42\n'
+ ' ...\n'
+ ' >>> f()\n'
+ ' 42\n'
+ '\n'
+ 'The exception information is not available to the program '
+ 'during\n'
+ 'execution of the "finally" clause.\n'
+ '\n'
+ 'When a "return", "break" or "continue" statement is executed in '
+ 'the\n'
+ '"try" suite of a "try"..."finally" statement, the "finally" '
+ 'clause is\n'
+ 'also executed \'on the way out.\' A "continue" statement is '
+ 'illegal in\n'
+ 'the "finally" clause. (The reason is a problem with the current\n'
+ 'implementation --- this restriction may be lifted in the '
+ 'future).\n'
+ '\n'
+ 'The return value of a function is determined by the last '
+ '"return"\n'
+ 'statement executed. Since the "finally" clause always executes, '
+ 'a\n'
+ '"return" statement executed in the "finally" clause will always '
+ 'be the\n'
+ 'last one executed:\n'
+ '\n'
+ ' >>> def foo():\n'
+ ' ... try:\n'
+ " ... return 'try'\n"
+ ' ... finally:\n'
+ " ... return 'finally'\n"
+ ' ...\n'
+ ' >>> foo()\n'
+ " 'finally'\n"
+ '\n'
+ 'Additional information on exceptions can be found in section\n'
+ 'Exceptions, and information on using the "raise" statement to '
+ 'generate\n'
+ 'exceptions may be found in section The raise statement.\n'
+ '\n'
+ '\n'
+ 'The "with" statement\n'
+ '====================\n'
+ '\n'
+ 'New in version 2.5.\n'
+ '\n'
+ 'The "with" statement is used to wrap the execution of a block '
+ 'with\n'
+ 'methods defined by a context manager (see section With '
+ 'Statement\n'
+ 'Context Managers). This allows common '
+ '"try"..."except"..."finally"\n'
+ 'usage patterns to be encapsulated for convenient reuse.\n'
+ '\n'
+ ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
+ ' with_item ::= expression ["as" target]\n'
+ '\n'
+ 'The execution of the "with" statement with one "item" proceeds '
+ 'as\n'
+ 'follows:\n'
+ '\n'
+ '1. The context expression (the expression given in the '
+ '"with_item")\n'
+ ' is evaluated to obtain a context manager.\n'
+ '\n'
+ '2. The context manager\'s "__exit__()" is loaded for later use.\n'
+ '\n'
+ '3. The context manager\'s "__enter__()" method is invoked.\n'
+ '\n'
+ '4. If a target was included in the "with" statement, the return\n'
+ ' value from "__enter__()" is assigned to it.\n'
+ '\n'
+ ' Note: The "with" statement guarantees that if the '
+ '"__enter__()"\n'
+ ' method returns without an error, then "__exit__()" will '
+ 'always be\n'
+ ' called. Thus, if an error occurs during the assignment to '
+ 'the\n'
+ ' target list, it will be treated the same as an error '
+ 'occurring\n'
+ ' within the suite would be. See step 6 below.\n'
+ '\n'
+ '5. The suite is executed.\n'
+ '\n'
+ '6. The context manager\'s "__exit__()" method is invoked. If an\n'
+ ' exception caused the suite to be exited, its type, value, '
+ 'and\n'
+ ' traceback are passed as arguments to "__exit__()". Otherwise, '
+ 'three\n'
+ ' "None" arguments are supplied.\n'
+ '\n'
+ ' If the suite was exited due to an exception, and the return '
+ 'value\n'
+ ' from the "__exit__()" method was false, the exception is '
+ 'reraised.\n'
+ ' If the return value was true, the exception is suppressed, '
+ 'and\n'
+ ' execution continues with the statement following the "with"\n'
+ ' statement.\n'
+ '\n'
+ ' If the suite was exited for any reason other than an '
+ 'exception, the\n'
+ ' return value from "__exit__()" is ignored, and execution '
+ 'proceeds\n'
+ ' at the normal location for the kind of exit that was taken.\n'
+ '\n'
+ 'With more than one item, the context managers are processed as '
+ 'if\n'
+ 'multiple "with" statements were nested:\n'
+ '\n'
+ ' with A() as a, B() as b:\n'
+ ' suite\n'
+ '\n'
+ 'is equivalent to\n'
+ '\n'
+ ' with A() as a:\n'
+ ' with B() as b:\n'
+ ' suite\n'
+ '\n'
+ 'Note: In Python 2.5, the "with" statement is only allowed when '
+ 'the\n'
+ ' "with_statement" feature has been enabled. It is always '
+ 'enabled in\n'
+ ' Python 2.6.\n'
+ '\n'
+ 'Changed in version 2.7: Support for multiple context '
+ 'expressions.\n'
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 343** - The "with" statement\n'
+ ' The specification, background, and examples for the Python '
+ '"with"\n'
+ ' statement.\n'
+ '\n'
+ '\n'
+ 'Function definitions\n'
+ '====================\n'
+ '\n'
+ 'A function definition defines a user-defined function object '
+ '(see\n'
+ 'section The standard type hierarchy):\n'
+ '\n'
+ ' decorated ::= decorators (classdef | funcdef)\n'
+ ' decorators ::= decorator+\n'
+ ' decorator ::= "@" dotted_name ["(" [argument_list [","]] '
+ '")"] NEWLINE\n'
+ ' funcdef ::= "def" funcname "(" [parameter_list] ")" '
+ '":" suite\n'
+ ' dotted_name ::= identifier ("." identifier)*\n'
+ ' parameter_list ::= (defparameter ",")*\n'
+ ' ( "*" identifier ["," "**" identifier]\n'
+ ' | "**" identifier\n'
+ ' | defparameter [","] )\n'
+ ' defparameter ::= parameter ["=" expression]\n'
+ ' sublist ::= parameter ("," parameter)* [","]\n'
+ ' parameter ::= identifier | "(" sublist ")"\n'
+ ' funcname ::= identifier\n'
+ '\n'
+ 'A function definition is an executable statement. Its execution '
+ 'binds\n'
+ 'the function name in the current local namespace to a function '
+ 'object\n'
+ '(a wrapper around the executable code for the function). This\n'
+ 'function object contains a reference to the current global '
+ 'namespace\n'
+ 'as the global namespace to be used when the function is called.\n'
+ '\n'
+ 'The function definition does not execute the function body; this '
+ 'gets\n'
+ 'executed only when the function is called. [3]\n'
+ '\n'
+ 'A function definition may be wrapped by one or more *decorator*\n'
+ 'expressions. Decorator expressions are evaluated when the '
+ 'function is\n'
+ 'defined, in the scope that contains the function definition. '
+ 'The\n'
+ 'result must be a callable, which is invoked with the function '
+ 'object\n'
+ 'as the only argument. The returned value is bound to the '
+ 'function name\n'
+ 'instead of the function object. Multiple decorators are applied '
+ 'in\n'
+ 'nested fashion. For example, the following code:\n'
+ '\n'
+ ' @f1(arg)\n'
+ ' @f2\n'
+ ' def func(): pass\n'
+ '\n'
+ 'is equivalent to:\n'
+ '\n'
+ ' def func(): pass\n'
+ ' func = f1(arg)(f2(func))\n'
+ '\n'
+ 'When one or more top-level *parameters* have the form '
+ '*parameter* "="\n'
+ '*expression*, the function is said to have "default parameter '
+ 'values."\n'
+ 'For a parameter with a default value, the corresponding '
+ '*argument* may\n'
+ "be omitted from a call, in which case the parameter's default "
+ 'value is\n'
+ 'substituted. If a parameter has a default value, all following\n'
+ 'parameters must also have a default value --- this is a '
+ 'syntactic\n'
+ 'restriction that is not expressed by the grammar.\n'
+ '\n'
+ '**Default parameter values are evaluated when the function '
+ 'definition\n'
+ 'is executed.** This means that the expression is evaluated '
+ 'once, when\n'
+ 'the function is defined, and that the same "pre-computed" value '
+ 'is\n'
+ 'used for each call. This is especially important to understand '
+ 'when a\n'
+ 'default parameter is a mutable object, such as a list or a '
+ 'dictionary:\n'
+ 'if the function modifies the object (e.g. by appending an item '
+ 'to a\n'
+ 'list), the default value is in effect modified. This is '
+ 'generally not\n'
+ 'what was intended. A way around this is to use "None" as the\n'
+ 'default, and explicitly test for it in the body of the function, '
+ 'e.g.:\n'
+ '\n'
+ ' def whats_on_the_telly(penguin=None):\n'
+ ' if penguin is None:\n'
+ ' penguin = []\n'
+ ' penguin.append("property of the zoo")\n'
+ ' return penguin\n'
+ '\n'
+ 'Function call semantics are described in more detail in section '
+ 'Calls.\n'
+ 'A function call always assigns values to all parameters '
+ 'mentioned in\n'
+ 'the parameter list, either from position arguments, from '
+ 'keyword\n'
+ 'arguments, or from default values. If the form ""*identifier"" '
+ 'is\n'
+ 'present, it is initialized to a tuple receiving any excess '
+ 'positional\n'
+ 'parameters, defaulting to the empty tuple. If the form\n'
+ '""**identifier"" is present, it is initialized to a new '
+ 'dictionary\n'
+ 'receiving any excess keyword arguments, defaulting to a new '
+ 'empty\n'
+ 'dictionary.\n'
+ '\n'
+ 'It is also possible to create anonymous functions (functions not '
+ 'bound\n'
+ 'to a name), for immediate use in expressions. This uses lambda\n'
+ 'expressions, described in section Lambdas. Note that the '
+ 'lambda\n'
+ 'expression is merely a shorthand for a simplified function '
+ 'definition;\n'
+ 'a function defined in a ""def"" statement can be passed around '
+ 'or\n'
+ 'assigned to another name just like a function defined by a '
+ 'lambda\n'
+ 'expression. The ""def"" form is actually more powerful since '
+ 'it\n'
+ 'allows the execution of multiple statements.\n'
+ '\n'
+ "**Programmer's note:** Functions are first-class objects. A "
+ '""def""\n'
+ 'form executed inside a function definition defines a local '
+ 'function\n'
+ 'that can be returned or passed around. Free variables used in '
+ 'the\n'
+ 'nested function can access the local variables of the function\n'
+ 'containing the def. See section Naming and binding for '
+ 'details.\n'
+ '\n'
+ '\n'
+ 'Class definitions\n'
+ '=================\n'
+ '\n'
+ 'A class definition defines a class object (see section The '
+ 'standard\n'
+ 'type hierarchy):\n'
+ '\n'
+ ' classdef ::= "class" classname [inheritance] ":" suite\n'
+ ' inheritance ::= "(" [expression_list] ")"\n'
+ ' classname ::= identifier\n'
+ '\n'
+ 'A class definition is an executable statement. It first '
+ 'evaluates the\n'
+ 'inheritance list, if present. Each item in the inheritance '
+ 'list\n'
+ 'should evaluate to a class object or class type which allows\n'
+ "subclassing. The class's suite is then executed in a new "
+ 'execution\n'
+ 'frame (see section Naming and binding), using a newly created '
+ 'local\n'
+ 'namespace and the original global namespace. (Usually, the '
+ 'suite\n'
+ "contains only function definitions.) When the class's suite "
+ 'finishes\n'
+ 'execution, its execution frame is discarded but its local '
+ 'namespace is\n'
+ 'saved. [4] A class object is then created using the inheritance '
+ 'list\n'
+ 'for the base classes and the saved local namespace for the '
+ 'attribute\n'
+ 'dictionary. The class name is bound to this class object in '
+ 'the\n'
+ 'original local namespace.\n'
+ '\n'
+ "**Programmer's note:** Variables defined in the class definition "
+ 'are\n'
+ 'class variables; they are shared by all instances. To create '
+ 'instance\n'
+ 'variables, they can be set in a method with "self.name = '
+ 'value". Both\n'
+ 'class and instance variables are accessible through the '
+ 'notation\n'
+ '""self.name"", and an instance variable hides a class variable '
+ 'with\n'
+ 'the same name when accessed in this way. Class variables can be '
+ 'used\n'
+ 'as defaults for instance variables, but using mutable values '
+ 'there can\n'
+ 'lead to unexpected results. For *new-style class*es, '
+ 'descriptors can\n'
+ 'be used to create instance variables with different '
+ 'implementation\n'
+ 'details.\n'
+ '\n'
+ 'Class definitions, like function definitions, may be wrapped by '
+ 'one or\n'
+ 'more *decorator* expressions. The evaluation rules for the '
+ 'decorator\n'
+ 'expressions are the same as for functions. The result must be a '
+ 'class\n'
+ 'object, which is then bound to the class name.\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] The exception is propagated to the invocation stack unless\n'
+ ' there is a "finally" clause which happens to raise another\n'
+ ' exception. That new exception causes the old one to be '
+ 'lost.\n'
+ '\n'
+ '[2] Currently, control "flows off the end" except in the case '
+ 'of\n'
+ ' an exception or the execution of a "return", "continue", or\n'
+ ' "break" statement.\n'
+ '\n'
+ '[3] A string literal appearing as the first statement in the\n'
+ ' function body is transformed into the function\'s "__doc__"\n'
+ " attribute and therefore the function's *docstring*.\n"
+ '\n'
+ '[4] A string literal appearing as the first statement in the '
+ 'class\n'
+ ' body is transformed into the namespace\'s "__doc__" item '
+ 'and\n'
+ " therefore the class's *docstring*.\n",
+ 'context-managers': '\n'
+ 'With Statement Context Managers\n'
+ '*******************************\n'
+ '\n'
+ 'New in version 2.5.\n'
+ '\n'
+ 'A *context manager* is an object that defines the '
+ 'runtime context to\n'
+ 'be established when executing a "with" statement. The '
+ 'context manager\n'
+ 'handles the entry into, and the exit from, the desired '
+ 'runtime context\n'
+ 'for the execution of the block of code. Context '
+ 'managers are normally\n'
+ 'invoked using the "with" statement (described in section '
+ 'The with\n'
+ 'statement), but can also be used by directly invoking '
+ 'their methods.\n'
+ '\n'
+ 'Typical uses of context managers include saving and '
+ 'restoring various\n'
+ 'kinds of global state, locking and unlocking resources, '
+ 'closing opened\n'
+ 'files, etc.\n'
+ '\n'
+ 'For more information on context managers, see Context '
+ 'Manager Types.\n'
+ '\n'
+ 'object.__enter__(self)\n'
+ '\n'
+ ' Enter the runtime context related to this object. The '
+ '"with"\n'
+ " statement will bind this method's return value to the "
+ 'target(s)\n'
+ ' specified in the "as" clause of the statement, if '
+ 'any.\n'
+ '\n'
+ 'object.__exit__(self, exc_type, exc_value, traceback)\n'
+ '\n'
+ ' Exit the runtime context related to this object. The '
+ 'parameters\n'
+ ' describe the exception that caused the context to be '
+ 'exited. If the\n'
+ ' context was exited without an exception, all three '
+ 'arguments will\n'
+ ' be "None".\n'
+ '\n'
+ ' If an exception is supplied, and the method wishes to '
+ 'suppress the\n'
+ ' exception (i.e., prevent it from being propagated), '
+ 'it should\n'
+ ' return a true value. Otherwise, the exception will be '
+ 'processed\n'
+ ' normally upon exit from this method.\n'
+ '\n'
+ ' Note that "__exit__()" methods should not reraise the '
+ 'passed-in\n'
+ " exception; this is the caller's responsibility.\n"
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 343** - The "with" statement\n'
+ ' The specification, background, and examples for the '
+ 'Python "with"\n'
+ ' statement.\n',
+ 'continue': '\n'
+ 'The "continue" statement\n'
+ '************************\n'
+ '\n'
+ ' continue_stmt ::= "continue"\n'
+ '\n'
+ '"continue" may only occur syntactically nested in a "for" or '
+ '"while"\n'
+ 'loop, but not nested in a function or class definition or '
+ '"finally"\n'
+ 'clause within that loop. It continues with the next cycle of '
+ 'the\n'
+ 'nearest enclosing loop.\n'
+ '\n'
+ 'When "continue" passes control out of a "try" statement with a\n'
+ '"finally" clause, that "finally" clause is executed before '
+ 'really\n'
+ 'starting the next loop cycle.\n',
+ 'conversions': '\n'
+ 'Arithmetic conversions\n'
+ '**********************\n'
+ '\n'
+ 'When a description of an arithmetic operator below uses the '
+ 'phrase\n'
+ '"the numeric arguments are converted to a common type," the '
+ 'arguments\n'
+ 'are coerced using the coercion rules listed at Coercion '
+ 'rules. If\n'
+ 'both arguments are standard numeric types, the following '
+ 'coercions are\n'
+ 'applied:\n'
+ '\n'
+ '* If either argument is a complex number, the other is '
+ 'converted to\n'
+ ' complex;\n'
+ '\n'
+ '* otherwise, if either argument is a floating point number, '
+ 'the\n'
+ ' other is converted to floating point;\n'
+ '\n'
+ '* otherwise, if either argument is a long integer, the other '
+ 'is\n'
+ ' converted to long integer;\n'
+ '\n'
+ '* otherwise, both must be plain integers and no conversion '
+ 'is\n'
+ ' necessary.\n'
+ '\n'
+ 'Some additional rules apply for certain operators (e.g., a '
+ 'string left\n'
+ "argument to the '%' operator). Extensions can define their "
+ 'own\n'
+ 'coercions.\n',
+ 'customization': '\n'
+ 'Basic customization\n'
+ '*******************\n'
+ '\n'
+ 'object.__new__(cls[, ...])\n'
+ '\n'
+ ' Called to create a new instance of class *cls*. '
+ '"__new__()" is a\n'
+ ' static method (special-cased so you need not declare it '
+ 'as such)\n'
+ ' that takes the class of which an instance was requested '
+ 'as its\n'
+ ' first argument. The remaining arguments are those '
+ 'passed to the\n'
+ ' object constructor expression (the call to the class). '
+ 'The return\n'
+ ' value of "__new__()" should be the new object instance '
+ '(usually an\n'
+ ' instance of *cls*).\n'
+ '\n'
+ ' Typical implementations create a new instance of the '
+ 'class by\n'
+ ' invoking the superclass\'s "__new__()" method using\n'
+ ' "super(currentclass, cls).__new__(cls[, ...])" with '
+ 'appropriate\n'
+ ' arguments and then modifying the newly-created instance '
+ 'as\n'
+ ' necessary before returning it.\n'
+ '\n'
+ ' If "__new__()" returns an instance of *cls*, then the '
+ 'new\n'
+ ' instance\'s "__init__()" method will be invoked like\n'
+ ' "__init__(self[, ...])", where *self* is the new '
+ 'instance and the\n'
+ ' remaining arguments are the same as were passed to '
+ '"__new__()".\n'
+ '\n'
+ ' If "__new__()" does not return an instance of *cls*, '
+ 'then the new\n'
+ ' instance\'s "__init__()" method will not be invoked.\n'
+ '\n'
+ ' "__new__()" is intended mainly to allow subclasses of '
+ 'immutable\n'
+ ' types (like int, str, or tuple) to customize instance '
+ 'creation. It\n'
+ ' is also commonly overridden in custom metaclasses in '
+ 'order to\n'
+ ' customize class creation.\n'
+ '\n'
+ 'object.__init__(self[, ...])\n'
+ '\n'
+ ' Called after the instance has been created (by '
+ '"__new__()"), but\n'
+ ' before it is returned to the caller. The arguments are '
+ 'those\n'
+ ' passed to the class constructor expression. If a base '
+ 'class has an\n'
+ ' "__init__()" method, the derived class\'s "__init__()" '
+ 'method, if\n'
+ ' any, must explicitly call it to ensure proper '
+ 'initialization of the\n'
+ ' base class part of the instance; for example:\n'
+ ' "BaseClass.__init__(self, [args...])".\n'
+ '\n'
+ ' Because "__new__()" and "__init__()" work together in '
+ 'constructing\n'
+ ' objects ("__new__()" to create it, and "__init__()" to '
+ 'customise\n'
+ ' it), no non-"None" value may be returned by '
+ '"__init__()"; doing so\n'
+ ' will cause a "TypeError" to be raised at runtime.\n'
+ '\n'
+ 'object.__del__(self)\n'
+ '\n'
+ ' Called when the instance is about to be destroyed. This '
+ 'is also\n'
+ ' called a destructor. If a base class has a "__del__()" '
+ 'method, the\n'
+ ' derived class\'s "__del__()" method, if any, must '
+ 'explicitly call it\n'
+ ' to ensure proper deletion of the base class part of the '
+ 'instance.\n'
+ ' Note that it is possible (though not recommended!) for '
+ 'the\n'
+ ' "__del__()" method to postpone destruction of the '
+ 'instance by\n'
+ ' creating a new reference to it. It may then be called '
+ 'at a later\n'
+ ' time when this new reference is deleted. It is not '
+ 'guaranteed that\n'
+ ' "__del__()" methods are called for objects that still '
+ 'exist when\n'
+ ' the interpreter exits.\n'
+ '\n'
+ ' Note: "del x" doesn\'t directly call "x.__del__()" --- '
+ 'the former\n'
+ ' decrements the reference count for "x" by one, and the '
+ 'latter is\n'
+ ' only called when "x"\'s reference count reaches zero. '
+ 'Some common\n'
+ ' situations that may prevent the reference count of an '
+ 'object from\n'
+ ' going to zero include: circular references between '
+ 'objects (e.g.,\n'
+ ' a doubly-linked list or a tree data structure with '
+ 'parent and\n'
+ ' child pointers); a reference to the object on the '
+ 'stack frame of\n'
+ ' a function that caught an exception (the traceback '
+ 'stored in\n'
+ ' "sys.exc_traceback" keeps the stack frame alive); or a '
+ 'reference\n'
+ ' to the object on the stack frame that raised an '
+ 'unhandled\n'
+ ' exception in interactive mode (the traceback stored '
+ 'in\n'
+ ' "sys.last_traceback" keeps the stack frame alive). '
+ 'The first\n'
+ ' situation can only be remedied by explicitly breaking '
+ 'the cycles;\n'
+ ' the latter two situations can be resolved by storing '
+ '"None" in\n'
+ ' "sys.exc_traceback" or "sys.last_traceback". Circular '
+ 'references\n'
+ ' which are garbage are detected when the option cycle '
+ 'detector is\n'
+ " enabled (it's on by default), but can only be cleaned "
+ 'up if there\n'
+ ' are no Python-level "__del__()" methods involved. '
+ 'Refer to the\n'
+ ' documentation for the "gc" module for more information '
+ 'about how\n'
+ ' "__del__()" methods are handled by the cycle '
+ 'detector,\n'
+ ' particularly the description of the "garbage" value.\n'
+ '\n'
+ ' Warning: Due to the precarious circumstances under '
+ 'which\n'
+ ' "__del__()" methods are invoked, exceptions that occur '
+ 'during\n'
+ ' their execution are ignored, and a warning is printed '
+ 'to\n'
+ ' "sys.stderr" instead. Also, when "__del__()" is '
+ 'invoked in\n'
+ ' response to a module being deleted (e.g., when '
+ 'execution of the\n'
+ ' program is done), other globals referenced by the '
+ '"__del__()"\n'
+ ' method may already have been deleted or in the process '
+ 'of being\n'
+ ' torn down (e.g. the import machinery shutting down). '
+ 'For this\n'
+ ' reason, "__del__()" methods should do the absolute '
+ 'minimum needed\n'
+ ' to maintain external invariants. Starting with '
+ 'version 1.5,\n'
+ ' Python guarantees that globals whose name begins with '
+ 'a single\n'
+ ' underscore are deleted from their module before other '
+ 'globals are\n'
+ ' deleted; if no other references to such globals exist, '
+ 'this may\n'
+ ' help in assuring that imported modules are still '
+ 'available at the\n'
+ ' time when the "__del__()" method is called.\n'
+ '\n'
+ ' See also the "-R" command-line option.\n'
+ '\n'
+ 'object.__repr__(self)\n'
+ '\n'
+ ' Called by the "repr()" built-in function and by string '
+ 'conversions\n'
+ ' (reverse quotes) to compute the "official" string '
+ 'representation of\n'
+ ' an object. If at all possible, this should look like a '
+ 'valid\n'
+ ' Python expression that could be used to recreate an '
+ 'object with the\n'
+ ' same value (given an appropriate environment). If this '
+ 'is not\n'
+ ' possible, a string of the form "<...some useful '
+ 'description...>"\n'
+ ' should be returned. The return value must be a string '
+ 'object. If a\n'
+ ' class defines "__repr__()" but not "__str__()", then '
+ '"__repr__()"\n'
+ ' is also used when an "informal" string representation of '
+ 'instances\n'
+ ' of that class is required.\n'
+ '\n'
+ ' This is typically used for debugging, so it is important '
+ 'that the\n'
+ ' representation is information-rich and unambiguous.\n'
+ '\n'
+ 'object.__str__(self)\n'
+ '\n'
+ ' Called by the "str()" built-in function and by the '
+ '"print"\n'
+ ' statement to compute the "informal" string '
+ 'representation of an\n'
+ ' object. This differs from "__repr__()" in that it does '
+ 'not have to\n'
+ ' be a valid Python expression: a more convenient or '
+ 'concise\n'
+ ' representation may be used instead. The return value '
+ 'must be a\n'
+ ' string object.\n'
+ '\n'
+ 'object.__lt__(self, other)\n'
+ 'object.__le__(self, other)\n'
+ 'object.__eq__(self, other)\n'
+ 'object.__ne__(self, other)\n'
+ 'object.__gt__(self, other)\n'
+ 'object.__ge__(self, other)\n'
+ '\n'
+ ' New in version 2.1.\n'
+ '\n'
+ ' These are the so-called "rich comparison" methods, and '
+ 'are called\n'
+ ' for comparison operators in preference to "__cmp__()" '
+ 'below. The\n'
+ ' correspondence between operator symbols and method names '
+ 'is as\n'
+ ' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls '
+ '"x.__le__(y)",\n'
+ ' "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call '
+ '"x.__ne__(y)",\n'
+ ' "x>y" calls "x.__gt__(y)", and "x>=y" calls '
+ '"x.__ge__(y)".\n'
+ '\n'
+ ' A rich comparison method may return the singleton '
+ '"NotImplemented"\n'
+ ' if it does not implement the operation for a given pair '
+ 'of\n'
+ ' arguments. By convention, "False" and "True" are '
+ 'returned for a\n'
+ ' successful comparison. However, these methods can return '
+ 'any value,\n'
+ ' so if the comparison operator is used in a Boolean '
+ 'context (e.g.,\n'
+ ' in the condition of an "if" statement), Python will call '
+ '"bool()"\n'
+ ' on the value to determine if the result is true or '
+ 'false.\n'
+ '\n'
+ ' There are no implied relationships among the comparison '
+ 'operators.\n'
+ ' The truth of "x==y" does not imply that "x!=y" is '
+ 'false.\n'
+ ' Accordingly, when defining "__eq__()", one should also '
+ 'define\n'
+ ' "__ne__()" so that the operators will behave as '
+ 'expected. See the\n'
+ ' paragraph on "__hash__()" for some important notes on '
+ 'creating\n'
+ ' *hashable* objects which support custom comparison '
+ 'operations and\n'
+ ' are usable as dictionary keys.\n'
+ '\n'
+ ' There are no swapped-argument versions of these methods '
+ '(to be used\n'
+ ' when the left argument does not support the operation '
+ 'but the right\n'
+ ' argument does); rather, "__lt__()" and "__gt__()" are '
+ "each other's\n"
+ ' reflection, "__le__()" and "__ge__()" are each other\'s '
+ 'reflection,\n'
+ ' and "__eq__()" and "__ne__()" are their own reflection.\n'
+ '\n'
+ ' Arguments to rich comparison methods are never coerced.\n'
+ '\n'
+ ' To automatically generate ordering operations from a '
+ 'single root\n'
+ ' operation, see "functools.total_ordering()".\n'
+ '\n'
+ 'object.__cmp__(self, other)\n'
+ '\n'
+ ' Called by comparison operations if rich comparison (see '
+ 'above) is\n'
+ ' not defined. Should return a negative integer if "self '
+ '< other",\n'
+ ' zero if "self == other", a positive integer if "self > '
+ 'other". If\n'
+ ' no "__cmp__()", "__eq__()" or "__ne__()" operation is '
+ 'defined,\n'
+ ' class instances are compared by object identity '
+ '("address"). See\n'
+ ' also the description of "__hash__()" for some important '
+ 'notes on\n'
+ ' creating *hashable* objects which support custom '
+ 'comparison\n'
+ ' operations and are usable as dictionary keys. (Note: '
+ 'the\n'
+ ' restriction that exceptions are not propagated by '
+ '"__cmp__()" has\n'
+ ' been removed since Python 1.5.)\n'
+ '\n'
+ 'object.__rcmp__(self, other)\n'
+ '\n'
+ ' Changed in version 2.1: No longer supported.\n'
+ '\n'
+ 'object.__hash__(self)\n'
+ '\n'
+ ' Called by built-in function "hash()" and for operations '
+ 'on members\n'
+ ' of hashed collections including "set", "frozenset", and '
+ '"dict".\n'
+ ' "__hash__()" should return an integer. The only '
+ 'required property\n'
+ ' is that objects which compare equal have the same hash '
+ 'value; it is\n'
+ ' advised to mix together the hash values of the '
+ 'components of the\n'
+ ' object that also play a part in comparison of objects by '
+ 'packing\n'
+ ' them into a tuple and hashing the tuple. Example:\n'
+ '\n'
+ ' def __hash__(self):\n'
+ ' return hash((self.name, self.nick, self.color))\n'
+ '\n'
+ ' If a class does not define a "__cmp__()" or "__eq__()" '
+ 'method it\n'
+ ' should not define a "__hash__()" operation either; if it '
+ 'defines\n'
+ ' "__cmp__()" or "__eq__()" but not "__hash__()", its '
+ 'instances will\n'
+ ' not be usable in hashed collections. If a class defines '
+ 'mutable\n'
+ ' objects and implements a "__cmp__()" or "__eq__()" '
+ 'method, it\n'
+ ' should not implement "__hash__()", since hashable '
+ 'collection\n'
+ " implementations require that an object's hash value is "
+ 'immutable\n'
+ " (if the object's hash value changes, it will be in the "
+ 'wrong hash\n'
+ ' bucket).\n'
+ '\n'
+ ' User-defined classes have "__cmp__()" and "__hash__()" '
+ 'methods by\n'
+ ' default; with them, all objects compare unequal (except '
+ 'with\n'
+ ' themselves) and "x.__hash__()" returns a result derived '
+ 'from\n'
+ ' "id(x)".\n'
+ '\n'
+ ' Classes which inherit a "__hash__()" method from a '
+ 'parent class but\n'
+ ' change the meaning of "__cmp__()" or "__eq__()" such '
+ 'that the hash\n'
+ ' value returned is no longer appropriate (e.g. by '
+ 'switching to a\n'
+ ' value-based concept of equality instead of the default '
+ 'identity\n'
+ ' based equality) can explicitly flag themselves as being '
+ 'unhashable\n'
+ ' by setting "__hash__ = None" in the class definition. '
+ 'Doing so\n'
+ ' means that not only will instances of the class raise '
+ 'an\n'
+ ' appropriate "TypeError" when a program attempts to '
+ 'retrieve their\n'
+ ' hash value, but they will also be correctly identified '
+ 'as\n'
+ ' unhashable when checking "isinstance(obj, '
+ 'collections.Hashable)"\n'
+ ' (unlike classes which define their own "__hash__()" to '
+ 'explicitly\n'
+ ' raise "TypeError").\n'
+ '\n'
+ ' Changed in version 2.5: "__hash__()" may now also return '
+ 'a long\n'
+ ' integer object; the 32-bit integer is then derived from '
+ 'the hash of\n'
+ ' that object.\n'
+ '\n'
+ ' Changed in version 2.6: "__hash__" may now be set to '
+ '"None" to\n'
+ ' explicitly flag instances of a class as unhashable.\n'
+ '\n'
+ 'object.__nonzero__(self)\n'
+ '\n'
+ ' Called to implement truth value testing and the built-in '
+ 'operation\n'
+ ' "bool()"; should return "False" or "True", or their '
+ 'integer\n'
+ ' equivalents "0" or "1". When this method is not '
+ 'defined,\n'
+ ' "__len__()" is called, if it is defined, and the object '
+ 'is\n'
+ ' considered true if its result is nonzero. If a class '
+ 'defines\n'
+ ' neither "__len__()" nor "__nonzero__()", all its '
+ 'instances are\n'
+ ' considered true.\n'
+ '\n'
+ 'object.__unicode__(self)\n'
+ '\n'
+ ' Called to implement "unicode()" built-in; should return '
+ 'a Unicode\n'
+ ' object. When this method is not defined, string '
+ 'conversion is\n'
+ ' attempted, and the result of string conversion is '
+ 'converted to\n'
+ ' Unicode using the system default encoding.\n',
+ 'debugger': '\n'
+ '"pdb" --- The Python Debugger\n'
+ '*****************************\n'
+ '\n'
+ '**Source code:** Lib/pdb.py\n'
+ '\n'
+ '======================================================================\n'
+ '\n'
+ 'The module "pdb" defines an interactive source code debugger '
+ 'for\n'
+ 'Python programs. It supports setting (conditional) breakpoints '
+ 'and\n'
+ 'single stepping at the source line level, inspection of stack '
+ 'frames,\n'
+ 'source code listing, and evaluation of arbitrary Python code in '
+ 'the\n'
+ 'context of any stack frame. It also supports post-mortem '
+ 'debugging\n'
+ 'and can be called under program control.\n'
+ '\n'
+ 'The debugger is extensible --- it is actually defined as the '
+ 'class\n'
+ '"Pdb". This is currently undocumented but easily understood by '
+ 'reading\n'
+ 'the source. The extension interface uses the modules "bdb" and '
+ '"cmd".\n'
+ '\n'
+ 'The debugger\'s prompt is "(Pdb)". Typical usage to run a '
+ 'program under\n'
+ 'control of the debugger is:\n'
+ '\n'
+ ' >>> import pdb\n'
+ ' >>> import mymodule\n'
+ " >>> pdb.run('mymodule.test()')\n"
+ ' > <string>(0)?()\n'
+ ' (Pdb) continue\n'
+ ' > <string>(1)?()\n'
+ ' (Pdb) continue\n'
+ " NameError: 'spam'\n"
+ ' > <string>(1)?()\n'
+ ' (Pdb)\n'
+ '\n'
+ '"pdb.py" can also be invoked as a script to debug other '
+ 'scripts. For\n'
+ 'example:\n'
+ '\n'
+ ' python -m pdb myscript.py\n'
+ '\n'
+ 'When invoked as a script, pdb will automatically enter '
+ 'post-mortem\n'
+ 'debugging if the program being debugged exits abnormally. After '
+ 'post-\n'
+ 'mortem debugging (or after normal exit of the program), pdb '
+ 'will\n'
+ "restart the program. Automatic restarting preserves pdb's state "
+ '(such\n'
+ 'as breakpoints) and in most cases is more useful than quitting '
+ 'the\n'
+ "debugger upon program's exit.\n"
+ '\n'
+ 'New in version 2.4: Restarting post-mortem behavior added.\n'
+ '\n'
+ 'The typical usage to break into the debugger from a running '
+ 'program is\n'
+ 'to insert\n'
+ '\n'
+ ' import pdb; pdb.set_trace()\n'
+ '\n'
+ 'at the location you want to break into the debugger. You can '
+ 'then\n'
+ 'step through the code following this statement, and continue '
+ 'running\n'
+ 'without the debugger using the "c" command.\n'
+ '\n'
+ 'The typical usage to inspect a crashed program is:\n'
+ '\n'
+ ' >>> import pdb\n'
+ ' >>> import mymodule\n'
+ ' >>> mymodule.test()\n'
+ ' Traceback (most recent call last):\n'
+ ' File "<stdin>", line 1, in <module>\n'
+ ' File "./mymodule.py", line 4, in test\n'
+ ' test2()\n'
+ ' File "./mymodule.py", line 3, in test2\n'
+ ' print spam\n'
+ ' NameError: spam\n'
+ ' >>> pdb.pm()\n'
+ ' > ./mymodule.py(3)test2()\n'
+ ' -> print spam\n'
+ ' (Pdb)\n'
+ '\n'
+ 'The module defines the following functions; each enters the '
+ 'debugger\n'
+ 'in a slightly different way:\n'
+ '\n'
+ 'pdb.run(statement[, globals[, locals]])\n'
+ '\n'
+ ' Execute the *statement* (given as a string) under debugger '
+ 'control.\n'
+ ' The debugger prompt appears before any code is executed; you '
+ 'can\n'
+ ' set breakpoints and type "continue", or you can step through '
+ 'the\n'
+ ' statement using "step" or "next" (all these commands are '
+ 'explained\n'
+ ' below). The optional *globals* and *locals* arguments '
+ 'specify the\n'
+ ' environment in which the code is executed; by default the\n'
+ ' dictionary of the module "__main__" is used. (See the '
+ 'explanation\n'
+ ' of the "exec" statement or the "eval()" built-in function.)\n'
+ '\n'
+ 'pdb.runeval(expression[, globals[, locals]])\n'
+ '\n'
+ ' Evaluate the *expression* (given as a string) under debugger\n'
+ ' control. When "runeval()" returns, it returns the value of '
+ 'the\n'
+ ' expression. Otherwise this function is similar to "run()".\n'
+ '\n'
+ 'pdb.runcall(function[, argument, ...])\n'
+ '\n'
+ ' Call the *function* (a function or method object, not a '
+ 'string)\n'
+ ' with the given arguments. When "runcall()" returns, it '
+ 'returns\n'
+ ' whatever the function call returned. The debugger prompt '
+ 'appears\n'
+ ' as soon as the function is entered.\n'
+ '\n'
+ 'pdb.set_trace()\n'
+ '\n'
+ ' Enter the debugger at the calling stack frame. This is '
+ 'useful to\n'
+ ' hard-code a breakpoint at a given point in a program, even if '
+ 'the\n'
+ ' code is not otherwise being debugged (e.g. when an assertion\n'
+ ' fails).\n'
+ '\n'
+ 'pdb.post_mortem([traceback])\n'
+ '\n'
+ ' Enter post-mortem debugging of the given *traceback* object. '
+ 'If no\n'
+ ' *traceback* is given, it uses the one of the exception that '
+ 'is\n'
+ ' currently being handled (an exception must be being handled '
+ 'if the\n'
+ ' default is to be used).\n'
+ '\n'
+ 'pdb.pm()\n'
+ '\n'
+ ' Enter post-mortem debugging of the traceback found in\n'
+ ' "sys.last_traceback".\n'
+ '\n'
+ 'The "run*" functions and "set_trace()" are aliases for '
+ 'instantiating\n'
+ 'the "Pdb" class and calling the method of the same name. If you '
+ 'want\n'
+ 'to access further features, you have to do this yourself:\n'
+ '\n'
+ "class pdb.Pdb(completekey='tab', stdin=None, stdout=None, "
+ 'skip=None)\n'
+ '\n'
+ ' "Pdb" is the debugger class.\n'
+ '\n'
+ ' The *completekey*, *stdin* and *stdout* arguments are passed '
+ 'to the\n'
+ ' underlying "cmd.Cmd" class; see the description there.\n'
+ '\n'
+ ' The *skip* argument, if given, must be an iterable of '
+ 'glob-style\n'
+ ' module name patterns. The debugger will not step into frames '
+ 'that\n'
+ ' originate in a module that matches one of these patterns. '
+ '[1]\n'
+ '\n'
+ ' Example call to enable tracing with *skip*:\n'
+ '\n'
+ " import pdb; pdb.Pdb(skip=['django.*']).set_trace()\n"
+ '\n'
+ ' New in version 2.7: The *skip* argument.\n'
+ '\n'
+ ' run(statement[, globals[, locals]])\n'
+ ' runeval(expression[, globals[, locals]])\n'
+ ' runcall(function[, argument, ...])\n'
+ ' set_trace()\n'
+ '\n'
+ ' See the documentation for the functions explained above.\n',
+ 'del': '\n'
+ 'The "del" statement\n'
+ '*******************\n'
+ '\n'
+ ' del_stmt ::= "del" target_list\n'
+ '\n'
+ 'Deletion is recursively defined very similar to the way assignment '
+ 'is\n'
+ 'defined. Rather than spelling it out in full details, here are some\n'
+ 'hints.\n'
+ '\n'
+ 'Deletion of a target list recursively deletes each target, from left\n'
+ 'to right.\n'
+ '\n'
+ 'Deletion of a name removes the binding of that name from the local '
+ 'or\n'
+ 'global namespace, depending on whether the name occurs in a "global"\n'
+ 'statement in the same code block. If the name is unbound, a\n'
+ '"NameError" exception will be raised.\n'
+ '\n'
+ 'It is illegal to delete a name from the local namespace if it occurs\n'
+ 'as a free variable in a nested block.\n'
+ '\n'
+ 'Deletion of attribute references, subscriptions and slicings is '
+ 'passed\n'
+ 'to the primary object involved; deletion of a slicing is in general\n'
+ 'equivalent to assignment of an empty slice of the right type (but '
+ 'even\n'
+ 'this is determined by the sliced object).\n',
+ 'dict': '\n'
+ 'Dictionary displays\n'
+ '*******************\n'
+ '\n'
+ 'A dictionary display is a possibly empty series of key/datum pairs\n'
+ 'enclosed in curly braces:\n'
+ '\n'
+ ' dict_display ::= "{" [key_datum_list | dict_comprehension] '
+ '"}"\n'
+ ' key_datum_list ::= key_datum ("," key_datum)* [","]\n'
+ ' key_datum ::= expression ":" expression\n'
+ ' dict_comprehension ::= expression ":" expression comp_for\n'
+ '\n'
+ 'A dictionary display yields a new dictionary object.\n'
+ '\n'
+ 'If a comma-separated sequence of key/datum pairs is given, they are\n'
+ 'evaluated from left to right to define the entries of the '
+ 'dictionary:\n'
+ 'each key object is used as a key into the dictionary to store the\n'
+ 'corresponding datum. This means that you can specify the same key\n'
+ "multiple times in the key/datum list, and the final dictionary's "
+ 'value\n'
+ 'for that key will be the last one given.\n'
+ '\n'
+ 'A dict comprehension, in contrast to list and set comprehensions,\n'
+ 'needs two expressions separated with a colon followed by the usual\n'
+ '"for" and "if" clauses. When the comprehension is run, the '
+ 'resulting\n'
+ 'key and value elements are inserted in the new dictionary in the '
+ 'order\n'
+ 'they are produced.\n'
+ '\n'
+ 'Restrictions on the types of the key values are listed earlier in\n'
+ 'section The standard type hierarchy. (To summarize, the key type\n'
+ 'should be *hashable*, which excludes all mutable objects.) Clashes\n'
+ 'between duplicate keys are not detected; the last datum (textually\n'
+ 'rightmost in the display) stored for a given key value prevails.\n',
+ 'dynamic-features': '\n'
+ 'Interaction with dynamic features\n'
+ '*********************************\n'
+ '\n'
+ 'There are several cases where Python statements are '
+ 'illegal when used\n'
+ 'in conjunction with nested scopes that contain free '
+ 'variables.\n'
+ '\n'
+ 'If a variable is referenced in an enclosing scope, it is '
+ 'illegal to\n'
+ 'delete the name. An error will be reported at compile '
+ 'time.\n'
+ '\n'
+ 'If the wild card form of import --- "import *" --- is '
+ 'used in a\n'
+ 'function and the function contains or is a nested block '
+ 'with free\n'
+ 'variables, the compiler will raise a "SyntaxError".\n'
+ '\n'
+ 'If "exec" is used in a function and the function '
+ 'contains or is a\n'
+ 'nested block with free variables, the compiler will '
+ 'raise a\n'
+ '"SyntaxError" unless the exec explicitly specifies the '
+ 'local namespace\n'
+ 'for the "exec". (In other words, "exec obj" would be '
+ 'illegal, but\n'
+ '"exec obj in ns" would be legal.)\n'
+ '\n'
+ 'The "eval()", "execfile()", and "input()" functions and '
+ 'the "exec"\n'
+ 'statement do not have access to the full environment for '
+ 'resolving\n'
+ 'names. Names may be resolved in the local and global '
+ 'namespaces of\n'
+ 'the caller. Free variables are not resolved in the '
+ 'nearest enclosing\n'
+ 'namespace, but in the global namespace. [1] The "exec" '
+ 'statement and\n'
+ 'the "eval()" and "execfile()" functions have optional '
+ 'arguments to\n'
+ 'override the global and local namespace. If only one '
+ 'namespace is\n'
+ 'specified, it is used for both.\n',
+ 'else': '\n'
+ 'The "if" statement\n'
+ '******************\n'
+ '\n'
+ 'The "if" statement is used for conditional execution:\n'
+ '\n'
+ ' if_stmt ::= "if" expression ":" suite\n'
+ ' ( "elif" expression ":" suite )*\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'It selects exactly one of the suites by evaluating the expressions '
+ 'one\n'
+ 'by one until one is found to be true (see section Boolean '
+ 'operations\n'
+ 'for the definition of true and false); then that suite is executed\n'
+ '(and no other part of the "if" statement is executed or evaluated).\n'
+ 'If all expressions are false, the suite of the "else" clause, if\n'
+ 'present, is executed.\n',
+ 'exceptions': '\n'
+ 'Exceptions\n'
+ '**********\n'
+ '\n'
+ 'Exceptions are a means of breaking out of the normal flow of '
+ 'control\n'
+ 'of a code block in order to handle errors or other '
+ 'exceptional\n'
+ 'conditions. An exception is *raised* at the point where the '
+ 'error is\n'
+ 'detected; it may be *handled* by the surrounding code block or '
+ 'by any\n'
+ 'code block that directly or indirectly invoked the code block '
+ 'where\n'
+ 'the error occurred.\n'
+ '\n'
+ 'The Python interpreter raises an exception when it detects a '
+ 'run-time\n'
+ 'error (such as division by zero). A Python program can also\n'
+ 'explicitly raise an exception with the "raise" statement. '
+ 'Exception\n'
+ 'handlers are specified with the "try" ... "except" statement. '
+ 'The\n'
+ '"finally" clause of such a statement can be used to specify '
+ 'cleanup\n'
+ 'code which does not handle the exception, but is executed '
+ 'whether an\n'
+ 'exception occurred or not in the preceding code.\n'
+ '\n'
+ 'Python uses the "termination" model of error handling: an '
+ 'exception\n'
+ 'handler can find out what happened and continue execution at '
+ 'an outer\n'
+ 'level, but it cannot repair the cause of the error and retry '
+ 'the\n'
+ 'failing operation (except by re-entering the offending piece '
+ 'of code\n'
+ 'from the top).\n'
+ '\n'
+ 'When an exception is not handled at all, the interpreter '
+ 'terminates\n'
+ 'execution of the program, or returns to its interactive main '
+ 'loop. In\n'
+ 'either case, it prints a stack backtrace, except when the '
+ 'exception is\n'
+ '"SystemExit".\n'
+ '\n'
+ 'Exceptions are identified by class instances. The "except" '
+ 'clause is\n'
+ 'selected depending on the class of the instance: it must '
+ 'reference the\n'
+ 'class of the instance or a base class thereof. The instance '
+ 'can be\n'
+ 'received by the handler and can carry additional information '
+ 'about the\n'
+ 'exceptional condition.\n'
+ '\n'
+ 'Exceptions can also be identified by strings, in which case '
+ 'the\n'
+ '"except" clause is selected by object identity. An arbitrary '
+ 'value\n'
+ 'can be raised along with the identifying string which can be '
+ 'passed to\n'
+ 'the handler.\n'
+ '\n'
+ 'Note: Messages to exceptions are not part of the Python API. '
+ 'Their\n'
+ ' contents may change from one version of Python to the next '
+ 'without\n'
+ ' warning and should not be relied on by code which will run '
+ 'under\n'
+ ' multiple versions of the interpreter.\n'
+ '\n'
+ 'See also the description of the "try" statement in section The '
+ 'try\n'
+ 'statement and "raise" statement in section The raise '
+ 'statement.\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] This limitation occurs because the code that is executed '
+ 'by\n'
+ ' these operations is not available at the time the module '
+ 'is\n'
+ ' compiled.\n',
+ 'exec': '\n'
+ 'The "exec" statement\n'
+ '********************\n'
+ '\n'
+ ' exec_stmt ::= "exec" or_expr ["in" expression ["," expression]]\n'
+ '\n'
+ 'This statement supports dynamic execution of Python code. The '
+ 'first\n'
+ 'expression should evaluate to either a Unicode string, a *Latin-1*\n'
+ 'encoded string, an open file object, a code object, or a tuple. If '
+ 'it\n'
+ 'is a string, the string is parsed as a suite of Python statements\n'
+ 'which is then executed (unless a syntax error occurs). [1] If it is '
+ 'an\n'
+ 'open file, the file is parsed until EOF and executed. If it is a '
+ 'code\n'
+ 'object, it is simply executed. For the interpretation of a tuple, '
+ 'see\n'
+ "below. In all cases, the code that's executed is expected to be "
+ 'valid\n'
+ 'as file input (see section File input). Be aware that the "return"\n'
+ 'and "yield" statements may not be used outside of function '
+ 'definitions\n'
+ 'even within the context of code passed to the "exec" statement.\n'
+ '\n'
+ 'In all cases, if the optional parts are omitted, the code is '
+ 'executed\n'
+ 'in the current scope. If only the first expression after "in" is\n'
+ 'specified, it should be a dictionary, which will be used for both '
+ 'the\n'
+ 'global and the local variables. If two expressions are given, they\n'
+ 'are used for the global and local variables, respectively. If\n'
+ 'provided, *locals* can be any mapping object. Remember that at '
+ 'module\n'
+ 'level, globals and locals are the same dictionary. If two separate\n'
+ 'objects are given as *globals* and *locals*, the code will be '
+ 'executed\n'
+ 'as if it were embedded in a class definition.\n'
+ '\n'
+ 'The first expression may also be a tuple of length 2 or 3. In this\n'
+ 'case, the optional parts must be omitted. The form "exec(expr,\n'
+ 'globals)" is equivalent to "exec expr in globals", while the form\n'
+ '"exec(expr, globals, locals)" is equivalent to "exec expr in '
+ 'globals,\n'
+ 'locals". The tuple form of "exec" provides compatibility with '
+ 'Python\n'
+ '3, where "exec" is a function rather than a statement.\n'
+ '\n'
+ 'Changed in version 2.4: Formerly, *locals* was required to be a\n'
+ 'dictionary.\n'
+ '\n'
+ 'As a side effect, an implementation may insert additional keys into\n'
+ 'the dictionaries given besides those corresponding to variable '
+ 'names\n'
+ 'set by the executed code. For example, the current implementation '
+ 'may\n'
+ 'add a reference to the dictionary of the built-in module '
+ '"__builtin__"\n'
+ 'under the key "__builtins__" (!).\n'
+ '\n'
+ "**Programmer's hints:** dynamic evaluation of expressions is "
+ 'supported\n'
+ 'by the built-in function "eval()". The built-in functions '
+ '"globals()"\n'
+ 'and "locals()" return the current global and local dictionary,\n'
+ 'respectively, which may be useful to pass around for use by "exec".\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] Note that the parser only accepts the Unix-style end of line\n'
+ ' convention. If you are reading the code from a file, make sure '
+ 'to\n'
+ ' use *universal newlines* mode to convert Windows or Mac-style\n'
+ ' newlines.\n',
+ 'execmodel': '\n'
+ 'Execution model\n'
+ '***************\n'
+ '\n'
+ '\n'
+ 'Naming and binding\n'
+ '==================\n'
+ '\n'
+ '*Names* refer to objects. Names are introduced by name '
+ 'binding\n'
+ 'operations. Each occurrence of a name in the program text '
+ 'refers to\n'
+ 'the *binding* of that name established in the innermost '
+ 'function block\n'
+ 'containing the use.\n'
+ '\n'
+ 'A *block* is a piece of Python program text that is executed as '
+ 'a\n'
+ 'unit. The following are blocks: a module, a function body, and '
+ 'a class\n'
+ 'definition. Each command typed interactively is a block. A '
+ 'script\n'
+ 'file (a file given as standard input to the interpreter or '
+ 'specified\n'
+ 'on the interpreter command line the first argument) is a code '
+ 'block.\n'
+ 'A script command (a command specified on the interpreter '
+ 'command line\n'
+ "with the '**-c**' option) is a code block. The file read by "
+ 'the\n'
+ 'built-in function "execfile()" is a code block. The string '
+ 'argument\n'
+ 'passed to the built-in function "eval()" and to the "exec" '
+ 'statement\n'
+ 'is a code block. The expression read and evaluated by the '
+ 'built-in\n'
+ 'function "input()" is a code block.\n'
+ '\n'
+ 'A code block is executed in an *execution frame*. A frame '
+ 'contains\n'
+ 'some administrative information (used for debugging) and '
+ 'determines\n'
+ "where and how execution continues after the code block's "
+ 'execution has\n'
+ 'completed.\n'
+ '\n'
+ 'A *scope* defines the visibility of a name within a block. If '
+ 'a local\n'
+ 'variable is defined in a block, its scope includes that block. '
+ 'If the\n'
+ 'definition occurs in a function block, the scope extends to any '
+ 'blocks\n'
+ 'contained within the defining one, unless a contained block '
+ 'introduces\n'
+ 'a different binding for the name. The scope of names defined '
+ 'in a\n'
+ 'class block is limited to the class block; it does not extend '
+ 'to the\n'
+ 'code blocks of methods -- this includes generator expressions '
+ 'since\n'
+ 'they are implemented using a function scope. This means that '
+ 'the\n'
+ 'following will fail:\n'
+ '\n'
+ ' class A:\n'
+ ' a = 42\n'
+ ' b = list(a + i for i in range(10))\n'
+ '\n'
+ 'When a name is used in a code block, it is resolved using the '
+ 'nearest\n'
+ 'enclosing scope. The set of all such scopes visible to a code '
+ 'block\n'
+ "is called the block's *environment*.\n"
+ '\n'
+ 'If a name is bound in a block, it is a local variable of that '
+ 'block.\n'
+ 'If a name is bound at the module level, it is a global '
+ 'variable. (The\n'
+ 'variables of the module code block are local and global.) If '
+ 'a\n'
+ 'variable is used in a code block but not defined there, it is a '
+ '*free\n'
+ 'variable*.\n'
+ '\n'
+ 'When a name is not found at all, a "NameError" exception is '
+ 'raised.\n'
+ 'If the name refers to a local variable that has not been bound, '
+ 'a\n'
+ '"UnboundLocalError" exception is raised. "UnboundLocalError" '
+ 'is a\n'
+ 'subclass of "NameError".\n'
+ '\n'
+ 'The following constructs bind names: formal parameters to '
+ 'functions,\n'
+ '"import" statements, class and function definitions (these bind '
+ 'the\n'
+ 'class or function name in the defining block), and targets that '
+ 'are\n'
+ 'identifiers if occurring in an assignment, "for" loop header, '
+ 'in the\n'
+ 'second position of an "except" clause header or after "as" in a '
+ '"with"\n'
+ 'statement. The "import" statement of the form "from ... import '
+ '*"\n'
+ 'binds all names defined in the imported module, except those '
+ 'beginning\n'
+ 'with an underscore. This form may only be used at the module '
+ 'level.\n'
+ '\n'
+ 'A target occurring in a "del" statement is also considered '
+ 'bound for\n'
+ 'this purpose (though the actual semantics are to unbind the '
+ 'name). It\n'
+ 'is illegal to unbind a name that is referenced by an enclosing '
+ 'scope;\n'
+ 'the compiler will report a "SyntaxError".\n'
+ '\n'
+ 'Each assignment or import statement occurs within a block '
+ 'defined by a\n'
+ 'class or function definition or at the module level (the '
+ 'top-level\n'
+ 'code block).\n'
+ '\n'
+ 'If a name binding operation occurs anywhere within a code '
+ 'block, all\n'
+ 'uses of the name within the block are treated as references to '
+ 'the\n'
+ 'current block. This can lead to errors when a name is used '
+ 'within a\n'
+ 'block before it is bound. This rule is subtle. Python lacks\n'
+ 'declarations and allows name binding operations to occur '
+ 'anywhere\n'
+ 'within a code block. The local variables of a code block can '
+ 'be\n'
+ 'determined by scanning the entire text of the block for name '
+ 'binding\n'
+ 'operations.\n'
+ '\n'
+ 'If the global statement occurs within a block, all uses of the '
+ 'name\n'
+ 'specified in the statement refer to the binding of that name in '
+ 'the\n'
+ 'top-level namespace. Names are resolved in the top-level '
+ 'namespace by\n'
+ 'searching the global namespace, i.e. the namespace of the '
+ 'module\n'
+ 'containing the code block, and the builtins namespace, the '
+ 'namespace\n'
+ 'of the module "__builtin__". The global namespace is searched '
+ 'first.\n'
+ 'If the name is not found there, the builtins namespace is '
+ 'searched.\n'
+ 'The global statement must precede all uses of the name.\n'
+ '\n'
+ 'The builtins namespace associated with the execution of a code '
+ 'block\n'
+ 'is actually found by looking up the name "__builtins__" in its '
+ 'global\n'
+ 'namespace; this should be a dictionary or a module (in the '
+ 'latter case\n'
+ "the module's dictionary is used). By default, when in the "
+ '"__main__"\n'
+ 'module, "__builtins__" is the built-in module "__builtin__" '
+ '(note: no\n'
+ '\'s\'); when in any other module, "__builtins__" is an alias '
+ 'for the\n'
+ 'dictionary of the "__builtin__" module itself. "__builtins__" '
+ 'can be\n'
+ 'set to a user-created dictionary to create a weak form of '
+ 'restricted\n'
+ 'execution.\n'
+ '\n'
+ '**CPython implementation detail:** Users should not touch\n'
+ '"__builtins__"; it is strictly an implementation detail. '
+ 'Users\n'
+ 'wanting to override values in the builtins namespace should '
+ '"import"\n'
+ 'the "__builtin__" (no \'s\') module and modify its attributes\n'
+ 'appropriately.\n'
+ '\n'
+ 'The namespace for a module is automatically created the first '
+ 'time a\n'
+ 'module is imported. The main module for a script is always '
+ 'called\n'
+ '"__main__".\n'
+ '\n'
+ 'The "global" statement has the same scope as a name binding '
+ 'operation\n'
+ 'in the same block. If the nearest enclosing scope for a free '
+ 'variable\n'
+ 'contains a global statement, the free variable is treated as a '
+ 'global.\n'
+ '\n'
+ 'A class definition is an executable statement that may use and '
+ 'define\n'
+ 'names. These references follow the normal rules for name '
+ 'resolution.\n'
+ 'The namespace of the class definition becomes the attribute '
+ 'dictionary\n'
+ 'of the class. Names defined at the class scope are not visible '
+ 'in\n'
+ 'methods.\n'
+ '\n'
+ '\n'
+ 'Interaction with dynamic features\n'
+ '---------------------------------\n'
+ '\n'
+ 'There are several cases where Python statements are illegal '
+ 'when used\n'
+ 'in conjunction with nested scopes that contain free variables.\n'
+ '\n'
+ 'If a variable is referenced in an enclosing scope, it is '
+ 'illegal to\n'
+ 'delete the name. An error will be reported at compile time.\n'
+ '\n'
+ 'If the wild card form of import --- "import *" --- is used in '
+ 'a\n'
+ 'function and the function contains or is a nested block with '
+ 'free\n'
+ 'variables, the compiler will raise a "SyntaxError".\n'
+ '\n'
+ 'If "exec" is used in a function and the function contains or is '
+ 'a\n'
+ 'nested block with free variables, the compiler will raise a\n'
+ '"SyntaxError" unless the exec explicitly specifies the local '
+ 'namespace\n'
+ 'for the "exec". (In other words, "exec obj" would be illegal, '
+ 'but\n'
+ '"exec obj in ns" would be legal.)\n'
+ '\n'
+ 'The "eval()", "execfile()", and "input()" functions and the '
+ '"exec"\n'
+ 'statement do not have access to the full environment for '
+ 'resolving\n'
+ 'names. Names may be resolved in the local and global '
+ 'namespaces of\n'
+ 'the caller. Free variables are not resolved in the nearest '
+ 'enclosing\n'
+ 'namespace, but in the global namespace. [1] The "exec" '
+ 'statement and\n'
+ 'the "eval()" and "execfile()" functions have optional arguments '
+ 'to\n'
+ 'override the global and local namespace. If only one namespace '
+ 'is\n'
+ 'specified, it is used for both.\n'
+ '\n'
+ '\n'
+ 'Exceptions\n'
+ '==========\n'
+ '\n'
+ 'Exceptions are a means of breaking out of the normal flow of '
+ 'control\n'
+ 'of a code block in order to handle errors or other exceptional\n'
+ 'conditions. An exception is *raised* at the point where the '
+ 'error is\n'
+ 'detected; it may be *handled* by the surrounding code block or '
+ 'by any\n'
+ 'code block that directly or indirectly invoked the code block '
+ 'where\n'
+ 'the error occurred.\n'
+ '\n'
+ 'The Python interpreter raises an exception when it detects a '
+ 'run-time\n'
+ 'error (such as division by zero). A Python program can also\n'
+ 'explicitly raise an exception with the "raise" statement. '
+ 'Exception\n'
+ 'handlers are specified with the "try" ... "except" statement. '
+ 'The\n'
+ '"finally" clause of such a statement can be used to specify '
+ 'cleanup\n'
+ 'code which does not handle the exception, but is executed '
+ 'whether an\n'
+ 'exception occurred or not in the preceding code.\n'
+ '\n'
+ 'Python uses the "termination" model of error handling: an '
+ 'exception\n'
+ 'handler can find out what happened and continue execution at an '
+ 'outer\n'
+ 'level, but it cannot repair the cause of the error and retry '
+ 'the\n'
+ 'failing operation (except by re-entering the offending piece of '
+ 'code\n'
+ 'from the top).\n'
+ '\n'
+ 'When an exception is not handled at all, the interpreter '
+ 'terminates\n'
+ 'execution of the program, or returns to its interactive main '
+ 'loop. In\n'
+ 'either case, it prints a stack backtrace, except when the '
+ 'exception is\n'
+ '"SystemExit".\n'
+ '\n'
+ 'Exceptions are identified by class instances. The "except" '
+ 'clause is\n'
+ 'selected depending on the class of the instance: it must '
+ 'reference the\n'
+ 'class of the instance or a base class thereof. The instance '
+ 'can be\n'
+ 'received by the handler and can carry additional information '
+ 'about the\n'
+ 'exceptional condition.\n'
+ '\n'
+ 'Exceptions can also be identified by strings, in which case '
+ 'the\n'
+ '"except" clause is selected by object identity. An arbitrary '
+ 'value\n'
+ 'can be raised along with the identifying string which can be '
+ 'passed to\n'
+ 'the handler.\n'
+ '\n'
+ 'Note: Messages to exceptions are not part of the Python API. '
+ 'Their\n'
+ ' contents may change from one version of Python to the next '
+ 'without\n'
+ ' warning and should not be relied on by code which will run '
+ 'under\n'
+ ' multiple versions of the interpreter.\n'
+ '\n'
+ 'See also the description of the "try" statement in section The '
+ 'try\n'
+ 'statement and "raise" statement in section The raise '
+ 'statement.\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] This limitation occurs because the code that is executed '
+ 'by\n'
+ ' these operations is not available at the time the module '
+ 'is\n'
+ ' compiled.\n',
+ 'exprlists': '\n'
+ 'Expression lists\n'
+ '****************\n'
+ '\n'
+ ' expression_list ::= expression ( "," expression )* [","]\n'
+ '\n'
+ 'An expression list containing at least one comma yields a '
+ 'tuple. The\n'
+ 'length of the tuple is the number of expressions in the list. '
+ 'The\n'
+ 'expressions are evaluated from left to right.\n'
+ '\n'
+ 'The trailing comma is required only to create a single tuple '
+ '(a.k.a. a\n'
+ '*singleton*); it is optional in all other cases. A single '
+ 'expression\n'
+ "without a trailing comma doesn't create a tuple, but rather "
+ 'yields the\n'
+ 'value of that expression. (To create an empty tuple, use an '
+ 'empty pair\n'
+ 'of parentheses: "()".)\n',
+ 'floating': '\n'
+ 'Floating point literals\n'
+ '***********************\n'
+ '\n'
+ 'Floating point literals are described by the following lexical\n'
+ 'definitions:\n'
+ '\n'
+ ' floatnumber ::= pointfloat | exponentfloat\n'
+ ' pointfloat ::= [intpart] fraction | intpart "."\n'
+ ' exponentfloat ::= (intpart | pointfloat) exponent\n'
+ ' intpart ::= digit+\n'
+ ' fraction ::= "." digit+\n'
+ ' exponent ::= ("e" | "E") ["+" | "-"] digit+\n'
+ '\n'
+ 'Note that the integer and exponent parts of floating point '
+ 'numbers can\n'
+ 'look like octal integers, but are interpreted using radix 10. '
+ 'For\n'
+ 'example, "077e010" is legal, and denotes the same number as '
+ '"77e10".\n'
+ 'The allowed range of floating point literals is implementation-\n'
+ 'dependent. Some examples of floating point literals:\n'
+ '\n'
+ ' 3.14 10. .001 1e100 3.14e-10 0e0\n'
+ '\n'
+ 'Note that numeric literals do not include a sign; a phrase like '
+ '"-1"\n'
+ 'is actually an expression composed of the unary operator "-" and '
+ 'the\n'
+ 'literal "1".\n',
+ 'for': '\n'
+ 'The "for" statement\n'
+ '*******************\n'
+ '\n'
+ 'The "for" statement is used to iterate over the elements of a '
+ 'sequence\n'
+ '(such as a string, tuple or list) or other iterable object:\n'
+ '\n'
+ ' for_stmt ::= "for" target_list "in" expression_list ":" suite\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'The expression list is evaluated once; it should yield an iterable\n'
+ 'object. An iterator is created for the result of the\n'
+ '"expression_list". The suite is then executed once for each item\n'
+ 'provided by the iterator, in the order of ascending indices. Each\n'
+ 'item in turn is assigned to the target list using the standard rules\n'
+ 'for assignments, and then the suite is executed. When the items are\n'
+ 'exhausted (which is immediately when the sequence is empty), the '
+ 'suite\n'
+ 'in the "else" clause, if present, is executed, and the loop\n'
+ 'terminates.\n'
+ '\n'
+ 'A "break" statement executed in the first suite terminates the loop\n'
+ 'without executing the "else" clause\'s suite. A "continue" '
+ 'statement\n'
+ 'executed in the first suite skips the rest of the suite and '
+ 'continues\n'
+ 'with the next item, or with the "else" clause if there was no next\n'
+ 'item.\n'
+ '\n'
+ 'The suite may assign to the variable(s) in the target list; this '
+ 'does\n'
+ 'not affect the next item assigned to it.\n'
+ '\n'
+ 'The target list is not deleted when the loop is finished, but if the\n'
+ 'sequence is empty, it will not have been assigned to at all by the\n'
+ 'loop. Hint: the built-in function "range()" returns a sequence of\n'
+ 'integers suitable to emulate the effect of Pascal\'s "for i := a to '
+ 'b\n'
+ 'do"; e.g., "range(3)" returns the list "[0, 1, 2]".\n'
+ '\n'
+ 'Note: There is a subtlety when the sequence is being modified by the\n'
+ ' loop (this can only occur for mutable sequences, i.e. lists). An\n'
+ ' internal counter is used to keep track of which item is used next,\n'
+ ' and this is incremented on each iteration. When this counter has\n'
+ ' reached the length of the sequence the loop terminates. This '
+ 'means\n'
+ ' that if the suite deletes the current (or a previous) item from '
+ 'the\n'
+ ' sequence, the next item will be skipped (since it gets the index '
+ 'of\n'
+ ' the current item which has already been treated). Likewise, if '
+ 'the\n'
+ ' suite inserts an item in the sequence before the current item, the\n'
+ ' current item will be treated again the next time through the loop.\n'
+ ' This can lead to nasty bugs that can be avoided by making a\n'
+ ' temporary copy using a slice of the whole sequence, e.g.,\n'
+ '\n'
+ ' for x in a[:]:\n'
+ ' if x < 0: a.remove(x)\n',
+ 'formatstrings': '\n'
+ 'Format String Syntax\n'
+ '********************\n'
+ '\n'
+ 'The "str.format()" method and the "Formatter" class share '
+ 'the same\n'
+ 'syntax for format strings (although in the case of '
+ '"Formatter",\n'
+ 'subclasses can define their own format string syntax).\n'
+ '\n'
+ 'Format strings contain "replacement fields" surrounded by '
+ 'curly braces\n'
+ '"{}". Anything that is not contained in braces is '
+ 'considered literal\n'
+ 'text, which is copied unchanged to the output. If you need '
+ 'to include\n'
+ 'a brace character in the literal text, it can be escaped by '
+ 'doubling:\n'
+ '"{{" and "}}".\n'
+ '\n'
+ 'The grammar for a replacement field is as follows:\n'
+ '\n'
+ ' replacement_field ::= "{" [field_name] ["!" '
+ 'conversion] [":" format_spec] "}"\n'
+ ' field_name ::= arg_name ("." attribute_name | '
+ '"[" element_index "]")*\n'
+ ' arg_name ::= [identifier | integer]\n'
+ ' attribute_name ::= identifier\n'
+ ' element_index ::= integer | index_string\n'
+ ' index_string ::= <any source character except '
+ '"]"> +\n'
+ ' conversion ::= "r" | "s"\n'
+ ' format_spec ::= <described in the next '
+ 'section>\n'
+ '\n'
+ 'In less formal terms, the replacement field can start with '
+ 'a\n'
+ '*field_name* that specifies the object whose value is to be '
+ 'formatted\n'
+ 'and inserted into the output instead of the replacement '
+ 'field. The\n'
+ '*field_name* is optionally followed by a *conversion* '
+ 'field, which is\n'
+ 'preceded by an exclamation point "\'!\'", and a '
+ '*format_spec*, which is\n'
+ 'preceded by a colon "\':\'". These specify a non-default '
+ 'format for the\n'
+ 'replacement value.\n'
+ '\n'
+ 'See also the Format Specification Mini-Language section.\n'
+ '\n'
+ 'The *field_name* itself begins with an *arg_name* that is '
+ 'either a\n'
+ "number or a keyword. If it's a number, it refers to a "
+ 'positional\n'
+ "argument, and if it's a keyword, it refers to a named "
+ 'keyword\n'
+ 'argument. If the numerical arg_names in a format string '
+ 'are 0, 1, 2,\n'
+ '... in sequence, they can all be omitted (not just some) '
+ 'and the\n'
+ 'numbers 0, 1, 2, ... will be automatically inserted in that '
+ 'order.\n'
+ 'Because *arg_name* is not quote-delimited, it is not '
+ 'possible to\n'
+ 'specify arbitrary dictionary keys (e.g., the strings '
+ '"\'10\'" or\n'
+ '"\':-]\'") within a format string. The *arg_name* can be '
+ 'followed by any\n'
+ 'number of index or attribute expressions. An expression of '
+ 'the form\n'
+ '"\'.name\'" selects the named attribute using "getattr()", '
+ 'while an\n'
+ 'expression of the form "\'[index]\'" does an index lookup '
+ 'using\n'
+ '"__getitem__()".\n'
+ '\n'
+ 'Changed in version 2.7: The positional argument specifiers '
+ 'can be\n'
+ 'omitted, so "\'{} {}\'" is equivalent to "\'{0} {1}\'".\n'
+ '\n'
+ 'Some simple format string examples:\n'
+ '\n'
+ ' "First, thou shalt count to {0}" # References first '
+ 'positional argument\n'
+ ' "Bring me a {}" # Implicitly '
+ 'references the first positional argument\n'
+ ' "From {} to {}" # Same as "From {0} to '
+ '{1}"\n'
+ ' "My quest is {name}" # References keyword '
+ "argument 'name'\n"
+ ' "Weight in tons {0.weight}" # \'weight\' attribute '
+ 'of first positional arg\n'
+ ' "Units destroyed: {players[0]}" # First element of '
+ "keyword argument 'players'.\n"
+ '\n'
+ 'The *conversion* field causes a type coercion before '
+ 'formatting.\n'
+ 'Normally, the job of formatting a value is done by the '
+ '"__format__()"\n'
+ 'method of the value itself. However, in some cases it is '
+ 'desirable to\n'
+ 'force a type to be formatted as a string, overriding its '
+ 'own\n'
+ 'definition of formatting. By converting the value to a '
+ 'string before\n'
+ 'calling "__format__()", the normal formatting logic is '
+ 'bypassed.\n'
+ '\n'
+ 'Two conversion flags are currently supported: "\'!s\'" '
+ 'which calls\n'
+ '"str()" on the value, and "\'!r\'" which calls "repr()".\n'
+ '\n'
+ 'Some examples:\n'
+ '\n'
+ ' "Harold\'s a clever {0!s}" # Calls str() on the '
+ 'argument first\n'
+ ' "Bring out the holy {name!r}" # Calls repr() on the '
+ 'argument first\n'
+ '\n'
+ 'The *format_spec* field contains a specification of how the '
+ 'value\n'
+ 'should be presented, including such details as field width, '
+ 'alignment,\n'
+ 'padding, decimal precision and so on. Each value type can '
+ 'define its\n'
+ 'own "formatting mini-language" or interpretation of the '
+ '*format_spec*.\n'
+ '\n'
+ 'Most built-in types support a common formatting '
+ 'mini-language, which\n'
+ 'is described in the next section.\n'
+ '\n'
+ 'A *format_spec* field can also include nested replacement '
+ 'fields\n'
+ 'within it. These nested replacement fields may contain a '
+ 'field name,\n'
+ 'conversion flag and format specification, but deeper '
+ 'nesting is not\n'
+ 'allowed. The replacement fields within the format_spec '
+ 'are\n'
+ 'substituted before the *format_spec* string is interpreted. '
+ 'This\n'
+ 'allows the formatting of a value to be dynamically '
+ 'specified.\n'
+ '\n'
+ 'See the Format examples section for some examples.\n'
+ '\n'
+ '\n'
+ 'Format Specification Mini-Language\n'
+ '==================================\n'
+ '\n'
+ '"Format specifications" are used within replacement fields '
+ 'contained\n'
+ 'within a format string to define how individual values are '
+ 'presented\n'
+ '(see Format String Syntax). They can also be passed '
+ 'directly to the\n'
+ 'built-in "format()" function. Each formattable type may '
+ 'define how\n'
+ 'the format specification is to be interpreted.\n'
+ '\n'
+ 'Most built-in types implement the following options for '
+ 'format\n'
+ 'specifications, although some of the formatting options are '
+ 'only\n'
+ 'supported by the numeric types.\n'
+ '\n'
+ 'A general convention is that an empty format string ("""") '
+ 'produces\n'
+ 'the same result as if you had called "str()" on the value. '
+ 'A non-empty\n'
+ 'format string typically modifies the result.\n'
+ '\n'
+ 'The general form of a *standard format specifier* is:\n'
+ '\n'
+ ' format_spec ::= '
+ '[[fill]align][sign][#][0][width][,][.precision][type]\n'
+ ' fill ::= <any character>\n'
+ ' align ::= "<" | ">" | "=" | "^"\n'
+ ' sign ::= "+" | "-" | " "\n'
+ ' width ::= integer\n'
+ ' precision ::= integer\n'
+ ' type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" '
+ '| "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n'
+ '\n'
+ 'If a valid *align* value is specified, it can be preceded '
+ 'by a *fill*\n'
+ 'character that can be any character and defaults to a space '
+ 'if\n'
+ 'omitted. It is not possible to use a literal curly brace '
+ '(""{"" or\n'
+ '""}"") as the *fill* character when using the '
+ '"str.format()" method.\n'
+ 'However, it is possible to insert a curly brace with a '
+ 'nested\n'
+ "replacement field. This limitation doesn't affect the "
+ '"format()"\n'
+ 'function.\n'
+ '\n'
+ 'The meaning of the various alignment options is as '
+ 'follows:\n'
+ '\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | Option | '
+ 'Meaning '
+ '|\n'
+ ' '
+ '+===========+============================================================+\n'
+ ' | "\'<\'" | Forces the field to be left-aligned '
+ 'within the available |\n'
+ ' | | space (this is the default for most '
+ 'objects). |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'>\'" | Forces the field to be right-aligned '
+ 'within the available |\n'
+ ' | | space (this is the default for '
+ 'numbers). |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'=\'" | Forces the padding to be placed after '
+ 'the sign (if any) |\n'
+ ' | | but before the digits. This is used for '
+ 'printing fields |\n'
+ " | | in the form '+000000120'. This alignment "
+ 'option is only |\n'
+ ' | | valid for numeric types. It becomes the '
+ "default when '0' |\n"
+ ' | | immediately precedes the field '
+ 'width. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'^\'" | Forces the field to be centered within '
+ 'the available |\n'
+ ' | | '
+ 'space. '
+ '|\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ '\n'
+ 'Note that unless a minimum field width is defined, the '
+ 'field width\n'
+ 'will always be the same size as the data to fill it, so '
+ 'that the\n'
+ 'alignment option has no meaning in this case.\n'
+ '\n'
+ 'The *sign* option is only valid for number types, and can '
+ 'be one of\n'
+ 'the following:\n'
+ '\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | Option | '
+ 'Meaning '
+ '|\n'
+ ' '
+ '+===========+============================================================+\n'
+ ' | "\'+\'" | indicates that a sign should be used for '
+ 'both positive as |\n'
+ ' | | well as negative '
+ 'numbers. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'-\'" | indicates that a sign should be used '
+ 'only for negative |\n'
+ ' | | numbers (this is the default '
+ 'behavior). |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | space | indicates that a leading space should be '
+ 'used on positive |\n'
+ ' | | numbers, and a minus sign on negative '
+ 'numbers. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ '\n'
+ 'The "\'#\'" option is only valid for integers, and only for '
+ 'binary,\n'
+ 'octal, or hexadecimal output. If present, it specifies '
+ 'that the\n'
+ 'output will be prefixed by "\'0b\'", "\'0o\'", or "\'0x\'", '
+ 'respectively.\n'
+ '\n'
+ 'The "\',\'" option signals the use of a comma for a '
+ 'thousands separator.\n'
+ 'For a locale aware separator, use the "\'n\'" integer '
+ 'presentation type\n'
+ 'instead.\n'
+ '\n'
+ 'Changed in version 2.7: Added the "\',\'" option (see also '
+ '**PEP 378**).\n'
+ '\n'
+ '*width* is a decimal integer defining the minimum field '
+ 'width. If not\n'
+ 'specified, then the field width will be determined by the '
+ 'content.\n'
+ '\n'
+ 'When no explicit alignment is given, preceding the *width* '
+ 'field by a\n'
+ 'zero ("\'0\'") character enables sign-aware zero-padding '
+ 'for numeric\n'
+ 'types. This is equivalent to a *fill* character of "\'0\'" '
+ 'with an\n'
+ '*alignment* type of "\'=\'".\n'
+ '\n'
+ 'The *precision* is a decimal number indicating how many '
+ 'digits should\n'
+ 'be displayed after the decimal point for a floating point '
+ 'value\n'
+ 'formatted with "\'f\'" and "\'F\'", or before and after the '
+ 'decimal point\n'
+ 'for a floating point value formatted with "\'g\'" or '
+ '"\'G\'". For non-\n'
+ 'number types the field indicates the maximum field size - '
+ 'in other\n'
+ 'words, how many characters will be used from the field '
+ 'content. The\n'
+ '*precision* is not allowed for integer values.\n'
+ '\n'
+ 'Finally, the *type* determines how the data should be '
+ 'presented.\n'
+ '\n'
+ 'The available string presentation types are:\n'
+ '\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | Type | '
+ 'Meaning '
+ '|\n'
+ ' '
+ '+===========+============================================================+\n'
+ ' | "\'s\'" | String format. This is the default type '
+ 'for strings and |\n'
+ ' | | may be '
+ 'omitted. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | None | The same as '
+ '"\'s\'". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ '\n'
+ 'The available integer presentation types are:\n'
+ '\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | Type | '
+ 'Meaning '
+ '|\n'
+ ' '
+ '+===========+============================================================+\n'
+ ' | "\'b\'" | Binary format. Outputs the number in '
+ 'base 2. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'c\'" | Character. Converts the integer to the '
+ 'corresponding |\n'
+ ' | | unicode character before '
+ 'printing. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'d\'" | Decimal Integer. Outputs the number in '
+ 'base 10. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'o\'" | Octal format. Outputs the number in base '
+ '8. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'x\'" | Hex format. Outputs the number in base '
+ '16, using lower- |\n'
+ ' | | case letters for the digits above '
+ '9. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'X\'" | Hex format. Outputs the number in base '
+ '16, using upper- |\n'
+ ' | | case letters for the digits above '
+ '9. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'n\'" | Number. This is the same as "\'d\'", '
+ 'except that it uses the |\n'
+ ' | | current locale setting to insert the '
+ 'appropriate number |\n'
+ ' | | separator '
+ 'characters. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | None | The same as '
+ '"\'d\'". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ '\n'
+ 'In addition to the above presentation types, integers can '
+ 'be formatted\n'
+ 'with the floating point presentation types listed below '
+ '(except "\'n\'"\n'
+ 'and "None"). When doing so, "float()" is used to convert '
+ 'the integer\n'
+ 'to a floating point number before formatting.\n'
+ '\n'
+ 'The available presentation types for floating point and '
+ 'decimal values\n'
+ 'are:\n'
+ '\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | Type | '
+ 'Meaning '
+ '|\n'
+ ' '
+ '+===========+============================================================+\n'
+ ' | "\'e\'" | Exponent notation. Prints the number in '
+ 'scientific |\n'
+ " | | notation using the letter 'e' to indicate "
+ 'the exponent. |\n'
+ ' | | The default precision is '
+ '"6". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'E\'" | Exponent notation. Same as "\'e\'" '
+ 'except it uses an upper |\n'
+ " | | case 'E' as the separator "
+ 'character. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'f\'" | Fixed point. Displays the number as a '
+ 'fixed-point number. |\n'
+ ' | | The default precision is '
+ '"6". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'F\'" | Fixed point. Same as '
+ '"\'f\'". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'g\'" | General format. For a given precision '
+ '"p >= 1", this |\n'
+ ' | | rounds the number to "p" significant '
+ 'digits and then |\n'
+ ' | | formats the result in either fixed-point '
+ 'format or in |\n'
+ ' | | scientific notation, depending on its '
+ 'magnitude. The |\n'
+ ' | | precise rules are as follows: suppose that '
+ 'the result |\n'
+ ' | | formatted with presentation type "\'e\'" '
+ 'and precision "p-1" |\n'
+ ' | | would have exponent "exp". Then if "-4 <= '
+ 'exp < p", the |\n'
+ ' | | number is formatted with presentation type '
+ '"\'f\'" and |\n'
+ ' | | precision "p-1-exp". Otherwise, the '
+ 'number is formatted |\n'
+ ' | | with presentation type "\'e\'" and '
+ 'precision "p-1". In both |\n'
+ ' | | cases insignificant trailing zeros are '
+ 'removed from the |\n'
+ ' | | significand, and the decimal point is also '
+ 'removed if |\n'
+ ' | | there are no remaining digits following '
+ 'it. Positive and |\n'
+ ' | | negative infinity, positive and negative '
+ 'zero, and nans, |\n'
+ ' | | are formatted as "inf", "-inf", "0", "-0" '
+ 'and "nan" |\n'
+ ' | | respectively, regardless of the '
+ 'precision. A precision of |\n'
+ ' | | "0" is treated as equivalent to a '
+ 'precision of "1". The |\n'
+ ' | | default precision is '
+ '"6". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'G\'" | General format. Same as "\'g\'" except '
+ 'switches to "\'E\'" if |\n'
+ ' | | the number gets too large. The '
+ 'representations of infinity |\n'
+ ' | | and NaN are uppercased, '
+ 'too. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'n\'" | Number. This is the same as "\'g\'", '
+ 'except that it uses the |\n'
+ ' | | current locale setting to insert the '
+ 'appropriate number |\n'
+ ' | | separator '
+ 'characters. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | "\'%\'" | Percentage. Multiplies the number by 100 '
+ 'and displays in |\n'
+ ' | | fixed ("\'f\'") format, followed by a '
+ 'percent sign. |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ ' | None | The same as '
+ '"\'g\'". |\n'
+ ' '
+ '+-----------+------------------------------------------------------------+\n'
+ '\n'
+ '\n'
+ 'Format examples\n'
+ '===============\n'
+ '\n'
+ 'This section contains examples of the "str.format()" syntax '
+ 'and\n'
+ 'comparison with the old "%"-formatting.\n'
+ '\n'
+ 'In most of the cases the syntax is similar to the old '
+ '"%"-formatting,\n'
+ 'with the addition of the "{}" and with ":" used instead of '
+ '"%". For\n'
+ 'example, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n'
+ '\n'
+ 'The new format syntax also supports new and different '
+ 'options, shown\n'
+ 'in the follow examples.\n'
+ '\n'
+ 'Accessing arguments by position:\n'
+ '\n'
+ " >>> '{0}, {1}, {2}'.format('a', 'b', 'c')\n"
+ " 'a, b, c'\n"
+ " >>> '{}, {}, {}'.format('a', 'b', 'c') # 2.7+ only\n"
+ " 'a, b, c'\n"
+ " >>> '{2}, {1}, {0}'.format('a', 'b', 'c')\n"
+ " 'c, b, a'\n"
+ " >>> '{2}, {1}, {0}'.format(*'abc') # unpacking "
+ 'argument sequence\n'
+ " 'c, b, a'\n"
+ " >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' "
+ 'indices can be repeated\n'
+ " 'abracadabra'\n"
+ '\n'
+ 'Accessing arguments by name:\n'
+ '\n'
+ " >>> 'Coordinates: {latitude}, "
+ "{longitude}'.format(latitude='37.24N', "
+ "longitude='-115.81W')\n"
+ " 'Coordinates: 37.24N, -115.81W'\n"
+ " >>> coord = {'latitude': '37.24N', 'longitude': "
+ "'-115.81W'}\n"
+ " >>> 'Coordinates: {latitude}, "
+ "{longitude}'.format(**coord)\n"
+ " 'Coordinates: 37.24N, -115.81W'\n"
+ '\n'
+ "Accessing arguments' attributes:\n"
+ '\n'
+ ' >>> c = 3-5j\n'
+ " >>> ('The complex number {0} is formed from the real "
+ "part {0.real} '\n"
+ " ... 'and the imaginary part {0.imag}.').format(c)\n"
+ " 'The complex number (3-5j) is formed from the real part "
+ "3.0 and the imaginary part -5.0.'\n"
+ ' >>> class Point(object):\n'
+ ' ... def __init__(self, x, y):\n'
+ ' ... self.x, self.y = x, y\n'
+ ' ... def __str__(self):\n'
+ " ... return 'Point({self.x}, "
+ "{self.y})'.format(self=self)\n"
+ ' ...\n'
+ ' >>> str(Point(4, 2))\n'
+ " 'Point(4, 2)'\n"
+ '\n'
+ "Accessing arguments' items:\n"
+ '\n'
+ ' >>> coord = (3, 5)\n'
+ " >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n"
+ " 'X: 3; Y: 5'\n"
+ '\n'
+ 'Replacing "%s" and "%r":\n'
+ '\n'
+ ' >>> "repr() shows quotes: {!r}; str() doesn\'t: '
+ '{!s}".format(\'test1\', \'test2\')\n'
+ ' "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n'
+ '\n'
+ 'Aligning the text and specifying a width:\n'
+ '\n'
+ " >>> '{:<30}'.format('left aligned')\n"
+ " 'left aligned '\n"
+ " >>> '{:>30}'.format('right aligned')\n"
+ " ' right aligned'\n"
+ " >>> '{:^30}'.format('centered')\n"
+ " ' centered '\n"
+ " >>> '{:*^30}'.format('centered') # use '*' as a fill "
+ 'char\n'
+ " '***********centered***********'\n"
+ '\n'
+ 'Replacing "%+f", "%-f", and "% f" and specifying a sign:\n'
+ '\n'
+ " >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it "
+ 'always\n'
+ " '+3.140000; -3.140000'\n"
+ " >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space "
+ 'for positive numbers\n'
+ " ' 3.140000; -3.140000'\n"
+ " >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the "
+ "minus -- same as '{:f}; {:f}'\n"
+ " '3.140000; -3.140000'\n"
+ '\n'
+ 'Replacing "%x" and "%o" and converting the value to '
+ 'different bases:\n'
+ '\n'
+ ' >>> # format also supports binary numbers\n'
+ ' >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: '
+ '{0:b}".format(42)\n'
+ " 'int: 42; hex: 2a; oct: 52; bin: 101010'\n"
+ ' >>> # with 0x, 0o, or 0b as prefix:\n'
+ ' >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: '
+ '{0:#b}".format(42)\n'
+ " 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010'\n"
+ '\n'
+ 'Using the comma as a thousands separator:\n'
+ '\n'
+ " >>> '{:,}'.format(1234567890)\n"
+ " '1,234,567,890'\n"
+ '\n'
+ 'Expressing a percentage:\n'
+ '\n'
+ ' >>> points = 19.5\n'
+ ' >>> total = 22\n'
+ " >>> 'Correct answers: {:.2%}'.format(points/total)\n"
+ " 'Correct answers: 88.64%'\n"
+ '\n'
+ 'Using type-specific formatting:\n'
+ '\n'
+ ' >>> import datetime\n'
+ ' >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n'
+ " >>> '{:%Y-%m-%d %H:%M:%S}'.format(d)\n"
+ " '2010-07-04 12:15:58'\n"
+ '\n'
+ 'Nesting arguments and more complex examples:\n'
+ '\n'
+ " >>> for align, text in zip('<^>', ['left', 'center', "
+ "'right']):\n"
+ " ... '{0:{fill}{align}16}'.format(text, fill=align, "
+ 'align=align)\n'
+ ' ...\n'
+ " 'left<<<<<<<<<<<<'\n"
+ " '^^^^^center^^^^^'\n"
+ " '>>>>>>>>>>>right'\n"
+ ' >>>\n'
+ ' >>> octets = [192, 168, 0, 1]\n'
+ " >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets)\n"
+ " 'C0A80001'\n"
+ ' >>> int(_, 16)\n'
+ ' 3232235521\n'
+ ' >>>\n'
+ ' >>> width = 5\n'
+ ' >>> for num in range(5,12):\n'
+ " ... for base in 'dXob':\n"
+ " ... print '{0:{width}{base}}'.format(num, "
+ 'base=base, width=width),\n'
+ ' ... print\n'
+ ' ...\n'
+ ' 5 5 5 101\n'
+ ' 6 6 6 110\n'
+ ' 7 7 7 111\n'
+ ' 8 8 10 1000\n'
+ ' 9 9 11 1001\n'
+ ' 10 A 12 1010\n'
+ ' 11 B 13 1011\n',
+ 'function': '\n'
+ 'Function definitions\n'
+ '********************\n'
+ '\n'
+ 'A function definition defines a user-defined function object '
+ '(see\n'
+ 'section The standard type hierarchy):\n'
+ '\n'
+ ' decorated ::= decorators (classdef | funcdef)\n'
+ ' decorators ::= decorator+\n'
+ ' decorator ::= "@" dotted_name ["(" [argument_list [","]] '
+ '")"] NEWLINE\n'
+ ' funcdef ::= "def" funcname "(" [parameter_list] ")" '
+ '":" suite\n'
+ ' dotted_name ::= identifier ("." identifier)*\n'
+ ' parameter_list ::= (defparameter ",")*\n'
+ ' ( "*" identifier ["," "**" identifier]\n'
+ ' | "**" identifier\n'
+ ' | defparameter [","] )\n'
+ ' defparameter ::= parameter ["=" expression]\n'
+ ' sublist ::= parameter ("," parameter)* [","]\n'
+ ' parameter ::= identifier | "(" sublist ")"\n'
+ ' funcname ::= identifier\n'
+ '\n'
+ 'A function definition is an executable statement. Its execution '
+ 'binds\n'
+ 'the function name in the current local namespace to a function '
+ 'object\n'
+ '(a wrapper around the executable code for the function). This\n'
+ 'function object contains a reference to the current global '
+ 'namespace\n'
+ 'as the global namespace to be used when the function is called.\n'
+ '\n'
+ 'The function definition does not execute the function body; this '
+ 'gets\n'
+ 'executed only when the function is called. [3]\n'
+ '\n'
+ 'A function definition may be wrapped by one or more *decorator*\n'
+ 'expressions. Decorator expressions are evaluated when the '
+ 'function is\n'
+ 'defined, in the scope that contains the function definition. '
+ 'The\n'
+ 'result must be a callable, which is invoked with the function '
+ 'object\n'
+ 'as the only argument. The returned value is bound to the '
+ 'function name\n'
+ 'instead of the function object. Multiple decorators are applied '
+ 'in\n'
+ 'nested fashion. For example, the following code:\n'
+ '\n'
+ ' @f1(arg)\n'
+ ' @f2\n'
+ ' def func(): pass\n'
+ '\n'
+ 'is equivalent to:\n'
+ '\n'
+ ' def func(): pass\n'
+ ' func = f1(arg)(f2(func))\n'
+ '\n'
+ 'When one or more top-level *parameters* have the form '
+ '*parameter* "="\n'
+ '*expression*, the function is said to have "default parameter '
+ 'values."\n'
+ 'For a parameter with a default value, the corresponding '
+ '*argument* may\n'
+ "be omitted from a call, in which case the parameter's default "
+ 'value is\n'
+ 'substituted. If a parameter has a default value, all following\n'
+ 'parameters must also have a default value --- this is a '
+ 'syntactic\n'
+ 'restriction that is not expressed by the grammar.\n'
+ '\n'
+ '**Default parameter values are evaluated when the function '
+ 'definition\n'
+ 'is executed.** This means that the expression is evaluated '
+ 'once, when\n'
+ 'the function is defined, and that the same "pre-computed" value '
+ 'is\n'
+ 'used for each call. This is especially important to understand '
+ 'when a\n'
+ 'default parameter is a mutable object, such as a list or a '
+ 'dictionary:\n'
+ 'if the function modifies the object (e.g. by appending an item '
+ 'to a\n'
+ 'list), the default value is in effect modified. This is '
+ 'generally not\n'
+ 'what was intended. A way around this is to use "None" as the\n'
+ 'default, and explicitly test for it in the body of the function, '
+ 'e.g.:\n'
+ '\n'
+ ' def whats_on_the_telly(penguin=None):\n'
+ ' if penguin is None:\n'
+ ' penguin = []\n'
+ ' penguin.append("property of the zoo")\n'
+ ' return penguin\n'
+ '\n'
+ 'Function call semantics are described in more detail in section '
+ 'Calls.\n'
+ 'A function call always assigns values to all parameters '
+ 'mentioned in\n'
+ 'the parameter list, either from position arguments, from '
+ 'keyword\n'
+ 'arguments, or from default values. If the form ""*identifier"" '
+ 'is\n'
+ 'present, it is initialized to a tuple receiving any excess '
+ 'positional\n'
+ 'parameters, defaulting to the empty tuple. If the form\n'
+ '""**identifier"" is present, it is initialized to a new '
+ 'dictionary\n'
+ 'receiving any excess keyword arguments, defaulting to a new '
+ 'empty\n'
+ 'dictionary.\n'
+ '\n'
+ 'It is also possible to create anonymous functions (functions not '
+ 'bound\n'
+ 'to a name), for immediate use in expressions. This uses lambda\n'
+ 'expressions, described in section Lambdas. Note that the '
+ 'lambda\n'
+ 'expression is merely a shorthand for a simplified function '
+ 'definition;\n'
+ 'a function defined in a ""def"" statement can be passed around '
+ 'or\n'
+ 'assigned to another name just like a function defined by a '
+ 'lambda\n'
+ 'expression. The ""def"" form is actually more powerful since '
+ 'it\n'
+ 'allows the execution of multiple statements.\n'
+ '\n'
+ "**Programmer's note:** Functions are first-class objects. A "
+ '""def""\n'
+ 'form executed inside a function definition defines a local '
+ 'function\n'
+ 'that can be returned or passed around. Free variables used in '
+ 'the\n'
+ 'nested function can access the local variables of the function\n'
+ 'containing the def. See section Naming and binding for '
+ 'details.\n',
+ 'global': '\n'
+ 'The "global" statement\n'
+ '**********************\n'
+ '\n'
+ ' global_stmt ::= "global" identifier ("," identifier)*\n'
+ '\n'
+ 'The "global" statement is a declaration which holds for the '
+ 'entire\n'
+ 'current code block. It means that the listed identifiers are to '
+ 'be\n'
+ 'interpreted as globals. It would be impossible to assign to a '
+ 'global\n'
+ 'variable without "global", although free variables may refer to\n'
+ 'globals without being declared global.\n'
+ '\n'
+ 'Names listed in a "global" statement must not be used in the same '
+ 'code\n'
+ 'block textually preceding that "global" statement.\n'
+ '\n'
+ 'Names listed in a "global" statement must not be defined as '
+ 'formal\n'
+ 'parameters or in a "for" loop control target, "class" definition,\n'
+ 'function definition, or "import" statement.\n'
+ '\n'
+ '**CPython implementation detail:** The current implementation does '
+ 'not\n'
+ 'enforce the latter two restrictions, but programs should not '
+ 'abuse\n'
+ 'this freedom, as future implementations may enforce them or '
+ 'silently\n'
+ 'change the meaning of the program.\n'
+ '\n'
+ '**Programmer\'s note:** "global" is a directive to the parser. '
+ 'It\n'
+ 'applies only to code parsed at the same time as the "global"\n'
+ 'statement. In particular, a "global" statement contained in an '
+ '"exec"\n'
+ 'statement does not affect the code block *containing* the "exec"\n'
+ 'statement, and code contained in an "exec" statement is unaffected '
+ 'by\n'
+ '"global" statements in the code containing the "exec" statement. '
+ 'The\n'
+ 'same applies to the "eval()", "execfile()" and "compile()" '
+ 'functions.\n',
+ 'id-classes': '\n'
+ 'Reserved classes of identifiers\n'
+ '*******************************\n'
+ '\n'
+ 'Certain classes of identifiers (besides keywords) have '
+ 'special\n'
+ 'meanings. These classes are identified by the patterns of '
+ 'leading and\n'
+ 'trailing underscore characters:\n'
+ '\n'
+ '"_*"\n'
+ ' Not imported by "from module import *". The special '
+ 'identifier "_"\n'
+ ' is used in the interactive interpreter to store the result '
+ 'of the\n'
+ ' last evaluation; it is stored in the "__builtin__" module. '
+ 'When\n'
+ ' not in interactive mode, "_" has no special meaning and is '
+ 'not\n'
+ ' defined. See section The import statement.\n'
+ '\n'
+ ' Note: The name "_" is often used in conjunction with\n'
+ ' internationalization; refer to the documentation for the\n'
+ ' "gettext" module for more information on this '
+ 'convention.\n'
+ '\n'
+ '"__*__"\n'
+ ' System-defined names. These names are defined by the '
+ 'interpreter\n'
+ ' and its implementation (including the standard library). '
+ 'Current\n'
+ ' system names are discussed in the Special method names '
+ 'section and\n'
+ ' elsewhere. More will likely be defined in future versions '
+ 'of\n'
+ ' Python. *Any* use of "__*__" names, in any context, that '
+ 'does not\n'
+ ' follow explicitly documented use, is subject to breakage '
+ 'without\n'
+ ' warning.\n'
+ '\n'
+ '"__*"\n'
+ ' Class-private names. Names in this category, when used '
+ 'within the\n'
+ ' context of a class definition, are re-written to use a '
+ 'mangled form\n'
+ ' to help avoid name clashes between "private" attributes of '
+ 'base and\n'
+ ' derived classes. See section Identifiers (Names).\n',
+ 'identifiers': '\n'
+ 'Identifiers and keywords\n'
+ '************************\n'
+ '\n'
+ 'Identifiers (also referred to as *names*) are described by '
+ 'the\n'
+ 'following lexical definitions:\n'
+ '\n'
+ ' identifier ::= (letter|"_") (letter | digit | "_")*\n'
+ ' letter ::= lowercase | uppercase\n'
+ ' lowercase ::= "a"..."z"\n'
+ ' uppercase ::= "A"..."Z"\n'
+ ' digit ::= "0"..."9"\n'
+ '\n'
+ 'Identifiers are unlimited in length. Case is significant.\n'
+ '\n'
+ '\n'
+ 'Keywords\n'
+ '========\n'
+ '\n'
+ 'The following identifiers are used as reserved words, or '
+ '*keywords* of\n'
+ 'the language, and cannot be used as ordinary identifiers. '
+ 'They must\n'
+ 'be spelled exactly as written here:\n'
+ '\n'
+ ' and del from not while\n'
+ ' as elif global or with\n'
+ ' assert else if pass yield\n'
+ ' break except import print\n'
+ ' class exec in raise\n'
+ ' continue finally is return\n'
+ ' def for lambda try\n'
+ '\n'
+ 'Changed in version 2.4: "None" became a constant and is now '
+ 'recognized\n'
+ 'by the compiler as a name for the built-in object "None". '
+ 'Although it\n'
+ 'is not a keyword, you cannot assign a different object to '
+ 'it.\n'
+ '\n'
+ 'Changed in version 2.5: Using "as" and "with" as identifiers '
+ 'triggers\n'
+ 'a warning. To use them as keywords, enable the '
+ '"with_statement"\n'
+ 'future feature .\n'
+ '\n'
+ 'Changed in version 2.6: "as" and "with" are full keywords.\n'
+ '\n'
+ '\n'
+ 'Reserved classes of identifiers\n'
+ '===============================\n'
+ '\n'
+ 'Certain classes of identifiers (besides keywords) have '
+ 'special\n'
+ 'meanings. These classes are identified by the patterns of '
+ 'leading and\n'
+ 'trailing underscore characters:\n'
+ '\n'
+ '"_*"\n'
+ ' Not imported by "from module import *". The special '
+ 'identifier "_"\n'
+ ' is used in the interactive interpreter to store the result '
+ 'of the\n'
+ ' last evaluation; it is stored in the "__builtin__" '
+ 'module. When\n'
+ ' not in interactive mode, "_" has no special meaning and is '
+ 'not\n'
+ ' defined. See section The import statement.\n'
+ '\n'
+ ' Note: The name "_" is often used in conjunction with\n'
+ ' internationalization; refer to the documentation for '
+ 'the\n'
+ ' "gettext" module for more information on this '
+ 'convention.\n'
+ '\n'
+ '"__*__"\n'
+ ' System-defined names. These names are defined by the '
+ 'interpreter\n'
+ ' and its implementation (including the standard library). '
+ 'Current\n'
+ ' system names are discussed in the Special method names '
+ 'section and\n'
+ ' elsewhere. More will likely be defined in future versions '
+ 'of\n'
+ ' Python. *Any* use of "__*__" names, in any context, that '
+ 'does not\n'
+ ' follow explicitly documented use, is subject to breakage '
+ 'without\n'
+ ' warning.\n'
+ '\n'
+ '"__*"\n'
+ ' Class-private names. Names in this category, when used '
+ 'within the\n'
+ ' context of a class definition, are re-written to use a '
+ 'mangled form\n'
+ ' to help avoid name clashes between "private" attributes of '
+ 'base and\n'
+ ' derived classes. See section Identifiers (Names).\n',
+ 'if': '\n'
+ 'The "if" statement\n'
+ '******************\n'
+ '\n'
+ 'The "if" statement is used for conditional execution:\n'
+ '\n'
+ ' if_stmt ::= "if" expression ":" suite\n'
+ ' ( "elif" expression ":" suite )*\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'It selects exactly one of the suites by evaluating the expressions '
+ 'one\n'
+ 'by one until one is found to be true (see section Boolean operations\n'
+ 'for the definition of true and false); then that suite is executed\n'
+ '(and no other part of the "if" statement is executed or evaluated).\n'
+ 'If all expressions are false, the suite of the "else" clause, if\n'
+ 'present, is executed.\n',
+ 'imaginary': '\n'
+ 'Imaginary literals\n'
+ '******************\n'
+ '\n'
+ 'Imaginary literals are described by the following lexical '
+ 'definitions:\n'
+ '\n'
+ ' imagnumber ::= (floatnumber | intpart) ("j" | "J")\n'
+ '\n'
+ 'An imaginary literal yields a complex number with a real part '
+ 'of 0.0.\n'
+ 'Complex numbers are represented as a pair of floating point '
+ 'numbers\n'
+ 'and have the same restrictions on their range. To create a '
+ 'complex\n'
+ 'number with a nonzero real part, add a floating point number to '
+ 'it,\n'
+ 'e.g., "(3+4j)". Some examples of imaginary literals:\n'
+ '\n'
+ ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n',
+ 'import': '\n'
+ 'The "import" statement\n'
+ '**********************\n'
+ '\n'
+ ' import_stmt ::= "import" module ["as" name] ( "," module '
+ '["as" name] )*\n'
+ ' | "from" relative_module "import" identifier '
+ '["as" name]\n'
+ ' ( "," identifier ["as" name] )*\n'
+ ' | "from" relative_module "import" "(" '
+ 'identifier ["as" name]\n'
+ ' ( "," identifier ["as" name] )* [","] ")"\n'
+ ' | "from" module "import" "*"\n'
+ ' module ::= (identifier ".")* identifier\n'
+ ' relative_module ::= "."* module | "."+\n'
+ ' name ::= identifier\n'
+ '\n'
+ 'Import statements are executed in two steps: (1) find a module, '
+ 'and\n'
+ 'initialize it if necessary; (2) define a name or names in the '
+ 'local\n'
+ 'namespace (of the scope where the "import" statement occurs). The\n'
+ 'statement comes in two forms differing on whether it uses the '
+ '"from"\n'
+ 'keyword. The first form (without "from") repeats these steps for '
+ 'each\n'
+ 'identifier in the list. The form with "from" performs step (1) '
+ 'once,\n'
+ 'and then performs step (2) repeatedly.\n'
+ '\n'
+ 'To understand how step (1) occurs, one must first understand how\n'
+ 'Python handles hierarchical naming of modules. To help organize\n'
+ 'modules and provide a hierarchy in naming, Python has a concept '
+ 'of\n'
+ 'packages. A package can contain other packages and modules while\n'
+ 'modules cannot contain other modules or packages. From a file '
+ 'system\n'
+ 'perspective, packages are directories and modules are files.\n'
+ '\n'
+ 'Once the name of the module is known (unless otherwise specified, '
+ 'the\n'
+ 'term "module" will refer to both packages and modules), searching '
+ 'for\n'
+ 'the module or package can begin. The first place checked is\n'
+ '"sys.modules", the cache of all modules that have been imported\n'
+ 'previously. If the module is found there then it is used in step '
+ '(2)\n'
+ 'of import.\n'
+ '\n'
+ 'If the module is not found in the cache, then "sys.meta_path" is\n'
+ 'searched (the specification for "sys.meta_path" can be found in '
+ '**PEP\n'
+ '302**). The object is a list of *finder* objects which are queried '
+ 'in\n'
+ 'order as to whether they know how to load the module by calling '
+ 'their\n'
+ '"find_module()" method with the name of the module. If the module\n'
+ 'happens to be contained within a package (as denoted by the '
+ 'existence\n'
+ 'of a dot in the name), then a second argument to "find_module()" '
+ 'is\n'
+ 'given as the value of the "__path__" attribute from the parent '
+ 'package\n'
+ '(everything up to the last dot in the name of the module being\n'
+ 'imported). If a finder can find the module it returns a *loader*\n'
+ '(discussed later) or returns "None".\n'
+ '\n'
+ 'If none of the finders on "sys.meta_path" are able to find the '
+ 'module\n'
+ 'then some implicitly defined finders are queried. Implementations '
+ 'of\n'
+ 'Python vary in what implicit meta path finders are defined. The '
+ 'one\n'
+ 'they all do define, though, is one that handles "sys.path_hooks",\n'
+ '"sys.path_importer_cache", and "sys.path".\n'
+ '\n'
+ 'The implicit finder searches for the requested module in the '
+ '"paths"\n'
+ 'specified in one of two places ("paths" do not have to be file '
+ 'system\n'
+ 'paths). If the module being imported is supposed to be contained\n'
+ 'within a package then the second argument passed to '
+ '"find_module()",\n'
+ '"__path__" on the parent package, is used as the source of paths. '
+ 'If\n'
+ 'the module is not contained in a package then "sys.path" is used '
+ 'as\n'
+ 'the source of paths.\n'
+ '\n'
+ 'Once the source of paths is chosen it is iterated over to find a\n'
+ 'finder that can handle that path. The dict at\n'
+ '"sys.path_importer_cache" caches finders for paths and is checked '
+ 'for\n'
+ 'a finder. If the path does not have a finder cached then\n'
+ '"sys.path_hooks" is searched by calling each object in the list '
+ 'with a\n'
+ 'single argument of the path, returning a finder or raises\n'
+ '"ImportError". If a finder is returned then it is cached in\n'
+ '"sys.path_importer_cache" and then used for that path entry. If '
+ 'no\n'
+ 'finder can be found but the path exists then a value of "None" is\n'
+ 'stored in "sys.path_importer_cache" to signify that an implicit, '
+ 'file-\n'
+ 'based finder that handles modules stored as individual files '
+ 'should be\n'
+ 'used for that path. If the path does not exist then a finder '
+ 'which\n'
+ 'always returns "None" is placed in the cache for the path.\n'
+ '\n'
+ 'If no finder can find the module then "ImportError" is raised.\n'
+ 'Otherwise some finder returned a loader whose "load_module()" '
+ 'method\n'
+ 'is called with the name of the module to load (see **PEP 302** for '
+ 'the\n'
+ 'original definition of loaders). A loader has several '
+ 'responsibilities\n'
+ 'to perform on a module it loads. First, if the module already '
+ 'exists\n'
+ 'in "sys.modules" (a possibility if the loader is called outside of '
+ 'the\n'
+ 'import machinery) then it is to use that module for initialization '
+ 'and\n'
+ 'not a new module. But if the module does not exist in '
+ '"sys.modules"\n'
+ 'then it is to be added to that dict before initialization begins. '
+ 'If\n'
+ 'an error occurs during loading of the module and it was added to\n'
+ '"sys.modules" it is to be removed from the dict. If an error '
+ 'occurs\n'
+ 'but the module was already in "sys.modules" it is left in the '
+ 'dict.\n'
+ '\n'
+ 'The loader must set several attributes on the module. "__name__" '
+ 'is to\n'
+ 'be set to the name of the module. "__file__" is to be the "path" '
+ 'to\n'
+ 'the file unless the module is built-in (and thus listed in\n'
+ '"sys.builtin_module_names") in which case the attribute is not '
+ 'set. If\n'
+ 'what is being imported is a package then "__path__" is to be set '
+ 'to a\n'
+ 'list of paths to be searched when looking for modules and '
+ 'packages\n'
+ 'contained within the package being imported. "__package__" is '
+ 'optional\n'
+ 'but should be set to the name of package that contains the module '
+ 'or\n'
+ 'package (the empty string is used for module not contained in a\n'
+ 'package). "__loader__" is also optional but should be set to the\n'
+ 'loader object that is loading the module.\n'
+ '\n'
+ 'If an error occurs during loading then the loader raises '
+ '"ImportError"\n'
+ 'if some other exception is not already being propagated. Otherwise '
+ 'the\n'
+ 'loader returns the module that was loaded and initialized.\n'
+ '\n'
+ 'When step (1) finishes without raising an exception, step (2) can\n'
+ 'begin.\n'
+ '\n'
+ 'The first form of "import" statement binds the module name in the\n'
+ 'local namespace to the module object, and then goes on to import '
+ 'the\n'
+ 'next identifier, if any. If the module name is followed by "as", '
+ 'the\n'
+ 'name following "as" is used as the local name for the module.\n'
+ '\n'
+ 'The "from" form does not bind the module name: it goes through '
+ 'the\n'
+ 'list of identifiers, looks each one of them up in the module found '
+ 'in\n'
+ 'step (1), and binds the name in the local namespace to the object '
+ 'thus\n'
+ 'found. As with the first form of "import", an alternate local '
+ 'name\n'
+ 'can be supplied by specifying ""as" localname". If a name is not\n'
+ 'found, "ImportError" is raised. If the list of identifiers is\n'
+ 'replaced by a star ("\'*\'"), all public names defined in the '
+ 'module are\n'
+ 'bound in the local namespace of the "import" statement..\n'
+ '\n'
+ 'The *public names* defined by a module are determined by checking '
+ 'the\n'
+ 'module\'s namespace for a variable named "__all__"; if defined, it '
+ 'must\n'
+ 'be a sequence of strings which are names defined or imported by '
+ 'that\n'
+ 'module. The names given in "__all__" are all considered public '
+ 'and\n'
+ 'are required to exist. If "__all__" is not defined, the set of '
+ 'public\n'
+ "names includes all names found in the module's namespace which do "
+ 'not\n'
+ 'begin with an underscore character ("\'_\'"). "__all__" should '
+ 'contain\n'
+ 'the entire public API. It is intended to avoid accidentally '
+ 'exporting\n'
+ 'items that are not part of the API (such as library modules which '
+ 'were\n'
+ 'imported and used within the module).\n'
+ '\n'
+ 'The "from" form with "*" may only occur in a module scope. If '
+ 'the\n'
+ 'wild card form of import --- "import *" --- is used in a function '
+ 'and\n'
+ 'the function contains or is a nested block with free variables, '
+ 'the\n'
+ 'compiler will raise a "SyntaxError".\n'
+ '\n'
+ 'When specifying what module to import you do not have to specify '
+ 'the\n'
+ 'absolute name of the module. When a module or package is '
+ 'contained\n'
+ 'within another package it is possible to make a relative import '
+ 'within\n'
+ 'the same top package without having to mention the package name. '
+ 'By\n'
+ 'using leading dots in the specified module or package after "from" '
+ 'you\n'
+ 'can specify how high to traverse up the current package hierarchy\n'
+ 'without specifying exact names. One leading dot means the current\n'
+ 'package where the module making the import exists. Two dots means '
+ 'up\n'
+ 'one package level. Three dots is up two levels, etc. So if you '
+ 'execute\n'
+ '"from . import mod" from a module in the "pkg" package then you '
+ 'will\n'
+ 'end up importing "pkg.mod". If you execute "from ..subpkg2 import '
+ 'mod"\n'
+ 'from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\n'
+ 'specification for relative imports is contained within **PEP '
+ '328**.\n'
+ '\n'
+ '"importlib.import_module()" is provided to support applications '
+ 'that\n'
+ 'determine which modules need to be loaded dynamically.\n'
+ '\n'
+ '\n'
+ 'Future statements\n'
+ '=================\n'
+ '\n'
+ 'A *future statement* is a directive to the compiler that a '
+ 'particular\n'
+ 'module should be compiled using syntax or semantics that will be\n'
+ 'available in a specified future release of Python. The future\n'
+ 'statement is intended to ease migration to future versions of '
+ 'Python\n'
+ 'that introduce incompatible changes to the language. It allows '
+ 'use of\n'
+ 'the new features on a per-module basis before the release in which '
+ 'the\n'
+ 'feature becomes standard.\n'
+ '\n'
+ ' future_statement ::= "from" "__future__" "import" feature ["as" '
+ 'name]\n'
+ ' ("," feature ["as" name])*\n'
+ ' | "from" "__future__" "import" "(" feature '
+ '["as" name]\n'
+ ' ("," feature ["as" name])* [","] ")"\n'
+ ' feature ::= identifier\n'
+ ' name ::= identifier\n'
+ '\n'
+ 'A future statement must appear near the top of the module. The '
+ 'only\n'
+ 'lines that can appear before a future statement are:\n'
+ '\n'
+ '* the module docstring (if any),\n'
+ '\n'
+ '* comments,\n'
+ '\n'
+ '* blank lines, and\n'
+ '\n'
+ '* other future statements.\n'
+ '\n'
+ 'The features recognized by Python 2.6 are "unicode_literals",\n'
+ '"print_function", "absolute_import", "division", "generators",\n'
+ '"nested_scopes" and "with_statement". "generators", '
+ '"with_statement",\n'
+ '"nested_scopes" are redundant in Python version 2.6 and above '
+ 'because\n'
+ 'they are always enabled.\n'
+ '\n'
+ 'A future statement is recognized and treated specially at compile\n'
+ 'time: Changes to the semantics of core constructs are often\n'
+ 'implemented by generating different code. It may even be the '
+ 'case\n'
+ 'that a new feature introduces new incompatible syntax (such as a '
+ 'new\n'
+ 'reserved word), in which case the compiler may need to parse the\n'
+ 'module differently. Such decisions cannot be pushed off until\n'
+ 'runtime.\n'
+ '\n'
+ 'For any given release, the compiler knows which feature names '
+ 'have\n'
+ 'been defined, and raises a compile-time error if a future '
+ 'statement\n'
+ 'contains a feature not known to it.\n'
+ '\n'
+ 'The direct runtime semantics are the same as for any import '
+ 'statement:\n'
+ 'there is a standard module "__future__", described later, and it '
+ 'will\n'
+ 'be imported in the usual way at the time the future statement is\n'
+ 'executed.\n'
+ '\n'
+ 'The interesting runtime semantics depend on the specific feature\n'
+ 'enabled by the future statement.\n'
+ '\n'
+ 'Note that there is nothing special about the statement:\n'
+ '\n'
+ ' import __future__ [as name]\n'
+ '\n'
+ "That is not a future statement; it's an ordinary import statement "
+ 'with\n'
+ 'no special semantics or syntax restrictions.\n'
+ '\n'
+ 'Code compiled by an "exec" statement or calls to the built-in\n'
+ 'functions "compile()" and "execfile()" that occur in a module "M"\n'
+ 'containing a future statement will, by default, use the new '
+ 'syntax or\n'
+ 'semantics associated with the future statement. This can, '
+ 'starting\n'
+ 'with Python 2.2 be controlled by optional arguments to "compile()" '
+ '---\n'
+ 'see the documentation of that function for details.\n'
+ '\n'
+ 'A future statement typed at an interactive interpreter prompt '
+ 'will\n'
+ 'take effect for the rest of the interpreter session. If an\n'
+ 'interpreter is started with the "-i" option, is passed a script '
+ 'name\n'
+ 'to execute, and the script includes a future statement, it will be '
+ 'in\n'
+ 'effect in the interactive session started after the script is\n'
+ 'executed.\n'
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 236** - Back to the __future__\n'
+ ' The original proposal for the __future__ mechanism.\n',
+ 'in': '\n'
+ 'Membership test operations\n'
+ '**************************\n'
+ '\n'
+ 'The operators "in" and "not in" test for membership. "x in s"\n'
+ 'evaluates to "True" if *x* is a member of *s*, and "False" otherwise.\n'
+ '"x not in s" returns the negation of "x in s". All built-in '
+ 'sequences\n'
+ 'and set types support this as well as dictionary, for which "in" '
+ 'tests\n'
+ 'whether the dictionary has a given key. For container types such as\n'
+ 'list, tuple, set, frozenset, dict, or collections.deque, the\n'
+ 'expression "x in y" is equivalent to "any(x is e or x == e for e in\n'
+ 'y)".\n'
+ '\n'
+ 'For the string and bytes types, "x in y" is "True" if and only if *x*\n'
+ 'is a substring of *y*. An equivalent test is "y.find(x) != -1".\n'
+ 'Empty strings are always considered to be a substring of any other\n'
+ 'string, so """ in "abc"" will return "True".\n'
+ '\n'
+ 'For user-defined classes which define the "__contains__()" method, "x\n'
+ 'in y" returns "True" if "y.__contains__(x)" returns a true value, and\n'
+ '"False" otherwise.\n'
+ '\n'
+ 'For user-defined classes which do not define "__contains__()" but do\n'
+ 'define "__iter__()", "x in y" is "True" if some value "z" with "x ==\n'
+ 'z" is produced while iterating over "y". If an exception is raised\n'
+ 'during the iteration, it is as if "in" raised that exception.\n'
+ '\n'
+ 'Lastly, the old-style iteration protocol is tried: if a class defines\n'
+ '"__getitem__()", "x in y" is "True" if and only if there is a non-\n'
+ 'negative integer index *i* such that "x == y[i]", and all lower\n'
+ 'integer indices do not raise "IndexError" exception. (If any other\n'
+ 'exception is raised, it is as if "in" raised that exception).\n'
+ '\n'
+ 'The operator "not in" is defined to have the inverse true value of\n'
+ '"in".\n',
+ 'integers': '\n'
+ 'Integer and long integer literals\n'
+ '*********************************\n'
+ '\n'
+ 'Integer and long integer literals are described by the '
+ 'following\n'
+ 'lexical definitions:\n'
+ '\n'
+ ' longinteger ::= integer ("l" | "L")\n'
+ ' integer ::= decimalinteger | octinteger | hexinteger | '
+ 'bininteger\n'
+ ' decimalinteger ::= nonzerodigit digit* | "0"\n'
+ ' octinteger ::= "0" ("o" | "O") octdigit+ | "0" octdigit+\n'
+ ' hexinteger ::= "0" ("x" | "X") hexdigit+\n'
+ ' bininteger ::= "0" ("b" | "B") bindigit+\n'
+ ' nonzerodigit ::= "1"..."9"\n'
+ ' octdigit ::= "0"..."7"\n'
+ ' bindigit ::= "0" | "1"\n'
+ ' hexdigit ::= digit | "a"..."f" | "A"..."F"\n'
+ '\n'
+ 'Although both lower case "\'l\'" and upper case "\'L\'" are '
+ 'allowed as\n'
+ 'suffix for long integers, it is strongly recommended to always '
+ 'use\n'
+ '"\'L\'", since the letter "\'l\'" looks too much like the digit '
+ '"\'1\'".\n'
+ '\n'
+ 'Plain integer literals that are above the largest representable '
+ 'plain\n'
+ 'integer (e.g., 2147483647 when using 32-bit arithmetic) are '
+ 'accepted\n'
+ 'as if they were long integers instead. [1] There is no limit '
+ 'for long\n'
+ 'integer literals apart from what can be stored in available '
+ 'memory.\n'
+ '\n'
+ 'Some examples of plain integer literals (first row) and long '
+ 'integer\n'
+ 'literals (second and third rows):\n'
+ '\n'
+ ' 7 2147483647 0177\n'
+ ' 3L 79228162514264337593543950336L 0377L 0x100000000L\n'
+ ' 79228162514264337593543950336 0xdeadbeef\n',
+ 'lambda': '\n'
+ 'Lambdas\n'
+ '*******\n'
+ '\n'
+ ' lambda_expr ::= "lambda" [parameter_list]: expression\n'
+ ' old_lambda_expr ::= "lambda" [parameter_list]: old_expression\n'
+ '\n'
+ 'Lambda expressions (sometimes called lambda forms) have the same\n'
+ 'syntactic position as expressions. They are a shorthand to '
+ 'create\n'
+ 'anonymous functions; the expression "lambda arguments: '
+ 'expression"\n'
+ 'yields a function object. The unnamed object behaves like a '
+ 'function\n'
+ 'object defined with\n'
+ '\n'
+ ' def name(arguments):\n'
+ ' return expression\n'
+ '\n'
+ 'See section Function definitions for the syntax of parameter '
+ 'lists.\n'
+ 'Note that functions created with lambda expressions cannot '
+ 'contain\n'
+ 'statements.\n',
+ 'lists': '\n'
+ 'List displays\n'
+ '*************\n'
+ '\n'
+ 'A list display is a possibly empty series of expressions enclosed '
+ 'in\n'
+ 'square brackets:\n'
+ '\n'
+ ' list_display ::= "[" [expression_list | '
+ 'list_comprehension] "]"\n'
+ ' list_comprehension ::= expression list_for\n'
+ ' list_for ::= "for" target_list "in" '
+ 'old_expression_list [list_iter]\n'
+ ' old_expression_list ::= old_expression [("," old_expression)+ '
+ '[","]]\n'
+ ' old_expression ::= or_test | old_lambda_expr\n'
+ ' list_iter ::= list_for | list_if\n'
+ ' list_if ::= "if" old_expression [list_iter]\n'
+ '\n'
+ 'A list display yields a new list object. Its contents are '
+ 'specified\n'
+ 'by providing either a list of expressions or a list comprehension.\n'
+ 'When a comma-separated list of expressions is supplied, its '
+ 'elements\n'
+ 'are evaluated from left to right and placed into the list object '
+ 'in\n'
+ 'that order. When a list comprehension is supplied, it consists of '
+ 'a\n'
+ 'single expression followed by at least one "for" clause and zero '
+ 'or\n'
+ 'more "for" or "if" clauses. In this case, the elements of the new\n'
+ 'list are those that would be produced by considering each of the '
+ '"for"\n'
+ 'or "if" clauses a block, nesting from left to right, and '
+ 'evaluating\n'
+ 'the expression to produce a list element each time the innermost '
+ 'block\n'
+ 'is reached [1].\n',
+ 'naming': '\n'
+ 'Naming and binding\n'
+ '******************\n'
+ '\n'
+ '*Names* refer to objects. Names are introduced by name binding\n'
+ 'operations. Each occurrence of a name in the program text refers '
+ 'to\n'
+ 'the *binding* of that name established in the innermost function '
+ 'block\n'
+ 'containing the use.\n'
+ '\n'
+ 'A *block* is a piece of Python program text that is executed as a\n'
+ 'unit. The following are blocks: a module, a function body, and a '
+ 'class\n'
+ 'definition. Each command typed interactively is a block. A '
+ 'script\n'
+ 'file (a file given as standard input to the interpreter or '
+ 'specified\n'
+ 'on the interpreter command line the first argument) is a code '
+ 'block.\n'
+ 'A script command (a command specified on the interpreter command '
+ 'line\n'
+ "with the '**-c**' option) is a code block. The file read by the\n"
+ 'built-in function "execfile()" is a code block. The string '
+ 'argument\n'
+ 'passed to the built-in function "eval()" and to the "exec" '
+ 'statement\n'
+ 'is a code block. The expression read and evaluated by the '
+ 'built-in\n'
+ 'function "input()" is a code block.\n'
+ '\n'
+ 'A code block is executed in an *execution frame*. A frame '
+ 'contains\n'
+ 'some administrative information (used for debugging) and '
+ 'determines\n'
+ "where and how execution continues after the code block's execution "
+ 'has\n'
+ 'completed.\n'
+ '\n'
+ 'A *scope* defines the visibility of a name within a block. If a '
+ 'local\n'
+ 'variable is defined in a block, its scope includes that block. If '
+ 'the\n'
+ 'definition occurs in a function block, the scope extends to any '
+ 'blocks\n'
+ 'contained within the defining one, unless a contained block '
+ 'introduces\n'
+ 'a different binding for the name. The scope of names defined in '
+ 'a\n'
+ 'class block is limited to the class block; it does not extend to '
+ 'the\n'
+ 'code blocks of methods -- this includes generator expressions '
+ 'since\n'
+ 'they are implemented using a function scope. This means that the\n'
+ 'following will fail:\n'
+ '\n'
+ ' class A:\n'
+ ' a = 42\n'
+ ' b = list(a + i for i in range(10))\n'
+ '\n'
+ 'When a name is used in a code block, it is resolved using the '
+ 'nearest\n'
+ 'enclosing scope. The set of all such scopes visible to a code '
+ 'block\n'
+ "is called the block's *environment*.\n"
+ '\n'
+ 'If a name is bound in a block, it is a local variable of that '
+ 'block.\n'
+ 'If a name is bound at the module level, it is a global variable. '
+ '(The\n'
+ 'variables of the module code block are local and global.) If a\n'
+ 'variable is used in a code block but not defined there, it is a '
+ '*free\n'
+ 'variable*.\n'
+ '\n'
+ 'When a name is not found at all, a "NameError" exception is '
+ 'raised.\n'
+ 'If the name refers to a local variable that has not been bound, a\n'
+ '"UnboundLocalError" exception is raised. "UnboundLocalError" is '
+ 'a\n'
+ 'subclass of "NameError".\n'
+ '\n'
+ 'The following constructs bind names: formal parameters to '
+ 'functions,\n'
+ '"import" statements, class and function definitions (these bind '
+ 'the\n'
+ 'class or function name in the defining block), and targets that '
+ 'are\n'
+ 'identifiers if occurring in an assignment, "for" loop header, in '
+ 'the\n'
+ 'second position of an "except" clause header or after "as" in a '
+ '"with"\n'
+ 'statement. The "import" statement of the form "from ... import '
+ '*"\n'
+ 'binds all names defined in the imported module, except those '
+ 'beginning\n'
+ 'with an underscore. This form may only be used at the module '
+ 'level.\n'
+ '\n'
+ 'A target occurring in a "del" statement is also considered bound '
+ 'for\n'
+ 'this purpose (though the actual semantics are to unbind the '
+ 'name). It\n'
+ 'is illegal to unbind a name that is referenced by an enclosing '
+ 'scope;\n'
+ 'the compiler will report a "SyntaxError".\n'
+ '\n'
+ 'Each assignment or import statement occurs within a block defined '
+ 'by a\n'
+ 'class or function definition or at the module level (the '
+ 'top-level\n'
+ 'code block).\n'
+ '\n'
+ 'If a name binding operation occurs anywhere within a code block, '
+ 'all\n'
+ 'uses of the name within the block are treated as references to '
+ 'the\n'
+ 'current block. This can lead to errors when a name is used within '
+ 'a\n'
+ 'block before it is bound. This rule is subtle. Python lacks\n'
+ 'declarations and allows name binding operations to occur anywhere\n'
+ 'within a code block. The local variables of a code block can be\n'
+ 'determined by scanning the entire text of the block for name '
+ 'binding\n'
+ 'operations.\n'
+ '\n'
+ 'If the global statement occurs within a block, all uses of the '
+ 'name\n'
+ 'specified in the statement refer to the binding of that name in '
+ 'the\n'
+ 'top-level namespace. Names are resolved in the top-level namespace '
+ 'by\n'
+ 'searching the global namespace, i.e. the namespace of the module\n'
+ 'containing the code block, and the builtins namespace, the '
+ 'namespace\n'
+ 'of the module "__builtin__". The global namespace is searched '
+ 'first.\n'
+ 'If the name is not found there, the builtins namespace is '
+ 'searched.\n'
+ 'The global statement must precede all uses of the name.\n'
+ '\n'
+ 'The builtins namespace associated with the execution of a code '
+ 'block\n'
+ 'is actually found by looking up the name "__builtins__" in its '
+ 'global\n'
+ 'namespace; this should be a dictionary or a module (in the latter '
+ 'case\n'
+ "the module's dictionary is used). By default, when in the "
+ '"__main__"\n'
+ 'module, "__builtins__" is the built-in module "__builtin__" (note: '
+ 'no\n'
+ '\'s\'); when in any other module, "__builtins__" is an alias for '
+ 'the\n'
+ 'dictionary of the "__builtin__" module itself. "__builtins__" can '
+ 'be\n'
+ 'set to a user-created dictionary to create a weak form of '
+ 'restricted\n'
+ 'execution.\n'
+ '\n'
+ '**CPython implementation detail:** Users should not touch\n'
+ '"__builtins__"; it is strictly an implementation detail. Users\n'
+ 'wanting to override values in the builtins namespace should '
+ '"import"\n'
+ 'the "__builtin__" (no \'s\') module and modify its attributes\n'
+ 'appropriately.\n'
+ '\n'
+ 'The namespace for a module is automatically created the first time '
+ 'a\n'
+ 'module is imported. The main module for a script is always '
+ 'called\n'
+ '"__main__".\n'
+ '\n'
+ 'The "global" statement has the same scope as a name binding '
+ 'operation\n'
+ 'in the same block. If the nearest enclosing scope for a free '
+ 'variable\n'
+ 'contains a global statement, the free variable is treated as a '
+ 'global.\n'
+ '\n'
+ 'A class definition is an executable statement that may use and '
+ 'define\n'
+ 'names. These references follow the normal rules for name '
+ 'resolution.\n'
+ 'The namespace of the class definition becomes the attribute '
+ 'dictionary\n'
+ 'of the class. Names defined at the class scope are not visible '
+ 'in\n'
+ 'methods.\n'
+ '\n'
+ '\n'
+ 'Interaction with dynamic features\n'
+ '=================================\n'
+ '\n'
+ 'There are several cases where Python statements are illegal when '
+ 'used\n'
+ 'in conjunction with nested scopes that contain free variables.\n'
+ '\n'
+ 'If a variable is referenced in an enclosing scope, it is illegal '
+ 'to\n'
+ 'delete the name. An error will be reported at compile time.\n'
+ '\n'
+ 'If the wild card form of import --- "import *" --- is used in a\n'
+ 'function and the function contains or is a nested block with free\n'
+ 'variables, the compiler will raise a "SyntaxError".\n'
+ '\n'
+ 'If "exec" is used in a function and the function contains or is a\n'
+ 'nested block with free variables, the compiler will raise a\n'
+ '"SyntaxError" unless the exec explicitly specifies the local '
+ 'namespace\n'
+ 'for the "exec". (In other words, "exec obj" would be illegal, '
+ 'but\n'
+ '"exec obj in ns" would be legal.)\n'
+ '\n'
+ 'The "eval()", "execfile()", and "input()" functions and the '
+ '"exec"\n'
+ 'statement do not have access to the full environment for '
+ 'resolving\n'
+ 'names. Names may be resolved in the local and global namespaces '
+ 'of\n'
+ 'the caller. Free variables are not resolved in the nearest '
+ 'enclosing\n'
+ 'namespace, but in the global namespace. [1] The "exec" statement '
+ 'and\n'
+ 'the "eval()" and "execfile()" functions have optional arguments '
+ 'to\n'
+ 'override the global and local namespace. If only one namespace '
+ 'is\n'
+ 'specified, it is used for both.\n',
+ 'numbers': '\n'
+ 'Numeric literals\n'
+ '****************\n'
+ '\n'
+ 'There are four types of numeric literals: plain integers, long\n'
+ 'integers, floating point numbers, and imaginary numbers. There '
+ 'are no\n'
+ 'complex literals (complex numbers can be formed by adding a real\n'
+ 'number and an imaginary number).\n'
+ '\n'
+ 'Note that numeric literals do not include a sign; a phrase like '
+ '"-1"\n'
+ 'is actually an expression composed of the unary operator \'"-"\' '
+ 'and the\n'
+ 'literal "1".\n',
+ 'numeric-types': '\n'
+ 'Emulating numeric types\n'
+ '***********************\n'
+ '\n'
+ 'The following methods can be defined to emulate numeric '
+ 'objects.\n'
+ 'Methods corresponding to operations that are not supported '
+ 'by the\n'
+ 'particular kind of number implemented (e.g., bitwise '
+ 'operations for\n'
+ 'non-integral numbers) should be left undefined.\n'
+ '\n'
+ 'object.__add__(self, other)\n'
+ 'object.__sub__(self, other)\n'
+ 'object.__mul__(self, other)\n'
+ 'object.__floordiv__(self, other)\n'
+ 'object.__mod__(self, other)\n'
+ 'object.__divmod__(self, other)\n'
+ 'object.__pow__(self, other[, modulo])\n'
+ 'object.__lshift__(self, other)\n'
+ 'object.__rshift__(self, other)\n'
+ 'object.__and__(self, other)\n'
+ 'object.__xor__(self, other)\n'
+ 'object.__or__(self, other)\n'
+ '\n'
+ ' These methods are called to implement the binary '
+ 'arithmetic\n'
+ ' operations ("+", "-", "*", "//", "%", "divmod()", '
+ '"pow()", "**",\n'
+ ' "<<", ">>", "&", "^", "|"). For instance, to evaluate '
+ 'the\n'
+ ' expression "x + y", where *x* is an instance of a class '
+ 'that has an\n'
+ ' "__add__()" method, "x.__add__(y)" is called. The '
+ '"__divmod__()"\n'
+ ' method should be the equivalent to using '
+ '"__floordiv__()" and\n'
+ ' "__mod__()"; it should not be related to "__truediv__()" '
+ '(described\n'
+ ' below). Note that "__pow__()" should be defined to '
+ 'accept an\n'
+ ' optional third argument if the ternary version of the '
+ 'built-in\n'
+ ' "pow()" function is to be supported.\n'
+ '\n'
+ ' If one of those methods does not support the operation '
+ 'with the\n'
+ ' supplied arguments, it should return "NotImplemented".\n'
+ '\n'
+ 'object.__div__(self, other)\n'
+ 'object.__truediv__(self, other)\n'
+ '\n'
+ ' The division operator ("/") is implemented by these '
+ 'methods. The\n'
+ ' "__truediv__()" method is used when '
+ '"__future__.division" is in\n'
+ ' effect, otherwise "__div__()" is used. If only one of '
+ 'these two\n'
+ ' methods is defined, the object will not support division '
+ 'in the\n'
+ ' alternate context; "TypeError" will be raised instead.\n'
+ '\n'
+ 'object.__radd__(self, other)\n'
+ 'object.__rsub__(self, other)\n'
+ 'object.__rmul__(self, other)\n'
+ 'object.__rdiv__(self, other)\n'
+ 'object.__rtruediv__(self, other)\n'
+ 'object.__rfloordiv__(self, other)\n'
+ 'object.__rmod__(self, other)\n'
+ 'object.__rdivmod__(self, other)\n'
+ 'object.__rpow__(self, other)\n'
+ 'object.__rlshift__(self, other)\n'
+ 'object.__rrshift__(self, other)\n'
+ 'object.__rand__(self, other)\n'
+ 'object.__rxor__(self, other)\n'
+ 'object.__ror__(self, other)\n'
+ '\n'
+ ' These methods are called to implement the binary '
+ 'arithmetic\n'
+ ' operations ("+", "-", "*", "/", "%", "divmod()", '
+ '"pow()", "**",\n'
+ ' "<<", ">>", "&", "^", "|") with reflected (swapped) '
+ 'operands.\n'
+ ' These functions are only called if the left operand does '
+ 'not\n'
+ ' support the corresponding operation and the operands are '
+ 'of\n'
+ ' different types. [2] For instance, to evaluate the '
+ 'expression "x -\n'
+ ' y", where *y* is an instance of a class that has an '
+ '"__rsub__()"\n'
+ ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" '
+ 'returns\n'
+ ' *NotImplemented*.\n'
+ '\n'
+ ' Note that ternary "pow()" will not try calling '
+ '"__rpow__()" (the\n'
+ ' coercion rules would become too complicated).\n'
+ '\n'
+ " Note: If the right operand's type is a subclass of the "
+ 'left\n'
+ " operand's type and that subclass provides the "
+ 'reflected method\n'
+ ' for the operation, this method will be called before '
+ 'the left\n'
+ " operand's non-reflected method. This behavior allows "
+ 'subclasses\n'
+ " to override their ancestors' operations.\n"
+ '\n'
+ 'object.__iadd__(self, other)\n'
+ 'object.__isub__(self, other)\n'
+ 'object.__imul__(self, other)\n'
+ 'object.__idiv__(self, other)\n'
+ 'object.__itruediv__(self, other)\n'
+ 'object.__ifloordiv__(self, other)\n'
+ 'object.__imod__(self, other)\n'
+ 'object.__ipow__(self, other[, modulo])\n'
+ 'object.__ilshift__(self, other)\n'
+ 'object.__irshift__(self, other)\n'
+ 'object.__iand__(self, other)\n'
+ 'object.__ixor__(self, other)\n'
+ 'object.__ior__(self, other)\n'
+ '\n'
+ ' These methods are called to implement the augmented '
+ 'arithmetic\n'
+ ' assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", '
+ '"<<=",\n'
+ ' ">>=", "&=", "^=", "|="). These methods should attempt '
+ 'to do the\n'
+ ' operation in-place (modifying *self*) and return the '
+ 'result (which\n'
+ ' could be, but does not have to be, *self*). If a '
+ 'specific method\n'
+ ' is not defined, the augmented assignment falls back to '
+ 'the normal\n'
+ ' methods. For instance, to execute the statement "x += '
+ 'y", where\n'
+ ' *x* is an instance of a class that has an "__iadd__()" '
+ 'method,\n'
+ ' "x.__iadd__(y)" is called. If *x* is an instance of a '
+ 'class that\n'
+ ' does not define a "__iadd__()" method, "x.__add__(y)" '
+ 'and\n'
+ ' "y.__radd__(x)" are considered, as with the evaluation '
+ 'of "x + y".\n'
+ '\n'
+ 'object.__neg__(self)\n'
+ 'object.__pos__(self)\n'
+ 'object.__abs__(self)\n'
+ 'object.__invert__(self)\n'
+ '\n'
+ ' Called to implement the unary arithmetic operations '
+ '("-", "+",\n'
+ ' "abs()" and "~").\n'
+ '\n'
+ 'object.__complex__(self)\n'
+ 'object.__int__(self)\n'
+ 'object.__long__(self)\n'
+ 'object.__float__(self)\n'
+ '\n'
+ ' Called to implement the built-in functions "complex()", '
+ '"int()",\n'
+ ' "long()", and "float()". Should return a value of the '
+ 'appropriate\n'
+ ' type.\n'
+ '\n'
+ 'object.__oct__(self)\n'
+ 'object.__hex__(self)\n'
+ '\n'
+ ' Called to implement the built-in functions "oct()" and '
+ '"hex()".\n'
+ ' Should return a string value.\n'
+ '\n'
+ 'object.__index__(self)\n'
+ '\n'
+ ' Called to implement "operator.index()". Also called '
+ 'whenever\n'
+ ' Python needs an integer object (such as in slicing). '
+ 'Must return\n'
+ ' an integer (int or long).\n'
+ '\n'
+ ' New in version 2.5.\n'
+ '\n'
+ 'object.__coerce__(self, other)\n'
+ '\n'
+ ' Called to implement "mixed-mode" numeric arithmetic. '
+ 'Should either\n'
+ ' return a 2-tuple containing *self* and *other* converted '
+ 'to a\n'
+ ' common numeric type, or "None" if conversion is '
+ 'impossible. When\n'
+ ' the common type would be the type of "other", it is '
+ 'sufficient to\n'
+ ' return "None", since the interpreter will also ask the '
+ 'other object\n'
+ ' to attempt a coercion (but sometimes, if the '
+ 'implementation of the\n'
+ ' other type cannot be changed, it is useful to do the '
+ 'conversion to\n'
+ ' the other type here). A return value of '
+ '"NotImplemented" is\n'
+ ' equivalent to returning "None".\n',
+ 'objects': '\n'
+ 'Objects, values and types\n'
+ '*************************\n'
+ '\n'
+ "*Objects* are Python's abstraction for data. All data in a "
+ 'Python\n'
+ 'program is represented by objects or by relations between '
+ 'objects. (In\n'
+ 'a sense, and in conformance to Von Neumann\'s model of a "stored\n'
+ 'program computer," code is also represented by objects.)\n'
+ '\n'
+ "Every object has an identity, a type and a value. An object's\n"
+ '*identity* never changes once it has been created; you may think '
+ 'of it\n'
+ 'as the object\'s address in memory. The \'"is"\' operator '
+ 'compares the\n'
+ 'identity of two objects; the "id()" function returns an integer\n'
+ 'representing its identity (currently implemented as its address). '
+ 'An\n'
+ "object's *type* is also unchangeable. [1] An object's type "
+ 'determines\n'
+ 'the operations that the object supports (e.g., "does it have a\n'
+ 'length?") and also defines the possible values for objects of '
+ 'that\n'
+ 'type. The "type()" function returns an object\'s type (which is '
+ 'an\n'
+ 'object itself). The *value* of some objects can change. '
+ 'Objects\n'
+ 'whose value can change are said to be *mutable*; objects whose '
+ 'value\n'
+ 'is unchangeable once they are created are called *immutable*. '
+ '(The\n'
+ 'value of an immutable container object that contains a reference '
+ 'to a\n'
+ "mutable object can change when the latter's value is changed; "
+ 'however\n'
+ 'the container is still considered immutable, because the '
+ 'collection of\n'
+ 'objects it contains cannot be changed. So, immutability is not\n'
+ 'strictly the same as having an unchangeable value, it is more '
+ 'subtle.)\n'
+ "An object's mutability is determined by its type; for instance,\n"
+ 'numbers, strings and tuples are immutable, while dictionaries '
+ 'and\n'
+ 'lists are mutable.\n'
+ '\n'
+ 'Objects are never explicitly destroyed; however, when they '
+ 'become\n'
+ 'unreachable they may be garbage-collected. An implementation is\n'
+ 'allowed to postpone garbage collection or omit it altogether --- '
+ 'it is\n'
+ 'a matter of implementation quality how garbage collection is\n'
+ 'implemented, as long as no objects are collected that are still\n'
+ 'reachable.\n'
+ '\n'
+ '**CPython implementation detail:** CPython currently uses a '
+ 'reference-\n'
+ 'counting scheme with (optional) delayed detection of cyclically '
+ 'linked\n'
+ 'garbage, which collects most objects as soon as they become\n'
+ 'unreachable, but is not guaranteed to collect garbage containing\n'
+ 'circular references. See the documentation of the "gc" module '
+ 'for\n'
+ 'information on controlling the collection of cyclic garbage. '
+ 'Other\n'
+ 'implementations act differently and CPython may change. Do not '
+ 'depend\n'
+ 'on immediate finalization of objects when they become unreachable '
+ '(ex:\n'
+ 'always close files).\n'
+ '\n'
+ "Note that the use of the implementation's tracing or debugging\n"
+ 'facilities may keep objects alive that would normally be '
+ 'collectable.\n'
+ 'Also note that catching an exception with a \'"try"..."except"\'\n'
+ 'statement may keep objects alive.\n'
+ '\n'
+ 'Some objects contain references to "external" resources such as '
+ 'open\n'
+ 'files or windows. It is understood that these resources are '
+ 'freed\n'
+ 'when the object is garbage-collected, but since garbage '
+ 'collection is\n'
+ 'not guaranteed to happen, such objects also provide an explicit '
+ 'way to\n'
+ 'release the external resource, usually a "close()" method. '
+ 'Programs\n'
+ 'are strongly recommended to explicitly close such objects. The\n'
+ '\'"try"..."finally"\' statement provides a convenient way to do '
+ 'this.\n'
+ '\n'
+ 'Some objects contain references to other objects; these are '
+ 'called\n'
+ '*containers*. Examples of containers are tuples, lists and\n'
+ "dictionaries. The references are part of a container's value. "
+ 'In\n'
+ 'most cases, when we talk about the value of a container, we imply '
+ 'the\n'
+ 'values, not the identities of the contained objects; however, '
+ 'when we\n'
+ 'talk about the mutability of a container, only the identities of '
+ 'the\n'
+ 'immediately contained objects are implied. So, if an immutable\n'
+ 'container (like a tuple) contains a reference to a mutable '
+ 'object, its\n'
+ 'value changes if that mutable object is changed.\n'
+ '\n'
+ 'Types affect almost all aspects of object behavior. Even the\n'
+ 'importance of object identity is affected in some sense: for '
+ 'immutable\n'
+ 'types, operations that compute new values may actually return a\n'
+ 'reference to any existing object with the same type and value, '
+ 'while\n'
+ 'for mutable objects this is not allowed. E.g., after "a = 1; b = '
+ '1",\n'
+ '"a" and "b" may or may not refer to the same object with the '
+ 'value\n'
+ 'one, depending on the implementation, but after "c = []; d = []", '
+ '"c"\n'
+ 'and "d" are guaranteed to refer to two different, unique, newly\n'
+ 'created empty lists. (Note that "c = d = []" assigns the same '
+ 'object\n'
+ 'to both "c" and "d".)\n',
+ 'operator-summary': '\n'
+ 'Operator precedence\n'
+ '*******************\n'
+ '\n'
+ 'The following table summarizes the operator precedences '
+ 'in Python,\n'
+ 'from lowest precedence (least binding) to highest '
+ 'precedence (most\n'
+ 'binding). Operators in the same box have the same '
+ 'precedence. Unless\n'
+ 'the syntax is explicitly given, operators are binary. '
+ 'Operators in\n'
+ 'the same box group left to right (except for '
+ 'comparisons, including\n'
+ 'tests, which all have the same precedence and chain from '
+ 'left to right\n'
+ '--- see section Comparisons --- and exponentiation, '
+ 'which groups from\n'
+ 'right to left).\n'
+ '\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| Operator | '
+ 'Description |\n'
+ '+=================================================+=======================================+\n'
+ '| "lambda" | '
+ 'Lambda expression |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "if" -- "else" | '
+ 'Conditional expression |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "or" | '
+ 'Boolean OR |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "and" | '
+ 'Boolean AND |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "not" "x" | '
+ 'Boolean NOT |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "in", "not in", "is", "is not", "<", "<=", ">", | '
+ 'Comparisons, including membership |\n'
+ '| ">=", "<>", "!=", "==" | '
+ 'tests and identity tests |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "|" | '
+ 'Bitwise OR |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "^" | '
+ 'Bitwise XOR |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "&" | '
+ 'Bitwise AND |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "<<", ">>" | '
+ 'Shifts |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "+", "-" | '
+ 'Addition and subtraction |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "*", "/", "//", "%" | '
+ 'Multiplication, division, remainder |\n'
+ '| | '
+ '[7] |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "+x", "-x", "~x" | '
+ 'Positive, negative, bitwise NOT |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "**" | '
+ 'Exponentiation [8] |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "x[index]", "x[index:index]", | '
+ 'Subscription, slicing, call, |\n'
+ '| "x(arguments...)", "x.attribute" | '
+ 'attribute reference |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '| "(expressions...)", "[expressions...]", "{key: | '
+ 'Binding or tuple display, list |\n'
+ '| value...}", "`expressions...`" | '
+ 'display, dictionary display, string |\n'
+ '| | '
+ 'conversion |\n'
+ '+-------------------------------------------------+---------------------------------------+\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] In Python 2.3 and later releases, a list '
+ 'comprehension "leaks"\n'
+ ' the control variables of each "for" it contains into '
+ 'the\n'
+ ' containing scope. However, this behavior is '
+ 'deprecated, and\n'
+ ' relying on it will not work in Python 3.\n'
+ '\n'
+ '[2] While "abs(x%y) < abs(y)" is true mathematically, '
+ 'for floats\n'
+ ' it may not be true numerically due to roundoff. For '
+ 'example, and\n'
+ ' assuming a platform on which a Python float is an '
+ 'IEEE 754 double-\n'
+ ' precision number, in order that "-1e-100 % 1e100" '
+ 'have the same\n'
+ ' sign as "1e100", the computed result is "-1e-100 + '
+ '1e100", which\n'
+ ' is numerically exactly equal to "1e100". The '
+ 'function\n'
+ ' "math.fmod()" returns a result whose sign matches '
+ 'the sign of the\n'
+ ' first argument instead, and so returns "-1e-100" in '
+ 'this case.\n'
+ ' Which approach is more appropriate depends on the '
+ 'application.\n'
+ '\n'
+ '[3] If x is very close to an exact integer multiple of '
+ "y, it's\n"
+ ' possible for "floor(x/y)" to be one larger than '
+ '"(x-x%y)/y" due to\n'
+ ' rounding. In such cases, Python returns the latter '
+ 'result, in\n'
+ ' order to preserve that "divmod(x,y)[0] * y + x % y" '
+ 'be very close\n'
+ ' to "x".\n'
+ '\n'
+ '[4] The Unicode standard distinguishes between *code '
+ 'points* (e.g.\n'
+ ' U+0041) and *abstract characters* (e.g. "LATIN '
+ 'CAPITAL LETTER A").\n'
+ ' While most abstract characters in Unicode are only '
+ 'represented\n'
+ ' using one code point, there is a number of abstract '
+ 'characters\n'
+ ' that can in addition be represented using a sequence '
+ 'of more than\n'
+ ' one code point. For example, the abstract character '
+ '"LATIN\n'
+ ' CAPITAL LETTER C WITH CEDILLA" can be represented as '
+ 'a single\n'
+ ' *precomposed character* at code position U+00C7, or '
+ 'as a sequence\n'
+ ' of a *base character* at code position U+0043 (LATIN '
+ 'CAPITAL\n'
+ ' LETTER C), followed by a *combining character* at '
+ 'code position\n'
+ ' U+0327 (COMBINING CEDILLA).\n'
+ '\n'
+ ' The comparison operators on unicode strings compare '
+ 'at the level\n'
+ ' of Unicode code points. This may be '
+ 'counter-intuitive to humans.\n'
+ ' For example, "u"\\u00C7" == u"\\u0043\\u0327"" is '
+ '"False", even\n'
+ ' though both strings represent the same abstract '
+ 'character "LATIN\n'
+ ' CAPITAL LETTER C WITH CEDILLA".\n'
+ '\n'
+ ' To compare strings at the level of abstract '
+ 'characters (that is,\n'
+ ' in a way intuitive to humans), use '
+ '"unicodedata.normalize()".\n'
+ '\n'
+ '[5] Earlier versions of Python used lexicographic '
+ 'comparison of\n'
+ ' the sorted (key, value) lists, but this was very '
+ 'expensive for the\n'
+ ' common case of comparing for equality. An even '
+ 'earlier version of\n'
+ ' Python compared dictionaries by identity only, but '
+ 'this caused\n'
+ ' surprises because people expected to be able to test '
+ 'a dictionary\n'
+ ' for emptiness by comparing it to "{}".\n'
+ '\n'
+ '[6] Due to automatic garbage-collection, free lists, and '
+ 'the\n'
+ ' dynamic nature of descriptors, you may notice '
+ 'seemingly unusual\n'
+ ' behaviour in certain uses of the "is" operator, like '
+ 'those\n'
+ ' involving comparisons between instance methods, or '
+ 'constants.\n'
+ ' Check their documentation for more info.\n'
+ '\n'
+ '[7] The "%" operator is also used for string formatting; '
+ 'the same\n'
+ ' precedence applies.\n'
+ '\n'
+ '[8] The power operator "**" binds less tightly than an '
+ 'arithmetic\n'
+ ' or bitwise unary operator on its right, that is, '
+ '"2**-1" is "0.5".\n',
+ 'pass': '\n'
+ 'The "pass" statement\n'
+ '********************\n'
+ '\n'
+ ' pass_stmt ::= "pass"\n'
+ '\n'
+ '"pass" is a null operation --- when it is executed, nothing '
+ 'happens.\n'
+ 'It is useful as a placeholder when a statement is required\n'
+ 'syntactically, but no code needs to be executed, for example:\n'
+ '\n'
+ ' def f(arg): pass # a function that does nothing (yet)\n'
+ '\n'
+ ' class C: pass # a class with no methods (yet)\n',
+ 'power': '\n'
+ 'The power operator\n'
+ '******************\n'
+ '\n'
+ 'The power operator binds more tightly than unary operators on its\n'
+ 'left; it binds less tightly than unary operators on its right. '
+ 'The\n'
+ 'syntax is:\n'
+ '\n'
+ ' power ::= primary ["**" u_expr]\n'
+ '\n'
+ 'Thus, in an unparenthesized sequence of power and unary operators, '
+ 'the\n'
+ 'operators are evaluated from right to left (this does not '
+ 'constrain\n'
+ 'the evaluation order for the operands): "-1**2" results in "-1".\n'
+ '\n'
+ 'The power operator has the same semantics as the built-in "pow()"\n'
+ 'function, when called with two arguments: it yields its left '
+ 'argument\n'
+ 'raised to the power of its right argument. The numeric arguments '
+ 'are\n'
+ 'first converted to a common type. The result type is that of the\n'
+ 'arguments after coercion.\n'
+ '\n'
+ 'With mixed operand types, the coercion rules for binary arithmetic\n'
+ 'operators apply. For int and long int operands, the result has the\n'
+ 'same type as the operands (after coercion) unless the second '
+ 'argument\n'
+ 'is negative; in that case, all arguments are converted to float and '
+ 'a\n'
+ 'float result is delivered. For example, "10**2" returns "100", but\n'
+ '"10**-2" returns "0.01". (This last feature was added in Python '
+ '2.2.\n'
+ 'In Python 2.1 and before, if both arguments were of integer types '
+ 'and\n'
+ 'the second argument was negative, an exception was raised).\n'
+ '\n'
+ 'Raising "0.0" to a negative power results in a '
+ '"ZeroDivisionError".\n'
+ 'Raising a negative number to a fractional power results in a\n'
+ '"ValueError".\n',
+ 'print': '\n'
+ 'The "print" statement\n'
+ '*********************\n'
+ '\n'
+ ' print_stmt ::= "print" ([expression ("," expression)* [","]]\n'
+ ' | ">>" expression [("," expression)+ [","]])\n'
+ '\n'
+ '"print" evaluates each expression in turn and writes the resulting\n'
+ 'object to standard output (see below). If an object is not a '
+ 'string,\n'
+ 'it is first converted to a string using the rules for string\n'
+ 'conversions. The (resulting or original) string is then written. '
+ 'A\n'
+ 'space is written before each object is (converted and) written, '
+ 'unless\n'
+ 'the output system believes it is positioned at the beginning of a\n'
+ 'line. This is the case (1) when no characters have yet been '
+ 'written\n'
+ 'to standard output, (2) when the last character written to '
+ 'standard\n'
+ 'output is a whitespace character except "\' \'", or (3) when the '
+ 'last\n'
+ 'write operation on standard output was not a "print" statement. '
+ '(In\n'
+ 'some cases it may be functional to write an empty string to '
+ 'standard\n'
+ 'output for this reason.)\n'
+ '\n'
+ 'Note: Objects which act like file objects but which are not the\n'
+ ' built-in file objects often do not properly emulate this aspect '
+ 'of\n'
+ " the file object's behavior, so it is best not to rely on this.\n"
+ '\n'
+ 'A "\'\\n\'" character is written at the end, unless the "print" '
+ 'statement\n'
+ 'ends with a comma. This is the only action if the statement '
+ 'contains\n'
+ 'just the keyword "print".\n'
+ '\n'
+ 'Standard output is defined as the file object named "stdout" in '
+ 'the\n'
+ 'built-in module "sys". If no such object exists, or if it does '
+ 'not\n'
+ 'have a "write()" method, a "RuntimeError" exception is raised.\n'
+ '\n'
+ '"print" also has an extended form, defined by the second portion '
+ 'of\n'
+ 'the syntax described above. This form is sometimes referred to as\n'
+ '""print" chevron." In this form, the first expression after the '
+ '">>"\n'
+ 'must evaluate to a "file-like" object, specifically an object that '
+ 'has\n'
+ 'a "write()" method as described above. With this extended form, '
+ 'the\n'
+ 'subsequent expressions are printed to this file object. If the '
+ 'first\n'
+ 'expression evaluates to "None", then "sys.stdout" is used as the '
+ 'file\n'
+ 'for output.\n',
+ 'raise': '\n'
+ 'The "raise" statement\n'
+ '*********************\n'
+ '\n'
+ ' raise_stmt ::= "raise" [expression ["," expression ["," '
+ 'expression]]]\n'
+ '\n'
+ 'If no expressions are present, "raise" re-raises the last '
+ 'exception\n'
+ 'that was active in the current scope. If no exception is active '
+ 'in\n'
+ 'the current scope, a "TypeError" exception is raised indicating '
+ 'that\n'
+ 'this is an error (if running under IDLE, a "Queue.Empty" exception '
+ 'is\n'
+ 'raised instead).\n'
+ '\n'
+ 'Otherwise, "raise" evaluates the expressions to get three objects,\n'
+ 'using "None" as the value of omitted expressions. The first two\n'
+ 'objects are used to determine the *type* and *value* of the '
+ 'exception.\n'
+ '\n'
+ 'If the first object is an instance, the type of the exception is '
+ 'the\n'
+ 'class of the instance, the instance itself is the value, and the\n'
+ 'second object must be "None".\n'
+ '\n'
+ 'If the first object is a class, it becomes the type of the '
+ 'exception.\n'
+ 'The second object is used to determine the exception value: If it '
+ 'is\n'
+ 'an instance of the class, the instance becomes the exception value. '
+ 'If\n'
+ 'the second object is a tuple, it is used as the argument list for '
+ 'the\n'
+ 'class constructor; if it is "None", an empty argument list is '
+ 'used,\n'
+ 'and any other object is treated as a single argument to the\n'
+ 'constructor. The instance so created by calling the constructor '
+ 'is\n'
+ 'used as the exception value.\n'
+ '\n'
+ 'If a third object is present and not "None", it must be a '
+ 'traceback\n'
+ 'object (see section The standard type hierarchy), and it is\n'
+ 'substituted instead of the current location as the place where the\n'
+ 'exception occurred. If the third object is present and not a\n'
+ 'traceback object or "None", a "TypeError" exception is raised. '
+ 'The\n'
+ 'three-expression form of "raise" is useful to re-raise an '
+ 'exception\n'
+ 'transparently in an except clause, but "raise" with no expressions\n'
+ 'should be preferred if the exception to be re-raised was the most\n'
+ 'recently active exception in the current scope.\n'
+ '\n'
+ 'Additional information on exceptions can be found in section\n'
+ 'Exceptions, and information about handling exceptions is in '
+ 'section\n'
+ 'The try statement.\n',
+ 'return': '\n'
+ 'The "return" statement\n'
+ '**********************\n'
+ '\n'
+ ' return_stmt ::= "return" [expression_list]\n'
+ '\n'
+ '"return" may only occur syntactically nested in a function '
+ 'definition,\n'
+ 'not within a nested class definition.\n'
+ '\n'
+ 'If an expression list is present, it is evaluated, else "None" is\n'
+ 'substituted.\n'
+ '\n'
+ '"return" leaves the current function call with the expression list '
+ '(or\n'
+ '"None") as return value.\n'
+ '\n'
+ 'When "return" passes control out of a "try" statement with a '
+ '"finally"\n'
+ 'clause, that "finally" clause is executed before really leaving '
+ 'the\n'
+ 'function.\n'
+ '\n'
+ 'In a generator function, the "return" statement is not allowed to\n'
+ 'include an "expression_list". In that context, a bare "return"\n'
+ 'indicates that the generator is done and will cause '
+ '"StopIteration" to\n'
+ 'be raised.\n',
+ 'sequence-types': '\n'
+ 'Emulating container types\n'
+ '*************************\n'
+ '\n'
+ 'The following methods can be defined to implement '
+ 'container objects.\n'
+ 'Containers usually are sequences (such as lists or tuples) '
+ 'or mappings\n'
+ '(like dictionaries), but can represent other containers as '
+ 'well. The\n'
+ 'first set of methods is used either to emulate a sequence '
+ 'or to\n'
+ 'emulate a mapping; the difference is that for a sequence, '
+ 'the\n'
+ 'allowable keys should be the integers *k* for which "0 <= '
+ 'k < N" where\n'
+ '*N* is the length of the sequence, or slice objects, which '
+ 'define a\n'
+ 'range of items. (For backwards compatibility, the method\n'
+ '"__getslice__()" (see below) can also be defined to handle '
+ 'simple, but\n'
+ 'not extended slices.) It is also recommended that mappings '
+ 'provide the\n'
+ 'methods "keys()", "values()", "items()", "has_key()", '
+ '"get()",\n'
+ '"clear()", "setdefault()", "iterkeys()", "itervalues()",\n'
+ '"iteritems()", "pop()", "popitem()", "copy()", and '
+ '"update()" behaving\n'
+ "similar to those for Python's standard dictionary "
+ 'objects. The\n'
+ '"UserDict" module provides a "DictMixin" class to help '
+ 'create those\n'
+ 'methods from a base set of "__getitem__()", '
+ '"__setitem__()",\n'
+ '"__delitem__()", and "keys()". Mutable sequences should '
+ 'provide\n'
+ 'methods "append()", "count()", "index()", "extend()", '
+ '"insert()",\n'
+ '"pop()", "remove()", "reverse()" and "sort()", like Python '
+ 'standard\n'
+ 'list objects. Finally, sequence types should implement '
+ 'addition\n'
+ '(meaning concatenation) and multiplication (meaning '
+ 'repetition) by\n'
+ 'defining the methods "__add__()", "__radd__()", '
+ '"__iadd__()",\n'
+ '"__mul__()", "__rmul__()" and "__imul__()" described '
+ 'below; they\n'
+ 'should not define "__coerce__()" or other numerical '
+ 'operators. It is\n'
+ 'recommended that both mappings and sequences implement '
+ 'the\n'
+ '"__contains__()" method to allow efficient use of the "in" '
+ 'operator;\n'
+ 'for mappings, "in" should be equivalent of "has_key()"; '
+ 'for sequences,\n'
+ 'it should search through the values. It is further '
+ 'recommended that\n'
+ 'both mappings and sequences implement the "__iter__()" '
+ 'method to allow\n'
+ 'efficient iteration through the container; for mappings, '
+ '"__iter__()"\n'
+ 'should be the same as "iterkeys()"; for sequences, it '
+ 'should iterate\n'
+ 'through the values.\n'
+ '\n'
+ 'object.__len__(self)\n'
+ '\n'
+ ' Called to implement the built-in function "len()". '
+ 'Should return\n'
+ ' the length of the object, an integer ">=" 0. Also, an '
+ 'object that\n'
+ ' doesn\'t define a "__nonzero__()" method and whose '
+ '"__len__()"\n'
+ ' method returns zero is considered to be false in a '
+ 'Boolean context.\n'
+ '\n'
+ ' **CPython implementation detail:** In CPython, the '
+ 'length is\n'
+ ' required to be at most "sys.maxsize". If the length is '
+ 'larger than\n'
+ ' "sys.maxsize" some features (such as "len()") may '
+ 'raise\n'
+ ' "OverflowError". To prevent raising "OverflowError" by '
+ 'truth value\n'
+ ' testing, an object must define a "__nonzero__()" '
+ 'method.\n'
+ '\n'
+ 'object.__getitem__(self, key)\n'
+ '\n'
+ ' Called to implement evaluation of "self[key]". For '
+ 'sequence types,\n'
+ ' the accepted keys should be integers and slice '
+ 'objects. Note that\n'
+ ' the special interpretation of negative indexes (if the '
+ 'class wishes\n'
+ ' to emulate a sequence type) is up to the '
+ '"__getitem__()" method. If\n'
+ ' *key* is of an inappropriate type, "TypeError" may be '
+ 'raised; if of\n'
+ ' a value outside the set of indexes for the sequence '
+ '(after any\n'
+ ' special interpretation of negative values), '
+ '"IndexError" should be\n'
+ ' raised. For mapping types, if *key* is missing (not in '
+ 'the\n'
+ ' container), "KeyError" should be raised.\n'
+ '\n'
+ ' Note: "for" loops expect that an "IndexError" will be '
+ 'raised for\n'
+ ' illegal indexes to allow proper detection of the end '
+ 'of the\n'
+ ' sequence.\n'
+ '\n'
+ 'object.__missing__(self, key)\n'
+ '\n'
+ ' Called by "dict"."__getitem__()" to implement '
+ '"self[key]" for dict\n'
+ ' subclasses when key is not in the dictionary.\n'
+ '\n'
+ 'object.__setitem__(self, key, value)\n'
+ '\n'
+ ' Called to implement assignment to "self[key]". Same '
+ 'note as for\n'
+ ' "__getitem__()". This should only be implemented for '
+ 'mappings if\n'
+ ' the objects support changes to the values for keys, or '
+ 'if new keys\n'
+ ' can be added, or for sequences if elements can be '
+ 'replaced. The\n'
+ ' same exceptions should be raised for improper *key* '
+ 'values as for\n'
+ ' the "__getitem__()" method.\n'
+ '\n'
+ 'object.__delitem__(self, key)\n'
+ '\n'
+ ' Called to implement deletion of "self[key]". Same note '
+ 'as for\n'
+ ' "__getitem__()". This should only be implemented for '
+ 'mappings if\n'
+ ' the objects support removal of keys, or for sequences '
+ 'if elements\n'
+ ' can be removed from the sequence. The same exceptions '
+ 'should be\n'
+ ' raised for improper *key* values as for the '
+ '"__getitem__()" method.\n'
+ '\n'
+ 'object.__iter__(self)\n'
+ '\n'
+ ' This method is called when an iterator is required for '
+ 'a container.\n'
+ ' This method should return a new iterator object that '
+ 'can iterate\n'
+ ' over all the objects in the container. For mappings, '
+ 'it should\n'
+ ' iterate over the keys of the container, and should also '
+ 'be made\n'
+ ' available as the method "iterkeys()".\n'
+ '\n'
+ ' Iterator objects also need to implement this method; '
+ 'they are\n'
+ ' required to return themselves. For more information on '
+ 'iterator\n'
+ ' objects, see Iterator Types.\n'
+ '\n'
+ 'object.__reversed__(self)\n'
+ '\n'
+ ' Called (if present) by the "reversed()" built-in to '
+ 'implement\n'
+ ' reverse iteration. It should return a new iterator '
+ 'object that\n'
+ ' iterates over all the objects in the container in '
+ 'reverse order.\n'
+ '\n'
+ ' If the "__reversed__()" method is not provided, the '
+ '"reversed()"\n'
+ ' built-in will fall back to using the sequence protocol '
+ '("__len__()"\n'
+ ' and "__getitem__()"). Objects that support the '
+ 'sequence protocol\n'
+ ' should only provide "__reversed__()" if they can '
+ 'provide an\n'
+ ' implementation that is more efficient than the one '
+ 'provided by\n'
+ ' "reversed()".\n'
+ '\n'
+ ' New in version 2.6.\n'
+ '\n'
+ 'The membership test operators ("in" and "not in") are '
+ 'normally\n'
+ 'implemented as an iteration through a sequence. However, '
+ 'container\n'
+ 'objects can supply the following special method with a '
+ 'more efficient\n'
+ 'implementation, which also does not require the object be '
+ 'a sequence.\n'
+ '\n'
+ 'object.__contains__(self, item)\n'
+ '\n'
+ ' Called to implement membership test operators. Should '
+ 'return true\n'
+ ' if *item* is in *self*, false otherwise. For mapping '
+ 'objects, this\n'
+ ' should consider the keys of the mapping rather than the '
+ 'values or\n'
+ ' the key-item pairs.\n'
+ '\n'
+ ' For objects that don\'t define "__contains__()", the '
+ 'membership test\n'
+ ' first tries iteration via "__iter__()", then the old '
+ 'sequence\n'
+ ' iteration protocol via "__getitem__()", see this '
+ 'section in the\n'
+ ' language reference.\n',
+ 'shifting': '\n'
+ 'Shifting operations\n'
+ '*******************\n'
+ '\n'
+ 'The shifting operations have lower priority than the arithmetic\n'
+ 'operations:\n'
+ '\n'
+ ' shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n'
+ '\n'
+ 'These operators accept plain or long integers as arguments. '
+ 'The\n'
+ 'arguments are converted to a common type. They shift the first\n'
+ 'argument to the left or right by the number of bits given by '
+ 'the\n'
+ 'second argument.\n'
+ '\n'
+ 'A right shift by *n* bits is defined as division by "pow(2, '
+ 'n)". A\n'
+ 'left shift by *n* bits is defined as multiplication with "pow(2, '
+ 'n)".\n'
+ 'Negative shift counts raise a "ValueError" exception.\n'
+ '\n'
+ 'Note: In the current implementation, the right-hand operand is\n'
+ ' required to be at most "sys.maxsize". If the right-hand '
+ 'operand is\n'
+ ' larger than "sys.maxsize" an "OverflowError" exception is '
+ 'raised.\n',
+ 'slicings': '\n'
+ 'Slicings\n'
+ '********\n'
+ '\n'
+ 'A slicing selects a range of items in a sequence object (e.g., '
+ 'a\n'
+ 'string, tuple or list). Slicings may be used as expressions or '
+ 'as\n'
+ 'targets in assignment or "del" statements. The syntax for a '
+ 'slicing:\n'
+ '\n'
+ ' slicing ::= simple_slicing | extended_slicing\n'
+ ' simple_slicing ::= primary "[" short_slice "]"\n'
+ ' extended_slicing ::= primary "[" slice_list "]"\n'
+ ' slice_list ::= slice_item ("," slice_item)* [","]\n'
+ ' slice_item ::= expression | proper_slice | ellipsis\n'
+ ' proper_slice ::= short_slice | long_slice\n'
+ ' short_slice ::= [lower_bound] ":" [upper_bound]\n'
+ ' long_slice ::= short_slice ":" [stride]\n'
+ ' lower_bound ::= expression\n'
+ ' upper_bound ::= expression\n'
+ ' stride ::= expression\n'
+ ' ellipsis ::= "..."\n'
+ '\n'
+ 'There is ambiguity in the formal syntax here: anything that '
+ 'looks like\n'
+ 'an expression list also looks like a slice list, so any '
+ 'subscription\n'
+ 'can be interpreted as a slicing. Rather than further '
+ 'complicating the\n'
+ 'syntax, this is disambiguated by defining that in this case the\n'
+ 'interpretation as a subscription takes priority over the\n'
+ 'interpretation as a slicing (this is the case if the slice list\n'
+ 'contains no proper slice nor ellipses). Similarly, when the '
+ 'slice\n'
+ 'list has exactly one short slice and no trailing comma, the\n'
+ 'interpretation as a simple slicing takes priority over that as '
+ 'an\n'
+ 'extended slicing.\n'
+ '\n'
+ 'The semantics for a simple slicing are as follows. The primary '
+ 'must\n'
+ 'evaluate to a sequence object. The lower and upper bound '
+ 'expressions,\n'
+ 'if present, must evaluate to plain integers; defaults are zero '
+ 'and the\n'
+ '"sys.maxint", respectively. If either bound is negative, the\n'
+ "sequence's length is added to it. The slicing now selects all "
+ 'items\n'
+ 'with index *k* such that "i <= k < j" where *i* and *j* are the\n'
+ 'specified lower and upper bounds. This may be an empty '
+ 'sequence. It\n'
+ 'is not an error if *i* or *j* lie outside the range of valid '
+ 'indexes\n'
+ "(such items don't exist so they aren't selected).\n"
+ '\n'
+ 'The semantics for an extended slicing are as follows. The '
+ 'primary\n'
+ 'must evaluate to a mapping object, and it is indexed with a key '
+ 'that\n'
+ 'is constructed from the slice list, as follows. If the slice '
+ 'list\n'
+ 'contains at least one comma, the key is a tuple containing the\n'
+ 'conversion of the slice items; otherwise, the conversion of the '
+ 'lone\n'
+ 'slice item is the key. The conversion of a slice item that is '
+ 'an\n'
+ 'expression is that expression. The conversion of an ellipsis '
+ 'slice\n'
+ 'item is the built-in "Ellipsis" object. The conversion of a '
+ 'proper\n'
+ 'slice is a slice object (see section The standard type '
+ 'hierarchy)\n'
+ 'whose "start", "stop" and "step" attributes are the values of '
+ 'the\n'
+ 'expressions given as lower bound, upper bound and stride,\n'
+ 'respectively, substituting "None" for missing expressions.\n',
+ 'specialattrs': '\n'
+ 'Special Attributes\n'
+ '******************\n'
+ '\n'
+ 'The implementation adds a few special read-only attributes '
+ 'to several\n'
+ 'object types, where they are relevant. Some of these are '
+ 'not reported\n'
+ 'by the "dir()" built-in function.\n'
+ '\n'
+ 'object.__dict__\n'
+ '\n'
+ ' A dictionary or other mapping object used to store an '
+ "object's\n"
+ ' (writable) attributes.\n'
+ '\n'
+ 'object.__methods__\n'
+ '\n'
+ ' Deprecated since version 2.2: Use the built-in function '
+ '"dir()" to\n'
+ " get a list of an object's attributes. This attribute is "
+ 'no longer\n'
+ ' available.\n'
+ '\n'
+ 'object.__members__\n'
+ '\n'
+ ' Deprecated since version 2.2: Use the built-in function '
+ '"dir()" to\n'
+ " get a list of an object's attributes. This attribute is "
+ 'no longer\n'
+ ' available.\n'
+ '\n'
+ 'instance.__class__\n'
+ '\n'
+ ' The class to which a class instance belongs.\n'
+ '\n'
+ 'class.__bases__\n'
+ '\n'
+ ' The tuple of base classes of a class object.\n'
+ '\n'
+ 'definition.__name__\n'
+ '\n'
+ ' The name of the class, type, function, method, '
+ 'descriptor, or\n'
+ ' generator instance.\n'
+ '\n'
+ 'The following attributes are only supported by *new-style '
+ 'class*es.\n'
+ '\n'
+ 'class.__mro__\n'
+ '\n'
+ ' This attribute is a tuple of classes that are considered '
+ 'when\n'
+ ' looking for base classes during method resolution.\n'
+ '\n'
+ 'class.mro()\n'
+ '\n'
+ ' This method can be overridden by a metaclass to customize '
+ 'the\n'
+ ' method resolution order for its instances. It is called '
+ 'at class\n'
+ ' instantiation, and its result is stored in "__mro__".\n'
+ '\n'
+ 'class.__subclasses__()\n'
+ '\n'
+ ' Each new-style class keeps a list of weak references to '
+ 'its\n'
+ ' immediate subclasses. This method returns a list of all '
+ 'those\n'
+ ' references still alive. Example:\n'
+ '\n'
+ ' >>> int.__subclasses__()\n'
+ " [<type 'bool'>]\n"
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ '[1] Additional information on these special methods may be '
+ 'found\n'
+ ' in the Python Reference Manual (Basic customization).\n'
+ '\n'
+ '[2] As a consequence, the list "[1, 2]" is considered equal '
+ 'to\n'
+ ' "[1.0, 2.0]", and similarly for tuples.\n'
+ '\n'
+ "[3] They must have since the parser can't tell the type of "
+ 'the\n'
+ ' operands.\n'
+ '\n'
+ '[4] Cased characters are those with general category '
+ 'property\n'
+ ' being one of "Lu" (Letter, uppercase), "Ll" (Letter, '
+ 'lowercase),\n'
+ ' or "Lt" (Letter, titlecase).\n'
+ '\n'
+ '[5] To format only a tuple you should therefore provide a\n'
+ ' singleton tuple whose only element is the tuple to be '
+ 'formatted.\n'
+ '\n'
+ '[6] The advantage of leaving the newline on is that '
+ 'returning an\n'
+ ' empty string is then an unambiguous EOF indication. It '
+ 'is also\n'
+ ' possible (in cases where it might matter, for example, '
+ 'if you want\n'
+ ' to make an exact copy of a file while scanning its '
+ 'lines) to tell\n'
+ ' whether the last line of a file ended in a newline or '
+ 'not (yes\n'
+ ' this happens!).\n',
+ 'specialnames': '\n'
+ 'Special method names\n'
+ '********************\n'
+ '\n'
+ 'A class can implement certain operations that are invoked by '
+ 'special\n'
+ 'syntax (such as arithmetic operations or subscripting and '
+ 'slicing) by\n'
+ "defining methods with special names. This is Python's "
+ 'approach to\n'
+ '*operator overloading*, allowing classes to define their own '
+ 'behavior\n'
+ 'with respect to language operators. For instance, if a '
+ 'class defines\n'
+ 'a method named "__getitem__()", and "x" is an instance of '
+ 'this class,\n'
+ 'then "x[i]" is roughly equivalent to "x.__getitem__(i)" for '
+ 'old-style\n'
+ 'classes and "type(x).__getitem__(x, i)" for new-style '
+ 'classes. Except\n'
+ 'where mentioned, attempts to execute an operation raise an '
+ 'exception\n'
+ 'when no appropriate method is defined (typically '
+ '"AttributeError" or\n'
+ '"TypeError").\n'
+ '\n'
+ 'When implementing a class that emulates any built-in type, '
+ 'it is\n'
+ 'important that the emulation only be implemented to the '
+ 'degree that it\n'
+ 'makes sense for the object being modelled. For example, '
+ 'some\n'
+ 'sequences may work well with retrieval of individual '
+ 'elements, but\n'
+ 'extracting a slice may not make sense. (One example of this '
+ 'is the\n'
+ '"NodeList" interface in the W3C\'s Document Object Model.)\n'
+ '\n'
+ '\n'
+ 'Basic customization\n'
+ '===================\n'
+ '\n'
+ 'object.__new__(cls[, ...])\n'
+ '\n'
+ ' Called to create a new instance of class *cls*. '
+ '"__new__()" is a\n'
+ ' static method (special-cased so you need not declare it '
+ 'as such)\n'
+ ' that takes the class of which an instance was requested '
+ 'as its\n'
+ ' first argument. The remaining arguments are those passed '
+ 'to the\n'
+ ' object constructor expression (the call to the class). '
+ 'The return\n'
+ ' value of "__new__()" should be the new object instance '
+ '(usually an\n'
+ ' instance of *cls*).\n'
+ '\n'
+ ' Typical implementations create a new instance of the '
+ 'class by\n'
+ ' invoking the superclass\'s "__new__()" method using\n'
+ ' "super(currentclass, cls).__new__(cls[, ...])" with '
+ 'appropriate\n'
+ ' arguments and then modifying the newly-created instance '
+ 'as\n'
+ ' necessary before returning it.\n'
+ '\n'
+ ' If "__new__()" returns an instance of *cls*, then the '
+ 'new\n'
+ ' instance\'s "__init__()" method will be invoked like\n'
+ ' "__init__(self[, ...])", where *self* is the new instance '
+ 'and the\n'
+ ' remaining arguments are the same as were passed to '
+ '"__new__()".\n'
+ '\n'
+ ' If "__new__()" does not return an instance of *cls*, then '
+ 'the new\n'
+ ' instance\'s "__init__()" method will not be invoked.\n'
+ '\n'
+ ' "__new__()" is intended mainly to allow subclasses of '
+ 'immutable\n'
+ ' types (like int, str, or tuple) to customize instance '
+ 'creation. It\n'
+ ' is also commonly overridden in custom metaclasses in '
+ 'order to\n'
+ ' customize class creation.\n'
+ '\n'
+ 'object.__init__(self[, ...])\n'
+ '\n'
+ ' Called after the instance has been created (by '
+ '"__new__()"), but\n'
+ ' before it is returned to the caller. The arguments are '
+ 'those\n'
+ ' passed to the class constructor expression. If a base '
+ 'class has an\n'
+ ' "__init__()" method, the derived class\'s "__init__()" '
+ 'method, if\n'
+ ' any, must explicitly call it to ensure proper '
+ 'initialization of the\n'
+ ' base class part of the instance; for example:\n'
+ ' "BaseClass.__init__(self, [args...])".\n'
+ '\n'
+ ' Because "__new__()" and "__init__()" work together in '
+ 'constructing\n'
+ ' objects ("__new__()" to create it, and "__init__()" to '
+ 'customise\n'
+ ' it), no non-"None" value may be returned by "__init__()"; '
+ 'doing so\n'
+ ' will cause a "TypeError" to be raised at runtime.\n'
+ '\n'
+ 'object.__del__(self)\n'
+ '\n'
+ ' Called when the instance is about to be destroyed. This '
+ 'is also\n'
+ ' called a destructor. If a base class has a "__del__()" '
+ 'method, the\n'
+ ' derived class\'s "__del__()" method, if any, must '
+ 'explicitly call it\n'
+ ' to ensure proper deletion of the base class part of the '
+ 'instance.\n'
+ ' Note that it is possible (though not recommended!) for '
+ 'the\n'
+ ' "__del__()" method to postpone destruction of the '
+ 'instance by\n'
+ ' creating a new reference to it. It may then be called at '
+ 'a later\n'
+ ' time when this new reference is deleted. It is not '
+ 'guaranteed that\n'
+ ' "__del__()" methods are called for objects that still '
+ 'exist when\n'
+ ' the interpreter exits.\n'
+ '\n'
+ ' Note: "del x" doesn\'t directly call "x.__del__()" --- '
+ 'the former\n'
+ ' decrements the reference count for "x" by one, and the '
+ 'latter is\n'
+ ' only called when "x"\'s reference count reaches zero. '
+ 'Some common\n'
+ ' situations that may prevent the reference count of an '
+ 'object from\n'
+ ' going to zero include: circular references between '
+ 'objects (e.g.,\n'
+ ' a doubly-linked list or a tree data structure with '
+ 'parent and\n'
+ ' child pointers); a reference to the object on the stack '
+ 'frame of\n'
+ ' a function that caught an exception (the traceback '
+ 'stored in\n'
+ ' "sys.exc_traceback" keeps the stack frame alive); or a '
+ 'reference\n'
+ ' to the object on the stack frame that raised an '
+ 'unhandled\n'
+ ' exception in interactive mode (the traceback stored in\n'
+ ' "sys.last_traceback" keeps the stack frame alive). The '
+ 'first\n'
+ ' situation can only be remedied by explicitly breaking '
+ 'the cycles;\n'
+ ' the latter two situations can be resolved by storing '
+ '"None" in\n'
+ ' "sys.exc_traceback" or "sys.last_traceback". Circular '
+ 'references\n'
+ ' which are garbage are detected when the option cycle '
+ 'detector is\n'
+ " enabled (it's on by default), but can only be cleaned "
+ 'up if there\n'
+ ' are no Python-level "__del__()" methods involved. Refer '
+ 'to the\n'
+ ' documentation for the "gc" module for more information '
+ 'about how\n'
+ ' "__del__()" methods are handled by the cycle detector,\n'
+ ' particularly the description of the "garbage" value.\n'
+ '\n'
+ ' Warning: Due to the precarious circumstances under which\n'
+ ' "__del__()" methods are invoked, exceptions that occur '
+ 'during\n'
+ ' their execution are ignored, and a warning is printed '
+ 'to\n'
+ ' "sys.stderr" instead. Also, when "__del__()" is invoked '
+ 'in\n'
+ ' response to a module being deleted (e.g., when '
+ 'execution of the\n'
+ ' program is done), other globals referenced by the '
+ '"__del__()"\n'
+ ' method may already have been deleted or in the process '
+ 'of being\n'
+ ' torn down (e.g. the import machinery shutting down). '
+ 'For this\n'
+ ' reason, "__del__()" methods should do the absolute '
+ 'minimum needed\n'
+ ' to maintain external invariants. Starting with version '
+ '1.5,\n'
+ ' Python guarantees that globals whose name begins with a '
+ 'single\n'
+ ' underscore are deleted from their module before other '
+ 'globals are\n'
+ ' deleted; if no other references to such globals exist, '
+ 'this may\n'
+ ' help in assuring that imported modules are still '
+ 'available at the\n'
+ ' time when the "__del__()" method is called.\n'
+ '\n'
+ ' See also the "-R" command-line option.\n'
+ '\n'
+ 'object.__repr__(self)\n'
+ '\n'
+ ' Called by the "repr()" built-in function and by string '
+ 'conversions\n'
+ ' (reverse quotes) to compute the "official" string '
+ 'representation of\n'
+ ' an object. If at all possible, this should look like a '
+ 'valid\n'
+ ' Python expression that could be used to recreate an '
+ 'object with the\n'
+ ' same value (given an appropriate environment). If this '
+ 'is not\n'
+ ' possible, a string of the form "<...some useful '
+ 'description...>"\n'
+ ' should be returned. The return value must be a string '
+ 'object. If a\n'
+ ' class defines "__repr__()" but not "__str__()", then '
+ '"__repr__()"\n'
+ ' is also used when an "informal" string representation of '
+ 'instances\n'
+ ' of that class is required.\n'
+ '\n'
+ ' This is typically used for debugging, so it is important '
+ 'that the\n'
+ ' representation is information-rich and unambiguous.\n'
+ '\n'
+ 'object.__str__(self)\n'
+ '\n'
+ ' Called by the "str()" built-in function and by the '
+ '"print"\n'
+ ' statement to compute the "informal" string representation '
+ 'of an\n'
+ ' object. This differs from "__repr__()" in that it does '
+ 'not have to\n'
+ ' be a valid Python expression: a more convenient or '
+ 'concise\n'
+ ' representation may be used instead. The return value must '
+ 'be a\n'
+ ' string object.\n'
+ '\n'
+ 'object.__lt__(self, other)\n'
+ 'object.__le__(self, other)\n'
+ 'object.__eq__(self, other)\n'
+ 'object.__ne__(self, other)\n'
+ 'object.__gt__(self, other)\n'
+ 'object.__ge__(self, other)\n'
+ '\n'
+ ' New in version 2.1.\n'
+ '\n'
+ ' These are the so-called "rich comparison" methods, and '
+ 'are called\n'
+ ' for comparison operators in preference to "__cmp__()" '
+ 'below. The\n'
+ ' correspondence between operator symbols and method names '
+ 'is as\n'
+ ' follows: "x<y" calls "x.__lt__(y)", "x<=y" calls '
+ '"x.__le__(y)",\n'
+ ' "x==y" calls "x.__eq__(y)", "x!=y" and "x<>y" call '
+ '"x.__ne__(y)",\n'
+ ' "x>y" calls "x.__gt__(y)", and "x>=y" calls '
+ '"x.__ge__(y)".\n'
+ '\n'
+ ' A rich comparison method may return the singleton '
+ '"NotImplemented"\n'
+ ' if it does not implement the operation for a given pair '
+ 'of\n'
+ ' arguments. By convention, "False" and "True" are returned '
+ 'for a\n'
+ ' successful comparison. However, these methods can return '
+ 'any value,\n'
+ ' so if the comparison operator is used in a Boolean '
+ 'context (e.g.,\n'
+ ' in the condition of an "if" statement), Python will call '
+ '"bool()"\n'
+ ' on the value to determine if the result is true or '
+ 'false.\n'
+ '\n'
+ ' There are no implied relationships among the comparison '
+ 'operators.\n'
+ ' The truth of "x==y" does not imply that "x!=y" is false.\n'
+ ' Accordingly, when defining "__eq__()", one should also '
+ 'define\n'
+ ' "__ne__()" so that the operators will behave as '
+ 'expected. See the\n'
+ ' paragraph on "__hash__()" for some important notes on '
+ 'creating\n'
+ ' *hashable* objects which support custom comparison '
+ 'operations and\n'
+ ' are usable as dictionary keys.\n'
+ '\n'
+ ' There are no swapped-argument versions of these methods '
+ '(to be used\n'
+ ' when the left argument does not support the operation but '
+ 'the right\n'
+ ' argument does); rather, "__lt__()" and "__gt__()" are '
+ "each other's\n"
+ ' reflection, "__le__()" and "__ge__()" are each other\'s '
+ 'reflection,\n'
+ ' and "__eq__()" and "__ne__()" are their own reflection.\n'
+ '\n'
+ ' Arguments to rich comparison methods are never coerced.\n'
+ '\n'
+ ' To automatically generate ordering operations from a '
+ 'single root\n'
+ ' operation, see "functools.total_ordering()".\n'
+ '\n'
+ 'object.__cmp__(self, other)\n'
+ '\n'
+ ' Called by comparison operations if rich comparison (see '
+ 'above) is\n'
+ ' not defined. Should return a negative integer if "self < '
+ 'other",\n'
+ ' zero if "self == other", a positive integer if "self > '
+ 'other". If\n'
+ ' no "__cmp__()", "__eq__()" or "__ne__()" operation is '
+ 'defined,\n'
+ ' class instances are compared by object identity '
+ '("address"). See\n'
+ ' also the description of "__hash__()" for some important '
+ 'notes on\n'
+ ' creating *hashable* objects which support custom '
+ 'comparison\n'
+ ' operations and are usable as dictionary keys. (Note: the\n'
+ ' restriction that exceptions are not propagated by '
+ '"__cmp__()" has\n'
+ ' been removed since Python 1.5.)\n'
+ '\n'
+ 'object.__rcmp__(self, other)\n'
+ '\n'
+ ' Changed in version 2.1: No longer supported.\n'
+ '\n'
+ 'object.__hash__(self)\n'
+ '\n'
+ ' Called by built-in function "hash()" and for operations '
+ 'on members\n'
+ ' of hashed collections including "set", "frozenset", and '
+ '"dict".\n'
+ ' "__hash__()" should return an integer. The only required '
+ 'property\n'
+ ' is that objects which compare equal have the same hash '
+ 'value; it is\n'
+ ' advised to mix together the hash values of the components '
+ 'of the\n'
+ ' object that also play a part in comparison of objects by '
+ 'packing\n'
+ ' them into a tuple and hashing the tuple. Example:\n'
+ '\n'
+ ' def __hash__(self):\n'
+ ' return hash((self.name, self.nick, self.color))\n'
+ '\n'
+ ' If a class does not define a "__cmp__()" or "__eq__()" '
+ 'method it\n'
+ ' should not define a "__hash__()" operation either; if it '
+ 'defines\n'
+ ' "__cmp__()" or "__eq__()" but not "__hash__()", its '
+ 'instances will\n'
+ ' not be usable in hashed collections. If a class defines '
+ 'mutable\n'
+ ' objects and implements a "__cmp__()" or "__eq__()" '
+ 'method, it\n'
+ ' should not implement "__hash__()", since hashable '
+ 'collection\n'
+ " implementations require that an object's hash value is "
+ 'immutable\n'
+ " (if the object's hash value changes, it will be in the "
+ 'wrong hash\n'
+ ' bucket).\n'
+ '\n'
+ ' User-defined classes have "__cmp__()" and "__hash__()" '
+ 'methods by\n'
+ ' default; with them, all objects compare unequal (except '
+ 'with\n'
+ ' themselves) and "x.__hash__()" returns a result derived '
+ 'from\n'
+ ' "id(x)".\n'
+ '\n'
+ ' Classes which inherit a "__hash__()" method from a parent '
+ 'class but\n'
+ ' change the meaning of "__cmp__()" or "__eq__()" such that '
+ 'the hash\n'
+ ' value returned is no longer appropriate (e.g. by '
+ 'switching to a\n'
+ ' value-based concept of equality instead of the default '
+ 'identity\n'
+ ' based equality) can explicitly flag themselves as being '
+ 'unhashable\n'
+ ' by setting "__hash__ = None" in the class definition. '
+ 'Doing so\n'
+ ' means that not only will instances of the class raise an\n'
+ ' appropriate "TypeError" when a program attempts to '
+ 'retrieve their\n'
+ ' hash value, but they will also be correctly identified '
+ 'as\n'
+ ' unhashable when checking "isinstance(obj, '
+ 'collections.Hashable)"\n'
+ ' (unlike classes which define their own "__hash__()" to '
+ 'explicitly\n'
+ ' raise "TypeError").\n'
+ '\n'
+ ' Changed in version 2.5: "__hash__()" may now also return '
+ 'a long\n'
+ ' integer object; the 32-bit integer is then derived from '
+ 'the hash of\n'
+ ' that object.\n'
+ '\n'
+ ' Changed in version 2.6: "__hash__" may now be set to '
+ '"None" to\n'
+ ' explicitly flag instances of a class as unhashable.\n'
+ '\n'
+ 'object.__nonzero__(self)\n'
+ '\n'
+ ' Called to implement truth value testing and the built-in '
+ 'operation\n'
+ ' "bool()"; should return "False" or "True", or their '
+ 'integer\n'
+ ' equivalents "0" or "1". When this method is not '
+ 'defined,\n'
+ ' "__len__()" is called, if it is defined, and the object '
+ 'is\n'
+ ' considered true if its result is nonzero. If a class '
+ 'defines\n'
+ ' neither "__len__()" nor "__nonzero__()", all its '
+ 'instances are\n'
+ ' considered true.\n'
+ '\n'
+ 'object.__unicode__(self)\n'
+ '\n'
+ ' Called to implement "unicode()" built-in; should return a '
+ 'Unicode\n'
+ ' object. When this method is not defined, string '
+ 'conversion is\n'
+ ' attempted, and the result of string conversion is '
+ 'converted to\n'
+ ' Unicode using the system default encoding.\n'
+ '\n'
+ '\n'
+ 'Customizing attribute access\n'
+ '============================\n'
+ '\n'
+ 'The following methods can be defined to customize the '
+ 'meaning of\n'
+ 'attribute access (use of, assignment to, or deletion of '
+ '"x.name") for\n'
+ 'class instances.\n'
+ '\n'
+ 'object.__getattr__(self, name)\n'
+ '\n'
+ ' Called when an attribute lookup has not found the '
+ 'attribute in the\n'
+ ' usual places (i.e. it is not an instance attribute nor is '
+ 'it found\n'
+ ' in the class tree for "self"). "name" is the attribute '
+ 'name. This\n'
+ ' method should return the (computed) attribute value or '
+ 'raise an\n'
+ ' "AttributeError" exception.\n'
+ '\n'
+ ' Note that if the attribute is found through the normal '
+ 'mechanism,\n'
+ ' "__getattr__()" is not called. (This is an intentional '
+ 'asymmetry\n'
+ ' between "__getattr__()" and "__setattr__()".) This is '
+ 'done both for\n'
+ ' efficiency reasons and because otherwise "__getattr__()" '
+ 'would have\n'
+ ' no way to access other attributes of the instance. Note '
+ 'that at\n'
+ ' least for instance variables, you can fake total control '
+ 'by not\n'
+ ' inserting any values in the instance attribute dictionary '
+ '(but\n'
+ ' instead inserting them in another object). See the\n'
+ ' "__getattribute__()" method below for a way to actually '
+ 'get total\n'
+ ' control in new-style classes.\n'
+ '\n'
+ 'object.__setattr__(self, name, value)\n'
+ '\n'
+ ' Called when an attribute assignment is attempted. This '
+ 'is called\n'
+ ' instead of the normal mechanism (i.e. store the value in '
+ 'the\n'
+ ' instance dictionary). *name* is the attribute name, '
+ '*value* is the\n'
+ ' value to be assigned to it.\n'
+ '\n'
+ ' If "__setattr__()" wants to assign to an instance '
+ 'attribute, it\n'
+ ' should not simply execute "self.name = value" --- this '
+ 'would cause\n'
+ ' a recursive call to itself. Instead, it should insert '
+ 'the value in\n'
+ ' the dictionary of instance attributes, e.g., '
+ '"self.__dict__[name] =\n'
+ ' value". For new-style classes, rather than accessing the '
+ 'instance\n'
+ ' dictionary, it should call the base class method with the '
+ 'same\n'
+ ' name, for example, "object.__setattr__(self, name, '
+ 'value)".\n'
+ '\n'
+ 'object.__delattr__(self, name)\n'
+ '\n'
+ ' Like "__setattr__()" but for attribute deletion instead '
+ 'of\n'
+ ' assignment. This should only be implemented if "del '
+ 'obj.name" is\n'
+ ' meaningful for the object.\n'
+ '\n'
+ '\n'
+ 'More attribute access for new-style classes\n'
+ '-------------------------------------------\n'
+ '\n'
+ 'The following methods only apply to new-style classes.\n'
+ '\n'
+ 'object.__getattribute__(self, name)\n'
+ '\n'
+ ' Called unconditionally to implement attribute accesses '
+ 'for\n'
+ ' instances of the class. If the class also defines '
+ '"__getattr__()",\n'
+ ' the latter will not be called unless "__getattribute__()" '
+ 'either\n'
+ ' calls it explicitly or raises an "AttributeError". This '
+ 'method\n'
+ ' should return the (computed) attribute value or raise an\n'
+ ' "AttributeError" exception. In order to avoid infinite '
+ 'recursion in\n'
+ ' this method, its implementation should always call the '
+ 'base class\n'
+ ' method with the same name to access any attributes it '
+ 'needs, for\n'
+ ' example, "object.__getattribute__(self, name)".\n'
+ '\n'
+ ' Note: This method may still be bypassed when looking up '
+ 'special\n'
+ ' methods as the result of implicit invocation via '
+ 'language syntax\n'
+ ' or built-in functions. See Special method lookup for '
+ 'new-style\n'
+ ' classes.\n'
+ '\n'
+ '\n'
+ 'Implementing Descriptors\n'
+ '------------------------\n'
+ '\n'
+ 'The following methods only apply when an instance of the '
+ 'class\n'
+ 'containing the method (a so-called *descriptor* class) '
+ 'appears in an\n'
+ "*owner* class (the descriptor must be in either the owner's "
+ 'class\n'
+ 'dictionary or in the class dictionary for one of its '
+ 'parents). In the\n'
+ 'examples below, "the attribute" refers to the attribute '
+ 'whose name is\n'
+ 'the key of the property in the owner class\' "__dict__".\n'
+ '\n'
+ 'object.__get__(self, instance, owner)\n'
+ '\n'
+ ' Called to get the attribute of the owner class (class '
+ 'attribute\n'
+ ' access) or of an instance of that class (instance '
+ 'attribute\n'
+ ' access). *owner* is always the owner class, while '
+ '*instance* is the\n'
+ ' instance that the attribute was accessed through, or '
+ '"None" when\n'
+ ' the attribute is accessed through the *owner*. This '
+ 'method should\n'
+ ' return the (computed) attribute value or raise an '
+ '"AttributeError"\n'
+ ' exception.\n'
+ '\n'
+ 'object.__set__(self, instance, value)\n'
+ '\n'
+ ' Called to set the attribute on an instance *instance* of '
+ 'the owner\n'
+ ' class to a new value, *value*.\n'
+ '\n'
+ 'object.__delete__(self, instance)\n'
+ '\n'
+ ' Called to delete the attribute on an instance *instance* '
+ 'of the\n'
+ ' owner class.\n'
+ '\n'
+ '\n'
+ 'Invoking Descriptors\n'
+ '--------------------\n'
+ '\n'
+ 'In general, a descriptor is an object attribute with '
+ '"binding\n'
+ 'behavior", one whose attribute access has been overridden by '
+ 'methods\n'
+ 'in the descriptor protocol: "__get__()", "__set__()", and\n'
+ '"__delete__()". If any of those methods are defined for an '
+ 'object, it\n'
+ 'is said to be a descriptor.\n'
+ '\n'
+ 'The default behavior for attribute access is to get, set, or '
+ 'delete\n'
+ "the attribute from an object's dictionary. For instance, "
+ '"a.x" has a\n'
+ 'lookup chain starting with "a.__dict__[\'x\']", then\n'
+ '"type(a).__dict__[\'x\']", and continuing through the base '
+ 'classes of\n'
+ '"type(a)" excluding metaclasses.\n'
+ '\n'
+ 'However, if the looked-up value is an object defining one of '
+ 'the\n'
+ 'descriptor methods, then Python may override the default '
+ 'behavior and\n'
+ 'invoke the descriptor method instead. Where this occurs in '
+ 'the\n'
+ 'precedence chain depends on which descriptor methods were '
+ 'defined and\n'
+ 'how they were called. Note that descriptors are only '
+ 'invoked for new\n'
+ 'style objects or classes (ones that subclass "object()" or '
+ '"type()").\n'
+ '\n'
+ 'The starting point for descriptor invocation is a binding, '
+ '"a.x". How\n'
+ 'the arguments are assembled depends on "a":\n'
+ '\n'
+ 'Direct Call\n'
+ ' The simplest and least common call is when user code '
+ 'directly\n'
+ ' invokes a descriptor method: "x.__get__(a)".\n'
+ '\n'
+ 'Instance Binding\n'
+ ' If binding to a new-style object instance, "a.x" is '
+ 'transformed\n'
+ ' into the call: "type(a).__dict__[\'x\'].__get__(a, '
+ 'type(a))".\n'
+ '\n'
+ 'Class Binding\n'
+ ' If binding to a new-style class, "A.x" is transformed '
+ 'into the\n'
+ ' call: "A.__dict__[\'x\'].__get__(None, A)".\n'
+ '\n'
+ 'Super Binding\n'
+ ' If "a" is an instance of "super", then the binding '
+ '"super(B,\n'
+ ' obj).m()" searches "obj.__class__.__mro__" for the base '
+ 'class "A"\n'
+ ' immediately preceding "B" and then invokes the descriptor '
+ 'with the\n'
+ ' call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n'
+ '\n'
+ 'For instance bindings, the precedence of descriptor '
+ 'invocation depends\n'
+ 'on the which descriptor methods are defined. A descriptor '
+ 'can define\n'
+ 'any combination of "__get__()", "__set__()" and '
+ '"__delete__()". If it\n'
+ 'does not define "__get__()", then accessing the attribute '
+ 'will return\n'
+ 'the descriptor object itself unless there is a value in the '
+ "object's\n"
+ 'instance dictionary. If the descriptor defines "__set__()" '
+ 'and/or\n'
+ '"__delete__()", it is a data descriptor; if it defines '
+ 'neither, it is\n'
+ 'a non-data descriptor. Normally, data descriptors define '
+ 'both\n'
+ '"__get__()" and "__set__()", while non-data descriptors have '
+ 'just the\n'
+ '"__get__()" method. Data descriptors with "__set__()" and '
+ '"__get__()"\n'
+ 'defined always override a redefinition in an instance '
+ 'dictionary. In\n'
+ 'contrast, non-data descriptors can be overridden by '
+ 'instances.\n'
+ '\n'
+ 'Python methods (including "staticmethod()" and '
+ '"classmethod()") are\n'
+ 'implemented as non-data descriptors. Accordingly, instances '
+ 'can\n'
+ 'redefine and override methods. This allows individual '
+ 'instances to\n'
+ 'acquire behaviors that differ from other instances of the '
+ 'same class.\n'
+ '\n'
+ 'The "property()" function is implemented as a data '
+ 'descriptor.\n'
+ 'Accordingly, instances cannot override the behavior of a '
+ 'property.\n'
+ '\n'
+ '\n'
+ '__slots__\n'
+ '---------\n'
+ '\n'
+ 'By default, instances of both old and new-style classes have '
+ 'a\n'
+ 'dictionary for attribute storage. This wastes space for '
+ 'objects\n'
+ 'having very few instance variables. The space consumption '
+ 'can become\n'
+ 'acute when creating large numbers of instances.\n'
+ '\n'
+ 'The default can be overridden by defining *__slots__* in a '
+ 'new-style\n'
+ 'class definition. The *__slots__* declaration takes a '
+ 'sequence of\n'
+ 'instance variables and reserves just enough space in each '
+ 'instance to\n'
+ 'hold a value for each variable. Space is saved because '
+ '*__dict__* is\n'
+ 'not created for each instance.\n'
+ '\n'
+ '__slots__\n'
+ '\n'
+ ' This class variable can be assigned a string, iterable, '
+ 'or sequence\n'
+ ' of strings with variable names used by instances. If '
+ 'defined in a\n'
+ ' new-style class, *__slots__* reserves space for the '
+ 'declared\n'
+ ' variables and prevents the automatic creation of '
+ '*__dict__* and\n'
+ ' *__weakref__* for each instance.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ 'Notes on using *__slots__*\n'
+ '\n'
+ '* When inheriting from a class without *__slots__*, the '
+ '*__dict__*\n'
+ ' attribute of that class will always be accessible, so a '
+ '*__slots__*\n'
+ ' definition in the subclass is meaningless.\n'
+ '\n'
+ '* Without a *__dict__* variable, instances cannot be '
+ 'assigned new\n'
+ ' variables not listed in the *__slots__* definition. '
+ 'Attempts to\n'
+ ' assign to an unlisted variable name raises '
+ '"AttributeError". If\n'
+ ' dynamic assignment of new variables is desired, then add\n'
+ ' "\'__dict__\'" to the sequence of strings in the '
+ '*__slots__*\n'
+ ' declaration.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, adding "\'__dict__\'" '
+ 'to the\n'
+ ' *__slots__* declaration would not enable the assignment of '
+ 'new\n'
+ ' attributes not specifically listed in the sequence of '
+ 'instance\n'
+ ' variable names.\n'
+ '\n'
+ '* Without a *__weakref__* variable for each instance, '
+ 'classes\n'
+ ' defining *__slots__* do not support weak references to '
+ 'its\n'
+ ' instances. If weak reference support is needed, then add\n'
+ ' "\'__weakref__\'" to the sequence of strings in the '
+ '*__slots__*\n'
+ ' declaration.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, adding '
+ '"\'__weakref__\'" to the\n'
+ ' *__slots__* declaration would not enable support for weak\n'
+ ' references.\n'
+ '\n'
+ '* *__slots__* are implemented at the class level by '
+ 'creating\n'
+ ' descriptors (Implementing Descriptors) for each variable '
+ 'name. As a\n'
+ ' result, class attributes cannot be used to set default '
+ 'values for\n'
+ ' instance variables defined by *__slots__*; otherwise, the '
+ 'class\n'
+ ' attribute would overwrite the descriptor assignment.\n'
+ '\n'
+ '* The action of a *__slots__* declaration is limited to the '
+ 'class\n'
+ ' where it is defined. As a result, subclasses will have a '
+ '*__dict__*\n'
+ ' unless they also define *__slots__* (which must only '
+ 'contain names\n'
+ ' of any *additional* slots).\n'
+ '\n'
+ '* If a class defines a slot also defined in a base class, '
+ 'the\n'
+ ' instance variable defined by the base class slot is '
+ 'inaccessible\n'
+ ' (except by retrieving its descriptor directly from the '
+ 'base class).\n'
+ ' This renders the meaning of the program undefined. In the '
+ 'future, a\n'
+ ' check may be added to prevent this.\n'
+ '\n'
+ '* Nonempty *__slots__* does not work for classes derived '
+ 'from\n'
+ ' "variable-length" built-in types such as "long", "str" and '
+ '"tuple".\n'
+ '\n'
+ '* Any non-string iterable may be assigned to *__slots__*. '
+ 'Mappings\n'
+ ' may also be used; however, in the future, special meaning '
+ 'may be\n'
+ ' assigned to the values corresponding to each key.\n'
+ '\n'
+ '* *__class__* assignment works only if both classes have the '
+ 'same\n'
+ ' *__slots__*.\n'
+ '\n'
+ ' Changed in version 2.6: Previously, *__class__* assignment '
+ 'raised an\n'
+ ' error if either new or old class had *__slots__*.\n'
+ '\n'
+ '\n'
+ 'Customizing class creation\n'
+ '==========================\n'
+ '\n'
+ 'By default, new-style classes are constructed using '
+ '"type()". A class\n'
+ 'definition is read into a separate namespace and the value '
+ 'of class\n'
+ 'name is bound to the result of "type(name, bases, dict)".\n'
+ '\n'
+ 'When the class definition is read, if *__metaclass__* is '
+ 'defined then\n'
+ 'the callable assigned to it will be called instead of '
+ '"type()". This\n'
+ 'allows classes or functions to be written which monitor or '
+ 'alter the\n'
+ 'class creation process:\n'
+ '\n'
+ '* Modifying the class dictionary prior to the class being '
+ 'created.\n'
+ '\n'
+ '* Returning an instance of another class -- essentially '
+ 'performing\n'
+ ' the role of a factory function.\n'
+ '\n'
+ "These steps will have to be performed in the metaclass's "
+ '"__new__()"\n'
+ 'method -- "type.__new__()" can then be called from this '
+ 'method to\n'
+ 'create a class with different properties. This example adds '
+ 'a new\n'
+ 'element to the class dictionary before creating the class:\n'
+ '\n'
+ ' class metacls(type):\n'
+ ' def __new__(mcs, name, bases, dict):\n'
+ " dict['foo'] = 'metacls was here'\n"
+ ' return type.__new__(mcs, name, bases, dict)\n'
+ '\n'
+ 'You can of course also override other class methods (or add '
+ 'new\n'
+ 'methods); for example defining a custom "__call__()" method '
+ 'in the\n'
+ 'metaclass allows custom behavior when the class is called, '
+ 'e.g. not\n'
+ 'always creating a new instance.\n'
+ '\n'
+ '__metaclass__\n'
+ '\n'
+ ' This variable can be any callable accepting arguments for '
+ '"name",\n'
+ ' "bases", and "dict". Upon class creation, the callable '
+ 'is used\n'
+ ' instead of the built-in "type()".\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ 'The appropriate metaclass is determined by the following '
+ 'precedence\n'
+ 'rules:\n'
+ '\n'
+ '* If "dict[\'__metaclass__\']" exists, it is used.\n'
+ '\n'
+ '* Otherwise, if there is at least one base class, its '
+ 'metaclass is\n'
+ ' used (this looks for a *__class__* attribute first and if '
+ 'not found,\n'
+ ' uses its type).\n'
+ '\n'
+ '* Otherwise, if a global variable named __metaclass__ '
+ 'exists, it is\n'
+ ' used.\n'
+ '\n'
+ '* Otherwise, the old-style, classic metaclass '
+ '(types.ClassType) is\n'
+ ' used.\n'
+ '\n'
+ 'The potential uses for metaclasses are boundless. Some ideas '
+ 'that have\n'
+ 'been explored including logging, interface checking, '
+ 'automatic\n'
+ 'delegation, automatic property creation, proxies, '
+ 'frameworks, and\n'
+ 'automatic resource locking/synchronization.\n'
+ '\n'
+ '\n'
+ 'Customizing instance and subclass checks\n'
+ '========================================\n'
+ '\n'
+ 'New in version 2.6.\n'
+ '\n'
+ 'The following methods are used to override the default '
+ 'behavior of the\n'
+ '"isinstance()" and "issubclass()" built-in functions.\n'
+ '\n'
+ 'In particular, the metaclass "abc.ABCMeta" implements these '
+ 'methods in\n'
+ 'order to allow the addition of Abstract Base Classes (ABCs) '
+ 'as\n'
+ '"virtual base classes" to any class or type (including '
+ 'built-in\n'
+ 'types), including other ABCs.\n'
+ '\n'
+ 'class.__instancecheck__(self, instance)\n'
+ '\n'
+ ' Return true if *instance* should be considered a (direct '
+ 'or\n'
+ ' indirect) instance of *class*. If defined, called to '
+ 'implement\n'
+ ' "isinstance(instance, class)".\n'
+ '\n'
+ 'class.__subclasscheck__(self, subclass)\n'
+ '\n'
+ ' Return true if *subclass* should be considered a (direct '
+ 'or\n'
+ ' indirect) subclass of *class*. If defined, called to '
+ 'implement\n'
+ ' "issubclass(subclass, class)".\n'
+ '\n'
+ 'Note that these methods are looked up on the type '
+ '(metaclass) of a\n'
+ 'class. They cannot be defined as class methods in the '
+ 'actual class.\n'
+ 'This is consistent with the lookup of special methods that '
+ 'are called\n'
+ 'on instances, only in this case the instance is itself a '
+ 'class.\n'
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 3119** - Introducing Abstract Base Classes\n'
+ ' Includes the specification for customizing '
+ '"isinstance()" and\n'
+ ' "issubclass()" behavior through "__instancecheck__()" '
+ 'and\n'
+ ' "__subclasscheck__()", with motivation for this '
+ 'functionality in\n'
+ ' the context of adding Abstract Base Classes (see the '
+ '"abc"\n'
+ ' module) to the language.\n'
+ '\n'
+ '\n'
+ 'Emulating callable objects\n'
+ '==========================\n'
+ '\n'
+ 'object.__call__(self[, args...])\n'
+ '\n'
+ ' Called when the instance is "called" as a function; if '
+ 'this method\n'
+ ' is defined, "x(arg1, arg2, ...)" is a shorthand for\n'
+ ' "x.__call__(arg1, arg2, ...)".\n'
+ '\n'
+ '\n'
+ 'Emulating container types\n'
+ '=========================\n'
+ '\n'
+ 'The following methods can be defined to implement container '
+ 'objects.\n'
+ 'Containers usually are sequences (such as lists or tuples) '
+ 'or mappings\n'
+ '(like dictionaries), but can represent other containers as '
+ 'well. The\n'
+ 'first set of methods is used either to emulate a sequence or '
+ 'to\n'
+ 'emulate a mapping; the difference is that for a sequence, '
+ 'the\n'
+ 'allowable keys should be the integers *k* for which "0 <= k '
+ '< N" where\n'
+ '*N* is the length of the sequence, or slice objects, which '
+ 'define a\n'
+ 'range of items. (For backwards compatibility, the method\n'
+ '"__getslice__()" (see below) can also be defined to handle '
+ 'simple, but\n'
+ 'not extended slices.) It is also recommended that mappings '
+ 'provide the\n'
+ 'methods "keys()", "values()", "items()", "has_key()", '
+ '"get()",\n'
+ '"clear()", "setdefault()", "iterkeys()", "itervalues()",\n'
+ '"iteritems()", "pop()", "popitem()", "copy()", and '
+ '"update()" behaving\n'
+ "similar to those for Python's standard dictionary objects. "
+ 'The\n'
+ '"UserDict" module provides a "DictMixin" class to help '
+ 'create those\n'
+ 'methods from a base set of "__getitem__()", '
+ '"__setitem__()",\n'
+ '"__delitem__()", and "keys()". Mutable sequences should '
+ 'provide\n'
+ 'methods "append()", "count()", "index()", "extend()", '
+ '"insert()",\n'
+ '"pop()", "remove()", "reverse()" and "sort()", like Python '
+ 'standard\n'
+ 'list objects. Finally, sequence types should implement '
+ 'addition\n'
+ '(meaning concatenation) and multiplication (meaning '
+ 'repetition) by\n'
+ 'defining the methods "__add__()", "__radd__()", '
+ '"__iadd__()",\n'
+ '"__mul__()", "__rmul__()" and "__imul__()" described below; '
+ 'they\n'
+ 'should not define "__coerce__()" or other numerical '
+ 'operators. It is\n'
+ 'recommended that both mappings and sequences implement the\n'
+ '"__contains__()" method to allow efficient use of the "in" '
+ 'operator;\n'
+ 'for mappings, "in" should be equivalent of "has_key()"; for '
+ 'sequences,\n'
+ 'it should search through the values. It is further '
+ 'recommended that\n'
+ 'both mappings and sequences implement the "__iter__()" '
+ 'method to allow\n'
+ 'efficient iteration through the container; for mappings, '
+ '"__iter__()"\n'
+ 'should be the same as "iterkeys()"; for sequences, it should '
+ 'iterate\n'
+ 'through the values.\n'
+ '\n'
+ 'object.__len__(self)\n'
+ '\n'
+ ' Called to implement the built-in function "len()". '
+ 'Should return\n'
+ ' the length of the object, an integer ">=" 0. Also, an '
+ 'object that\n'
+ ' doesn\'t define a "__nonzero__()" method and whose '
+ '"__len__()"\n'
+ ' method returns zero is considered to be false in a '
+ 'Boolean context.\n'
+ '\n'
+ ' **CPython implementation detail:** In CPython, the length '
+ 'is\n'
+ ' required to be at most "sys.maxsize". If the length is '
+ 'larger than\n'
+ ' "sys.maxsize" some features (such as "len()") may raise\n'
+ ' "OverflowError". To prevent raising "OverflowError" by '
+ 'truth value\n'
+ ' testing, an object must define a "__nonzero__()" method.\n'
+ '\n'
+ 'object.__getitem__(self, key)\n'
+ '\n'
+ ' Called to implement evaluation of "self[key]". For '
+ 'sequence types,\n'
+ ' the accepted keys should be integers and slice objects. '
+ 'Note that\n'
+ ' the special interpretation of negative indexes (if the '
+ 'class wishes\n'
+ ' to emulate a sequence type) is up to the "__getitem__()" '
+ 'method. If\n'
+ ' *key* is of an inappropriate type, "TypeError" may be '
+ 'raised; if of\n'
+ ' a value outside the set of indexes for the sequence '
+ '(after any\n'
+ ' special interpretation of negative values), "IndexError" '
+ 'should be\n'
+ ' raised. For mapping types, if *key* is missing (not in '
+ 'the\n'
+ ' container), "KeyError" should be raised.\n'
+ '\n'
+ ' Note: "for" loops expect that an "IndexError" will be '
+ 'raised for\n'
+ ' illegal indexes to allow proper detection of the end of '
+ 'the\n'
+ ' sequence.\n'
+ '\n'
+ 'object.__missing__(self, key)\n'
+ '\n'
+ ' Called by "dict"."__getitem__()" to implement "self[key]" '
+ 'for dict\n'
+ ' subclasses when key is not in the dictionary.\n'
+ '\n'
+ 'object.__setitem__(self, key, value)\n'
+ '\n'
+ ' Called to implement assignment to "self[key]". Same note '
+ 'as for\n'
+ ' "__getitem__()". This should only be implemented for '
+ 'mappings if\n'
+ ' the objects support changes to the values for keys, or if '
+ 'new keys\n'
+ ' can be added, or for sequences if elements can be '
+ 'replaced. The\n'
+ ' same exceptions should be raised for improper *key* '
+ 'values as for\n'
+ ' the "__getitem__()" method.\n'
+ '\n'
+ 'object.__delitem__(self, key)\n'
+ '\n'
+ ' Called to implement deletion of "self[key]". Same note '
+ 'as for\n'
+ ' "__getitem__()". This should only be implemented for '
+ 'mappings if\n'
+ ' the objects support removal of keys, or for sequences if '
+ 'elements\n'
+ ' can be removed from the sequence. The same exceptions '
+ 'should be\n'
+ ' raised for improper *key* values as for the '
+ '"__getitem__()" method.\n'
+ '\n'
+ 'object.__iter__(self)\n'
+ '\n'
+ ' This method is called when an iterator is required for a '
+ 'container.\n'
+ ' This method should return a new iterator object that can '
+ 'iterate\n'
+ ' over all the objects in the container. For mappings, it '
+ 'should\n'
+ ' iterate over the keys of the container, and should also '
+ 'be made\n'
+ ' available as the method "iterkeys()".\n'
+ '\n'
+ ' Iterator objects also need to implement this method; they '
+ 'are\n'
+ ' required to return themselves. For more information on '
+ 'iterator\n'
+ ' objects, see Iterator Types.\n'
+ '\n'
+ 'object.__reversed__(self)\n'
+ '\n'
+ ' Called (if present) by the "reversed()" built-in to '
+ 'implement\n'
+ ' reverse iteration. It should return a new iterator '
+ 'object that\n'
+ ' iterates over all the objects in the container in reverse '
+ 'order.\n'
+ '\n'
+ ' If the "__reversed__()" method is not provided, the '
+ '"reversed()"\n'
+ ' built-in will fall back to using the sequence protocol '
+ '("__len__()"\n'
+ ' and "__getitem__()"). Objects that support the sequence '
+ 'protocol\n'
+ ' should only provide "__reversed__()" if they can provide '
+ 'an\n'
+ ' implementation that is more efficient than the one '
+ 'provided by\n'
+ ' "reversed()".\n'
+ '\n'
+ ' New in version 2.6.\n'
+ '\n'
+ 'The membership test operators ("in" and "not in") are '
+ 'normally\n'
+ 'implemented as an iteration through a sequence. However, '
+ 'container\n'
+ 'objects can supply the following special method with a more '
+ 'efficient\n'
+ 'implementation, which also does not require the object be a '
+ 'sequence.\n'
+ '\n'
+ 'object.__contains__(self, item)\n'
+ '\n'
+ ' Called to implement membership test operators. Should '
+ 'return true\n'
+ ' if *item* is in *self*, false otherwise. For mapping '
+ 'objects, this\n'
+ ' should consider the keys of the mapping rather than the '
+ 'values or\n'
+ ' the key-item pairs.\n'
+ '\n'
+ ' For objects that don\'t define "__contains__()", the '
+ 'membership test\n'
+ ' first tries iteration via "__iter__()", then the old '
+ 'sequence\n'
+ ' iteration protocol via "__getitem__()", see this section '
+ 'in the\n'
+ ' language reference.\n'
+ '\n'
+ '\n'
+ 'Additional methods for emulation of sequence types\n'
+ '==================================================\n'
+ '\n'
+ 'The following optional methods can be defined to further '
+ 'emulate\n'
+ 'sequence objects. Immutable sequences methods should at '
+ 'most only\n'
+ 'define "__getslice__()"; mutable sequences might define all '
+ 'three\n'
+ 'methods.\n'
+ '\n'
+ 'object.__getslice__(self, i, j)\n'
+ '\n'
+ ' Deprecated since version 2.0: Support slice objects as '
+ 'parameters\n'
+ ' to the "__getitem__()" method. (However, built-in types '
+ 'in CPython\n'
+ ' currently still implement "__getslice__()". Therefore, '
+ 'you have to\n'
+ ' override it in derived classes when implementing '
+ 'slicing.)\n'
+ '\n'
+ ' Called to implement evaluation of "self[i:j]". The '
+ 'returned object\n'
+ ' should be of the same type as *self*. Note that missing '
+ '*i* or *j*\n'
+ ' in the slice expression are replaced by zero or '
+ '"sys.maxsize",\n'
+ ' respectively. If negative indexes are used in the slice, '
+ 'the\n'
+ ' length of the sequence is added to that index. If the '
+ 'instance does\n'
+ ' not implement the "__len__()" method, an "AttributeError" '
+ 'is\n'
+ ' raised. No guarantee is made that indexes adjusted this '
+ 'way are not\n'
+ ' still negative. Indexes which are greater than the '
+ 'length of the\n'
+ ' sequence are not modified. If no "__getslice__()" is '
+ 'found, a slice\n'
+ ' object is created instead, and passed to "__getitem__()" '
+ 'instead.\n'
+ '\n'
+ 'object.__setslice__(self, i, j, sequence)\n'
+ '\n'
+ ' Called to implement assignment to "self[i:j]". Same notes '
+ 'for *i*\n'
+ ' and *j* as for "__getslice__()".\n'
+ '\n'
+ ' This method is deprecated. If no "__setslice__()" is '
+ 'found, or for\n'
+ ' extended slicing of the form "self[i:j:k]", a slice '
+ 'object is\n'
+ ' created, and passed to "__setitem__()", instead of '
+ '"__setslice__()"\n'
+ ' being called.\n'
+ '\n'
+ 'object.__delslice__(self, i, j)\n'
+ '\n'
+ ' Called to implement deletion of "self[i:j]". Same notes '
+ 'for *i* and\n'
+ ' *j* as for "__getslice__()". This method is deprecated. '
+ 'If no\n'
+ ' "__delslice__()" is found, or for extended slicing of the '
+ 'form\n'
+ ' "self[i:j:k]", a slice object is created, and passed to\n'
+ ' "__delitem__()", instead of "__delslice__()" being '
+ 'called.\n'
+ '\n'
+ 'Notice that these methods are only invoked when a single '
+ 'slice with a\n'
+ 'single colon is used, and the slice method is available. '
+ 'For slice\n'
+ 'operations involving extended slice notation, or in absence '
+ 'of the\n'
+ 'slice methods, "__getitem__()", "__setitem__()" or '
+ '"__delitem__()" is\n'
+ 'called with a slice object as argument.\n'
+ '\n'
+ 'The following example demonstrate how to make your program '
+ 'or module\n'
+ 'compatible with earlier versions of Python (assuming that '
+ 'methods\n'
+ '"__getitem__()", "__setitem__()" and "__delitem__()" support '
+ 'slice\n'
+ 'objects as arguments):\n'
+ '\n'
+ ' class MyClass:\n'
+ ' ...\n'
+ ' def __getitem__(self, index):\n'
+ ' ...\n'
+ ' def __setitem__(self, index, value):\n'
+ ' ...\n'
+ ' def __delitem__(self, index):\n'
+ ' ...\n'
+ '\n'
+ ' if sys.version_info < (2, 0):\n'
+ " # They won't be defined if version is at least "
+ '2.0 final\n'
+ '\n'
+ ' def __getslice__(self, i, j):\n'
+ ' return self[max(0, i):max(0, j):]\n'
+ ' def __setslice__(self, i, j, seq):\n'
+ ' self[max(0, i):max(0, j):] = seq\n'
+ ' def __delslice__(self, i, j):\n'
+ ' del self[max(0, i):max(0, j):]\n'
+ ' ...\n'
+ '\n'
+ 'Note the calls to "max()"; these are necessary because of '
+ 'the handling\n'
+ 'of negative indices before the "__*slice__()" methods are '
+ 'called.\n'
+ 'When negative indexes are used, the "__*item__()" methods '
+ 'receive them\n'
+ 'as provided, but the "__*slice__()" methods get a "cooked" '
+ 'form of the\n'
+ 'index values. For each negative index value, the length of '
+ 'the\n'
+ 'sequence is added to the index before calling the method '
+ '(which may\n'
+ 'still result in a negative index); this is the customary '
+ 'handling of\n'
+ 'negative indexes by the built-in sequence types, and the '
+ '"__*item__()"\n'
+ 'methods are expected to do this as well. However, since '
+ 'they should\n'
+ 'already be doing that, negative indexes cannot be passed in; '
+ 'they must\n'
+ 'be constrained to the bounds of the sequence before being '
+ 'passed to\n'
+ 'the "__*item__()" methods. Calling "max(0, i)" conveniently '
+ 'returns\n'
+ 'the proper value.\n'
+ '\n'
+ '\n'
+ 'Emulating numeric types\n'
+ '=======================\n'
+ '\n'
+ 'The following methods can be defined to emulate numeric '
+ 'objects.\n'
+ 'Methods corresponding to operations that are not supported '
+ 'by the\n'
+ 'particular kind of number implemented (e.g., bitwise '
+ 'operations for\n'
+ 'non-integral numbers) should be left undefined.\n'
+ '\n'
+ 'object.__add__(self, other)\n'
+ 'object.__sub__(self, other)\n'
+ 'object.__mul__(self, other)\n'
+ 'object.__floordiv__(self, other)\n'
+ 'object.__mod__(self, other)\n'
+ 'object.__divmod__(self, other)\n'
+ 'object.__pow__(self, other[, modulo])\n'
+ 'object.__lshift__(self, other)\n'
+ 'object.__rshift__(self, other)\n'
+ 'object.__and__(self, other)\n'
+ 'object.__xor__(self, other)\n'
+ 'object.__or__(self, other)\n'
+ '\n'
+ ' These methods are called to implement the binary '
+ 'arithmetic\n'
+ ' operations ("+", "-", "*", "//", "%", "divmod()", '
+ '"pow()", "**",\n'
+ ' "<<", ">>", "&", "^", "|"). For instance, to evaluate '
+ 'the\n'
+ ' expression "x + y", where *x* is an instance of a class '
+ 'that has an\n'
+ ' "__add__()" method, "x.__add__(y)" is called. The '
+ '"__divmod__()"\n'
+ ' method should be the equivalent to using "__floordiv__()" '
+ 'and\n'
+ ' "__mod__()"; it should not be related to "__truediv__()" '
+ '(described\n'
+ ' below). Note that "__pow__()" should be defined to '
+ 'accept an\n'
+ ' optional third argument if the ternary version of the '
+ 'built-in\n'
+ ' "pow()" function is to be supported.\n'
+ '\n'
+ ' If one of those methods does not support the operation '
+ 'with the\n'
+ ' supplied arguments, it should return "NotImplemented".\n'
+ '\n'
+ 'object.__div__(self, other)\n'
+ 'object.__truediv__(self, other)\n'
+ '\n'
+ ' The division operator ("/") is implemented by these '
+ 'methods. The\n'
+ ' "__truediv__()" method is used when "__future__.division" '
+ 'is in\n'
+ ' effect, otherwise "__div__()" is used. If only one of '
+ 'these two\n'
+ ' methods is defined, the object will not support division '
+ 'in the\n'
+ ' alternate context; "TypeError" will be raised instead.\n'
+ '\n'
+ 'object.__radd__(self, other)\n'
+ 'object.__rsub__(self, other)\n'
+ 'object.__rmul__(self, other)\n'
+ 'object.__rdiv__(self, other)\n'
+ 'object.__rtruediv__(self, other)\n'
+ 'object.__rfloordiv__(self, other)\n'
+ 'object.__rmod__(self, other)\n'
+ 'object.__rdivmod__(self, other)\n'
+ 'object.__rpow__(self, other)\n'
+ 'object.__rlshift__(self, other)\n'
+ 'object.__rrshift__(self, other)\n'
+ 'object.__rand__(self, other)\n'
+ 'object.__rxor__(self, other)\n'
+ 'object.__ror__(self, other)\n'
+ '\n'
+ ' These methods are called to implement the binary '
+ 'arithmetic\n'
+ ' operations ("+", "-", "*", "/", "%", "divmod()", "pow()", '
+ '"**",\n'
+ ' "<<", ">>", "&", "^", "|") with reflected (swapped) '
+ 'operands.\n'
+ ' These functions are only called if the left operand does '
+ 'not\n'
+ ' support the corresponding operation and the operands are '
+ 'of\n'
+ ' different types. [2] For instance, to evaluate the '
+ 'expression "x -\n'
+ ' y", where *y* is an instance of a class that has an '
+ '"__rsub__()"\n'
+ ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" '
+ 'returns\n'
+ ' *NotImplemented*.\n'
+ '\n'
+ ' Note that ternary "pow()" will not try calling '
+ '"__rpow__()" (the\n'
+ ' coercion rules would become too complicated).\n'
+ '\n'
+ " Note: If the right operand's type is a subclass of the "
+ 'left\n'
+ " operand's type and that subclass provides the reflected "
+ 'method\n'
+ ' for the operation, this method will be called before '
+ 'the left\n'
+ " operand's non-reflected method. This behavior allows "
+ 'subclasses\n'
+ " to override their ancestors' operations.\n"
+ '\n'
+ 'object.__iadd__(self, other)\n'
+ 'object.__isub__(self, other)\n'
+ 'object.__imul__(self, other)\n'
+ 'object.__idiv__(self, other)\n'
+ 'object.__itruediv__(self, other)\n'
+ 'object.__ifloordiv__(self, other)\n'
+ 'object.__imod__(self, other)\n'
+ 'object.__ipow__(self, other[, modulo])\n'
+ 'object.__ilshift__(self, other)\n'
+ 'object.__irshift__(self, other)\n'
+ 'object.__iand__(self, other)\n'
+ 'object.__ixor__(self, other)\n'
+ 'object.__ior__(self, other)\n'
+ '\n'
+ ' These methods are called to implement the augmented '
+ 'arithmetic\n'
+ ' assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", '
+ '"<<=",\n'
+ ' ">>=", "&=", "^=", "|="). These methods should attempt '
+ 'to do the\n'
+ ' operation in-place (modifying *self*) and return the '
+ 'result (which\n'
+ ' could be, but does not have to be, *self*). If a '
+ 'specific method\n'
+ ' is not defined, the augmented assignment falls back to '
+ 'the normal\n'
+ ' methods. For instance, to execute the statement "x += '
+ 'y", where\n'
+ ' *x* is an instance of a class that has an "__iadd__()" '
+ 'method,\n'
+ ' "x.__iadd__(y)" is called. If *x* is an instance of a '
+ 'class that\n'
+ ' does not define a "__iadd__()" method, "x.__add__(y)" '
+ 'and\n'
+ ' "y.__radd__(x)" are considered, as with the evaluation of '
+ '"x + y".\n'
+ '\n'
+ 'object.__neg__(self)\n'
+ 'object.__pos__(self)\n'
+ 'object.__abs__(self)\n'
+ 'object.__invert__(self)\n'
+ '\n'
+ ' Called to implement the unary arithmetic operations ("-", '
+ '"+",\n'
+ ' "abs()" and "~").\n'
+ '\n'
+ 'object.__complex__(self)\n'
+ 'object.__int__(self)\n'
+ 'object.__long__(self)\n'
+ 'object.__float__(self)\n'
+ '\n'
+ ' Called to implement the built-in functions "complex()", '
+ '"int()",\n'
+ ' "long()", and "float()". Should return a value of the '
+ 'appropriate\n'
+ ' type.\n'
+ '\n'
+ 'object.__oct__(self)\n'
+ 'object.__hex__(self)\n'
+ '\n'
+ ' Called to implement the built-in functions "oct()" and '
+ '"hex()".\n'
+ ' Should return a string value.\n'
+ '\n'
+ 'object.__index__(self)\n'
+ '\n'
+ ' Called to implement "operator.index()". Also called '
+ 'whenever\n'
+ ' Python needs an integer object (such as in slicing). '
+ 'Must return\n'
+ ' an integer (int or long).\n'
+ '\n'
+ ' New in version 2.5.\n'
+ '\n'
+ 'object.__coerce__(self, other)\n'
+ '\n'
+ ' Called to implement "mixed-mode" numeric arithmetic. '
+ 'Should either\n'
+ ' return a 2-tuple containing *self* and *other* converted '
+ 'to a\n'
+ ' common numeric type, or "None" if conversion is '
+ 'impossible. When\n'
+ ' the common type would be the type of "other", it is '
+ 'sufficient to\n'
+ ' return "None", since the interpreter will also ask the '
+ 'other object\n'
+ ' to attempt a coercion (but sometimes, if the '
+ 'implementation of the\n'
+ ' other type cannot be changed, it is useful to do the '
+ 'conversion to\n'
+ ' the other type here). A return value of "NotImplemented" '
+ 'is\n'
+ ' equivalent to returning "None".\n'
+ '\n'
+ '\n'
+ 'Coercion rules\n'
+ '==============\n'
+ '\n'
+ 'This section used to document the rules for coercion. As '
+ 'the language\n'
+ 'has evolved, the coercion rules have become hard to '
+ 'document\n'
+ 'precisely; documenting what one version of one particular\n'
+ 'implementation does is undesirable. Instead, here are some '
+ 'informal\n'
+ 'guidelines regarding coercion. In Python 3, coercion will '
+ 'not be\n'
+ 'supported.\n'
+ '\n'
+ '* If the left operand of a % operator is a string or Unicode '
+ 'object,\n'
+ ' no coercion takes place and the string formatting '
+ 'operation is\n'
+ ' invoked instead.\n'
+ '\n'
+ '* It is no longer recommended to define a coercion '
+ 'operation. Mixed-\n'
+ " mode operations on types that don't define coercion pass "
+ 'the\n'
+ ' original arguments to the operation.\n'
+ '\n'
+ '* New-style classes (those derived from "object") never '
+ 'invoke the\n'
+ ' "__coerce__()" method in response to a binary operator; '
+ 'the only\n'
+ ' time "__coerce__()" is invoked is when the built-in '
+ 'function\n'
+ ' "coerce()" is called.\n'
+ '\n'
+ '* For most intents and purposes, an operator that returns\n'
+ ' "NotImplemented" is treated the same as one that is not '
+ 'implemented\n'
+ ' at all.\n'
+ '\n'
+ '* Below, "__op__()" and "__rop__()" are used to signify the '
+ 'generic\n'
+ ' method names corresponding to an operator; "__iop__()" is '
+ 'used for\n'
+ ' the corresponding in-place operator. For example, for the '
+ 'operator\n'
+ ' \'"+"\', "__add__()" and "__radd__()" are used for the '
+ 'left and right\n'
+ ' variant of the binary operator, and "__iadd__()" for the '
+ 'in-place\n'
+ ' variant.\n'
+ '\n'
+ '* For objects *x* and *y*, first "x.__op__(y)" is tried. If '
+ 'this is\n'
+ ' not implemented or returns "NotImplemented", '
+ '"y.__rop__(x)" is\n'
+ ' tried. If this is also not implemented or returns '
+ '"NotImplemented",\n'
+ ' a "TypeError" exception is raised. But see the following '
+ 'exception:\n'
+ '\n'
+ '* Exception to the previous item: if the left operand is an '
+ 'instance\n'
+ ' of a built-in type or a new-style class, and the right '
+ 'operand is an\n'
+ ' instance of a proper subclass of that type or class and '
+ 'overrides\n'
+ ' the base\'s "__rop__()" method, the right operand\'s '
+ '"__rop__()"\n'
+ ' method is tried *before* the left operand\'s "__op__()" '
+ 'method.\n'
+ '\n'
+ ' This is done so that a subclass can completely override '
+ 'binary\n'
+ ' operators. Otherwise, the left operand\'s "__op__()" '
+ 'method would\n'
+ ' always accept the right operand: when an instance of a '
+ 'given class\n'
+ ' is expected, an instance of a subclass of that class is '
+ 'always\n'
+ ' acceptable.\n'
+ '\n'
+ '* When either operand type defines a coercion, this coercion '
+ 'is\n'
+ ' called before that type\'s "__op__()" or "__rop__()" '
+ 'method is\n'
+ ' called, but no sooner. If the coercion returns an object '
+ 'of a\n'
+ ' different type for the operand whose coercion is invoked, '
+ 'part of\n'
+ ' the process is redone using the new object.\n'
+ '\n'
+ '* When an in-place operator (like \'"+="\') is used, if the '
+ 'left\n'
+ ' operand implements "__iop__()", it is invoked without any '
+ 'coercion.\n'
+ ' When the operation falls back to "__op__()" and/or '
+ '"__rop__()", the\n'
+ ' normal coercion rules apply.\n'
+ '\n'
+ '* In "x + y", if *x* is a sequence that implements sequence\n'
+ ' concatenation, sequence concatenation is invoked.\n'
+ '\n'
+ '* In "x * y", if one operand is a sequence that implements '
+ 'sequence\n'
+ ' repetition, and the other is an integer ("int" or "long"), '
+ 'sequence\n'
+ ' repetition is invoked.\n'
+ '\n'
+ '* Rich comparisons (implemented by methods "__eq__()" and so '
+ 'on)\n'
+ ' never use coercion. Three-way comparison (implemented by\n'
+ ' "__cmp__()") does use coercion under the same conditions '
+ 'as other\n'
+ ' binary operations use it.\n'
+ '\n'
+ '* In the current implementation, the built-in numeric types '
+ '"int",\n'
+ ' "long", "float", and "complex" do not use coercion. All '
+ 'these types\n'
+ ' implement a "__coerce__()" method, for use by the '
+ 'built-in\n'
+ ' "coerce()" function.\n'
+ '\n'
+ ' Changed in version 2.7: The complex type no longer makes '
+ 'implicit\n'
+ ' calls to the "__coerce__()" method for mixed-type binary '
+ 'arithmetic\n'
+ ' operations.\n'
+ '\n'
+ '\n'
+ 'With Statement Context Managers\n'
+ '===============================\n'
+ '\n'
+ 'New in version 2.5.\n'
+ '\n'
+ 'A *context manager* is an object that defines the runtime '
+ 'context to\n'
+ 'be established when executing a "with" statement. The '
+ 'context manager\n'
+ 'handles the entry into, and the exit from, the desired '
+ 'runtime context\n'
+ 'for the execution of the block of code. Context managers '
+ 'are normally\n'
+ 'invoked using the "with" statement (described in section The '
+ 'with\n'
+ 'statement), but can also be used by directly invoking their '
+ 'methods.\n'
+ '\n'
+ 'Typical uses of context managers include saving and '
+ 'restoring various\n'
+ 'kinds of global state, locking and unlocking resources, '
+ 'closing opened\n'
+ 'files, etc.\n'
+ '\n'
+ 'For more information on context managers, see Context '
+ 'Manager Types.\n'
+ '\n'
+ 'object.__enter__(self)\n'
+ '\n'
+ ' Enter the runtime context related to this object. The '
+ '"with"\n'
+ " statement will bind this method's return value to the "
+ 'target(s)\n'
+ ' specified in the "as" clause of the statement, if any.\n'
+ '\n'
+ 'object.__exit__(self, exc_type, exc_value, traceback)\n'
+ '\n'
+ ' Exit the runtime context related to this object. The '
+ 'parameters\n'
+ ' describe the exception that caused the context to be '
+ 'exited. If the\n'
+ ' context was exited without an exception, all three '
+ 'arguments will\n'
+ ' be "None".\n'
+ '\n'
+ ' If an exception is supplied, and the method wishes to '
+ 'suppress the\n'
+ ' exception (i.e., prevent it from being propagated), it '
+ 'should\n'
+ ' return a true value. Otherwise, the exception will be '
+ 'processed\n'
+ ' normally upon exit from this method.\n'
+ '\n'
+ ' Note that "__exit__()" methods should not reraise the '
+ 'passed-in\n'
+ " exception; this is the caller's responsibility.\n"
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 343** - The "with" statement\n'
+ ' The specification, background, and examples for the '
+ 'Python "with"\n'
+ ' statement.\n'
+ '\n'
+ '\n'
+ 'Special method lookup for old-style classes\n'
+ '===========================================\n'
+ '\n'
+ 'For old-style classes, special methods are always looked up '
+ 'in exactly\n'
+ 'the same way as any other method or attribute. This is the '
+ 'case\n'
+ 'regardless of whether the method is being looked up '
+ 'explicitly as in\n'
+ '"x.__getitem__(i)" or implicitly as in "x[i]".\n'
+ '\n'
+ 'This behaviour means that special methods may exhibit '
+ 'different\n'
+ 'behaviour for different instances of a single old-style '
+ 'class if the\n'
+ 'appropriate special attributes are set differently:\n'
+ '\n'
+ ' >>> class C:\n'
+ ' ... pass\n'
+ ' ...\n'
+ ' >>> c1 = C()\n'
+ ' >>> c2 = C()\n'
+ ' >>> c1.__len__ = lambda: 5\n'
+ ' >>> c2.__len__ = lambda: 9\n'
+ ' >>> len(c1)\n'
+ ' 5\n'
+ ' >>> len(c2)\n'
+ ' 9\n'
+ '\n'
+ '\n'
+ 'Special method lookup for new-style classes\n'
+ '===========================================\n'
+ '\n'
+ 'For new-style classes, implicit invocations of special '
+ 'methods are\n'
+ "only guaranteed to work correctly if defined on an object's "
+ 'type, not\n'
+ "in the object's instance dictionary. That behaviour is the "
+ 'reason why\n'
+ 'the following code raises an exception (unlike the '
+ 'equivalent example\n'
+ 'with old-style classes):\n'
+ '\n'
+ ' >>> class C(object):\n'
+ ' ... pass\n'
+ ' ...\n'
+ ' >>> c = C()\n'
+ ' >>> c.__len__ = lambda: 5\n'
+ ' >>> len(c)\n'
+ ' Traceback (most recent call last):\n'
+ ' File "<stdin>", line 1, in <module>\n'
+ " TypeError: object of type 'C' has no len()\n"
+ '\n'
+ 'The rationale behind this behaviour lies with a number of '
+ 'special\n'
+ 'methods such as "__hash__()" and "__repr__()" that are '
+ 'implemented by\n'
+ 'all objects, including type objects. If the implicit lookup '
+ 'of these\n'
+ 'methods used the conventional lookup process, they would '
+ 'fail when\n'
+ 'invoked on the type object itself:\n'
+ '\n'
+ ' >>> 1 .__hash__() == hash(1)\n'
+ ' True\n'
+ ' >>> int.__hash__() == hash(int)\n'
+ ' Traceback (most recent call last):\n'
+ ' File "<stdin>", line 1, in <module>\n'
+ " TypeError: descriptor '__hash__' of 'int' object needs an "
+ 'argument\n'
+ '\n'
+ 'Incorrectly attempting to invoke an unbound method of a '
+ 'class in this\n'
+ "way is sometimes referred to as 'metaclass confusion', and "
+ 'is avoided\n'
+ 'by bypassing the instance when looking up special methods:\n'
+ '\n'
+ ' >>> type(1).__hash__(1) == hash(1)\n'
+ ' True\n'
+ ' >>> type(int).__hash__(int) == hash(int)\n'
+ ' True\n'
+ '\n'
+ 'In addition to bypassing any instance attributes in the '
+ 'interest of\n'
+ 'correctness, implicit special method lookup generally also '
+ 'bypasses\n'
+ 'the "__getattribute__()" method even of the object\'s '
+ 'metaclass:\n'
+ '\n'
+ ' >>> class Meta(type):\n'
+ ' ... def __getattribute__(*args):\n'
+ ' ... print "Metaclass getattribute invoked"\n'
+ ' ... return type.__getattribute__(*args)\n'
+ ' ...\n'
+ ' >>> class C(object):\n'
+ ' ... __metaclass__ = Meta\n'
+ ' ... def __len__(self):\n'
+ ' ... return 10\n'
+ ' ... def __getattribute__(*args):\n'
+ ' ... print "Class getattribute invoked"\n'
+ ' ... return object.__getattribute__(*args)\n'
+ ' ...\n'
+ ' >>> c = C()\n'
+ ' >>> c.__len__() # Explicit lookup via '
+ 'instance\n'
+ ' Class getattribute invoked\n'
+ ' 10\n'
+ ' >>> type(c).__len__(c) # Explicit lookup via '
+ 'type\n'
+ ' Metaclass getattribute invoked\n'
+ ' 10\n'
+ ' >>> len(c) # Implicit lookup\n'
+ ' 10\n'
+ '\n'
+ 'Bypassing the "__getattribute__()" machinery in this fashion '
+ 'provides\n'
+ 'significant scope for speed optimisations within the '
+ 'interpreter, at\n'
+ 'the cost of some flexibility in the handling of special '
+ 'methods (the\n'
+ 'special method *must* be set on the class object itself in '
+ 'order to be\n'
+ 'consistently invoked by the interpreter).\n'
+ '\n'
+ '-[ Footnotes ]-\n'
+ '\n'
+ "[1] It *is* possible in some cases to change an object's "
+ 'type,\n'
+ " under certain controlled conditions. It generally isn't "
+ 'a good\n'
+ ' idea though, since it can lead to some very strange '
+ 'behaviour if\n'
+ ' it is handled incorrectly.\n'
+ '\n'
+ '[2] For operands of the same type, it is assumed that if the '
+ 'non-\n'
+ ' reflected method (such as "__add__()") fails the '
+ 'operation is not\n'
+ ' supported, which is why the reflected method is not '
+ 'called.\n',
+ 'string-methods': '\n'
+ 'String Methods\n'
+ '**************\n'
+ '\n'
+ 'Below are listed the string methods which both 8-bit '
+ 'strings and\n'
+ 'Unicode objects support. Some of them are also available '
+ 'on\n'
+ '"bytearray" objects.\n'
+ '\n'
+ "In addition, Python's strings support the sequence type "
+ 'methods\n'
+ 'described in the Sequence Types --- str, unicode, list, '
+ 'tuple,\n'
+ 'bytearray, buffer, xrange section. To output formatted '
+ 'strings use\n'
+ 'template strings or the "%" operator described in the '
+ 'String\n'
+ 'Formatting Operations section. Also, see the "re" module '
+ 'for string\n'
+ 'functions based on regular expressions.\n'
+ '\n'
+ 'str.capitalize()\n'
+ '\n'
+ ' Return a copy of the string with its first character '
+ 'capitalized\n'
+ ' and the rest lowercased.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.center(width[, fillchar])\n'
+ '\n'
+ ' Return centered in a string of length *width*. Padding '
+ 'is done\n'
+ ' using the specified *fillchar* (default is a space).\n'
+ '\n'
+ ' Changed in version 2.4: Support for the *fillchar* '
+ 'argument.\n'
+ '\n'
+ 'str.count(sub[, start[, end]])\n'
+ '\n'
+ ' Return the number of non-overlapping occurrences of '
+ 'substring *sub*\n'
+ ' in the range [*start*, *end*]. Optional arguments '
+ '*start* and\n'
+ ' *end* are interpreted as in slice notation.\n'
+ '\n'
+ 'str.decode([encoding[, errors]])\n'
+ '\n'
+ ' Decodes the string using the codec registered for '
+ '*encoding*.\n'
+ ' *encoding* defaults to the default string encoding. '
+ '*errors* may\n'
+ ' be given to set a different error handling scheme. The '
+ 'default is\n'
+ ' "\'strict\'", meaning that encoding errors raise '
+ '"UnicodeError".\n'
+ ' Other possible values are "\'ignore\'", "\'replace\'" '
+ 'and any other\n'
+ ' name registered via "codecs.register_error()", see '
+ 'section Codec\n'
+ ' Base Classes.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' Changed in version 2.3: Support for other error '
+ 'handling schemes\n'
+ ' added.\n'
+ '\n'
+ ' Changed in version 2.7: Support for keyword arguments '
+ 'added.\n'
+ '\n'
+ 'str.encode([encoding[, errors]])\n'
+ '\n'
+ ' Return an encoded version of the string. Default '
+ 'encoding is the\n'
+ ' current default string encoding. *errors* may be given '
+ 'to set a\n'
+ ' different error handling scheme. The default for '
+ '*errors* is\n'
+ ' "\'strict\'", meaning that encoding errors raise a '
+ '"UnicodeError".\n'
+ ' Other possible values are "\'ignore\'", "\'replace\'",\n'
+ ' "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any '
+ 'other name\n'
+ ' registered via "codecs.register_error()", see section '
+ 'Codec Base\n'
+ ' Classes. For a list of possible encodings, see section '
+ 'Standard\n'
+ ' Encodings.\n'
+ '\n'
+ ' New in version 2.0.\n'
+ '\n'
+ ' Changed in version 2.3: Support for '
+ '"\'xmlcharrefreplace\'" and\n'
+ ' "\'backslashreplace\'" and other error handling schemes '
+ 'added.\n'
+ '\n'
+ ' Changed in version 2.7: Support for keyword arguments '
+ 'added.\n'
+ '\n'
+ 'str.endswith(suffix[, start[, end]])\n'
+ '\n'
+ ' Return "True" if the string ends with the specified '
+ '*suffix*,\n'
+ ' otherwise return "False". *suffix* can also be a tuple '
+ 'of suffixes\n'
+ ' to look for. With optional *start*, test beginning at '
+ 'that\n'
+ ' position. With optional *end*, stop comparing at that '
+ 'position.\n'
+ '\n'
+ ' Changed in version 2.5: Accept tuples as *suffix*.\n'
+ '\n'
+ 'str.expandtabs([tabsize])\n'
+ '\n'
+ ' Return a copy of the string where all tab characters '
+ 'are replaced\n'
+ ' by one or more spaces, depending on the current column '
+ 'and the\n'
+ ' given tab size. Tab positions occur every *tabsize* '
+ 'characters\n'
+ ' (default is 8, giving tab positions at columns 0, 8, 16 '
+ 'and so on).\n'
+ ' To expand the string, the current column is set to zero '
+ 'and the\n'
+ ' string is examined character by character. If the '
+ 'character is a\n'
+ ' tab ("\\t"), one or more space characters are inserted '
+ 'in the result\n'
+ ' until the current column is equal to the next tab '
+ 'position. (The\n'
+ ' tab character itself is not copied.) If the character '
+ 'is a newline\n'
+ ' ("\\n") or return ("\\r"), it is copied and the current '
+ 'column is\n'
+ ' reset to zero. Any other character is copied unchanged '
+ 'and the\n'
+ ' current column is incremented by one regardless of how '
+ 'the\n'
+ ' character is represented when printed.\n'
+ '\n'
+ " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n"
+ " '01 012 0123 01234'\n"
+ " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n"
+ " '01 012 0123 01234'\n"
+ '\n'
+ 'str.find(sub[, start[, end]])\n'
+ '\n'
+ ' Return the lowest index in the string where substring '
+ '*sub* is\n'
+ ' found within the slice "s[start:end]". Optional '
+ 'arguments *start*\n'
+ ' and *end* are interpreted as in slice notation. Return '
+ '"-1" if\n'
+ ' *sub* is not found.\n'
+ '\n'
+ ' Note: The "find()" method should be used only if you '
+ 'need to know\n'
+ ' the position of *sub*. To check if *sub* is a '
+ 'substring or not,\n'
+ ' use the "in" operator:\n'
+ '\n'
+ " >>> 'Py' in 'Python'\n"
+ ' True\n'
+ '\n'
+ 'str.format(*args, **kwargs)\n'
+ '\n'
+ ' Perform a string formatting operation. The string on '
+ 'which this\n'
+ ' method is called can contain literal text or '
+ 'replacement fields\n'
+ ' delimited by braces "{}". Each replacement field '
+ 'contains either\n'
+ ' the numeric index of a positional argument, or the name '
+ 'of a\n'
+ ' keyword argument. Returns a copy of the string where '
+ 'each\n'
+ ' replacement field is replaced with the string value of '
+ 'the\n'
+ ' corresponding argument.\n'
+ '\n'
+ ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n'
+ " 'The sum of 1 + 2 is 3'\n"
+ '\n'
+ ' See Format String Syntax for a description of the '
+ 'various\n'
+ ' formatting options that can be specified in format '
+ 'strings.\n'
+ '\n'
+ ' This method of string formatting is the new standard in '
+ 'Python 3,\n'
+ ' and should be preferred to the "%" formatting described '
+ 'in String\n'
+ ' Formatting Operations in new code.\n'
+ '\n'
+ ' New in version 2.6.\n'
+ '\n'
+ 'str.index(sub[, start[, end]])\n'
+ '\n'
+ ' Like "find()", but raise "ValueError" when the '
+ 'substring is not\n'
+ ' found.\n'
+ '\n'
+ 'str.isalnum()\n'
+ '\n'
+ ' Return true if all characters in the string are '
+ 'alphanumeric and\n'
+ ' there is at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isalpha()\n'
+ '\n'
+ ' Return true if all characters in the string are '
+ 'alphabetic and\n'
+ ' there is at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isdigit()\n'
+ '\n'
+ ' Return true if all characters in the string are digits '
+ 'and there is\n'
+ ' at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.islower()\n'
+ '\n'
+ ' Return true if all cased characters [4] in the string '
+ 'are lowercase\n'
+ ' and there is at least one cased character, false '
+ 'otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isspace()\n'
+ '\n'
+ ' Return true if there are only whitespace characters in '
+ 'the string\n'
+ ' and there is at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.istitle()\n'
+ '\n'
+ ' Return true if the string is a titlecased string and '
+ 'there is at\n'
+ ' least one character, for example uppercase characters '
+ 'may only\n'
+ ' follow uncased characters and lowercase characters only '
+ 'cased ones.\n'
+ ' Return false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isupper()\n'
+ '\n'
+ ' Return true if all cased characters [4] in the string '
+ 'are uppercase\n'
+ ' and there is at least one cased character, false '
+ 'otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.join(iterable)\n'
+ '\n'
+ ' Return a string which is the concatenation of the '
+ 'strings in\n'
+ ' *iterable*. A "TypeError" will be raised if there are '
+ 'any non-\n'
+ ' string values in *iterable*, including "bytes" '
+ 'objects. The\n'
+ ' separator between elements is the string providing this '
+ 'method.\n'
+ '\n'
+ 'str.ljust(width[, fillchar])\n'
+ '\n'
+ ' Return the string left justified in a string of length '
+ '*width*.\n'
+ ' Padding is done using the specified *fillchar* (default '
+ 'is a\n'
+ ' space). The original string is returned if *width* is '
+ 'less than or\n'
+ ' equal to "len(s)".\n'
+ '\n'
+ ' Changed in version 2.4: Support for the *fillchar* '
+ 'argument.\n'
+ '\n'
+ 'str.lower()\n'
+ '\n'
+ ' Return a copy of the string with all the cased '
+ 'characters [4]\n'
+ ' converted to lowercase.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.lstrip([chars])\n'
+ '\n'
+ ' Return a copy of the string with leading characters '
+ 'removed. The\n'
+ ' *chars* argument is a string specifying the set of '
+ 'characters to be\n'
+ ' removed. If omitted or "None", the *chars* argument '
+ 'defaults to\n'
+ ' removing whitespace. The *chars* argument is not a '
+ 'prefix; rather,\n'
+ ' all combinations of its values are stripped:\n'
+ '\n'
+ " >>> ' spacious '.lstrip()\n"
+ " 'spacious '\n"
+ " >>> 'www.example.com'.lstrip('cmowz.')\n"
+ " 'example.com'\n"
+ '\n'
+ ' Changed in version 2.2.2: Support for the *chars* '
+ 'argument.\n'
+ '\n'
+ 'str.partition(sep)\n'
+ '\n'
+ ' Split the string at the first occurrence of *sep*, and '
+ 'return a\n'
+ ' 3-tuple containing the part before the separator, the '
+ 'separator\n'
+ ' itself, and the part after the separator. If the '
+ 'separator is not\n'
+ ' found, return a 3-tuple containing the string itself, '
+ 'followed by\n'
+ ' two empty strings.\n'
+ '\n'
+ ' New in version 2.5.\n'
+ '\n'
+ 'str.replace(old, new[, count])\n'
+ '\n'
+ ' Return a copy of the string with all occurrences of '
+ 'substring *old*\n'
+ ' replaced by *new*. If the optional argument *count* is '
+ 'given, only\n'
+ ' the first *count* occurrences are replaced.\n'
+ '\n'
+ 'str.rfind(sub[, start[, end]])\n'
+ '\n'
+ ' Return the highest index in the string where substring '
+ '*sub* is\n'
+ ' found, such that *sub* is contained within '
+ '"s[start:end]".\n'
+ ' Optional arguments *start* and *end* are interpreted as '
+ 'in slice\n'
+ ' notation. Return "-1" on failure.\n'
+ '\n'
+ 'str.rindex(sub[, start[, end]])\n'
+ '\n'
+ ' Like "rfind()" but raises "ValueError" when the '
+ 'substring *sub* is\n'
+ ' not found.\n'
+ '\n'
+ 'str.rjust(width[, fillchar])\n'
+ '\n'
+ ' Return the string right justified in a string of length '
+ '*width*.\n'
+ ' Padding is done using the specified *fillchar* (default '
+ 'is a\n'
+ ' space). The original string is returned if *width* is '
+ 'less than or\n'
+ ' equal to "len(s)".\n'
+ '\n'
+ ' Changed in version 2.4: Support for the *fillchar* '
+ 'argument.\n'
+ '\n'
+ 'str.rpartition(sep)\n'
+ '\n'
+ ' Split the string at the last occurrence of *sep*, and '
+ 'return a\n'
+ ' 3-tuple containing the part before the separator, the '
+ 'separator\n'
+ ' itself, and the part after the separator. If the '
+ 'separator is not\n'
+ ' found, return a 3-tuple containing two empty strings, '
+ 'followed by\n'
+ ' the string itself.\n'
+ '\n'
+ ' New in version 2.5.\n'
+ '\n'
+ 'str.rsplit([sep[, maxsplit]])\n'
+ '\n'
+ ' Return a list of the words in the string, using *sep* '
+ 'as the\n'
+ ' delimiter string. If *maxsplit* is given, at most '
+ '*maxsplit* splits\n'
+ ' are done, the *rightmost* ones. If *sep* is not '
+ 'specified or\n'
+ ' "None", any whitespace string is a separator. Except '
+ 'for splitting\n'
+ ' from the right, "rsplit()" behaves like "split()" which '
+ 'is\n'
+ ' described in detail below.\n'
+ '\n'
+ ' New in version 2.4.\n'
+ '\n'
+ 'str.rstrip([chars])\n'
+ '\n'
+ ' Return a copy of the string with trailing characters '
+ 'removed. The\n'
+ ' *chars* argument is a string specifying the set of '
+ 'characters to be\n'
+ ' removed. If omitted or "None", the *chars* argument '
+ 'defaults to\n'
+ ' removing whitespace. The *chars* argument is not a '
+ 'suffix; rather,\n'
+ ' all combinations of its values are stripped:\n'
+ '\n'
+ " >>> ' spacious '.rstrip()\n"
+ " ' spacious'\n"
+ " >>> 'mississippi'.rstrip('ipz')\n"
+ " 'mississ'\n"
+ '\n'
+ ' Changed in version 2.2.2: Support for the *chars* '
+ 'argument.\n'
+ '\n'
+ 'str.split([sep[, maxsplit]])\n'
+ '\n'
+ ' Return a list of the words in the string, using *sep* '
+ 'as the\n'
+ ' delimiter string. If *maxsplit* is given, at most '
+ '*maxsplit*\n'
+ ' splits are done (thus, the list will have at most '
+ '"maxsplit+1"\n'
+ ' elements). If *maxsplit* is not specified or "-1", '
+ 'then there is\n'
+ ' no limit on the number of splits (all possible splits '
+ 'are made).\n'
+ '\n'
+ ' If *sep* is given, consecutive delimiters are not '
+ 'grouped together\n'
+ ' and are deemed to delimit empty strings (for example,\n'
+ ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', '
+ '\'2\']"). The *sep* argument\n'
+ ' may consist of multiple characters (for example,\n'
+ ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', '
+ '\'3\']"). Splitting an\n'
+ ' empty string with a specified separator returns '
+ '"[\'\']".\n'
+ '\n'
+ ' If *sep* is not specified or is "None", a different '
+ 'splitting\n'
+ ' algorithm is applied: runs of consecutive whitespace '
+ 'are regarded\n'
+ ' as a single separator, and the result will contain no '
+ 'empty strings\n'
+ ' at the start or end if the string has leading or '
+ 'trailing\n'
+ ' whitespace. Consequently, splitting an empty string or '
+ 'a string\n'
+ ' consisting of just whitespace with a "None" separator '
+ 'returns "[]".\n'
+ '\n'
+ ' For example, "\' 1 2 3 \'.split()" returns "[\'1\', '
+ '\'2\', \'3\']", and\n'
+ ' "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', '
+ '\'2 3 \']".\n'
+ '\n'
+ 'str.splitlines([keepends])\n'
+ '\n'
+ ' Return a list of the lines in the string, breaking at '
+ 'line\n'
+ ' boundaries. This method uses the *universal newlines* '
+ 'approach to\n'
+ ' splitting lines. Line breaks are not included in the '
+ 'resulting list\n'
+ ' unless *keepends* is given and true.\n'
+ '\n'
+ ' Python recognizes ""\\r"", ""\\n"", and ""\\r\\n"" as '
+ 'line boundaries\n'
+ ' for 8-bit strings.\n'
+ '\n'
+ ' For example:\n'
+ '\n'
+ " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines()\n"
+ " ['ab c', '', 'de fg', 'kl']\n"
+ " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines(True)\n"
+ " ['ab c\\n', '\\n', 'de fg\\r', 'kl\\r\\n']\n"
+ '\n'
+ ' Unlike "split()" when a delimiter string *sep* is '
+ 'given, this\n'
+ ' method returns an empty list for the empty string, and '
+ 'a terminal\n'
+ ' line break does not result in an extra line:\n'
+ '\n'
+ ' >>> "".splitlines()\n'
+ ' []\n'
+ ' >>> "One line\\n".splitlines()\n'
+ " ['One line']\n"
+ '\n'
+ ' For comparison, "split(\'\\n\')" gives:\n'
+ '\n'
+ " >>> ''.split('\\n')\n"
+ " ['']\n"
+ " >>> 'Two lines\\n'.split('\\n')\n"
+ " ['Two lines', '']\n"
+ '\n'
+ 'unicode.splitlines([keepends])\n'
+ '\n'
+ ' Return a list of the lines in the string, like '
+ '"str.splitlines()".\n'
+ ' However, the Unicode method splits on the following '
+ 'line\n'
+ ' boundaries, which are a superset of the *universal '
+ 'newlines*\n'
+ ' recognized for 8-bit strings.\n'
+ '\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | Representation | '
+ 'Description |\n'
+ ' '
+ '+=========================+===============================+\n'
+ ' | "\\n" | Line '
+ 'Feed |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\r" | Carriage '
+ 'Return |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\r\\n" | Carriage Return + Line '
+ 'Feed |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\v" or "\\x0b" | Line '
+ 'Tabulation |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\f" or "\\x0c" | Form '
+ 'Feed |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\x1c" | File '
+ 'Separator |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\x1d" | Group '
+ 'Separator |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\x1e" | Record '
+ 'Separator |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\x85" | Next Line (C1 Control '
+ 'Code) |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\u2028" | Line '
+ 'Separator |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ ' | "\\u2029" | Paragraph '
+ 'Separator |\n'
+ ' '
+ '+-------------------------+-------------------------------+\n'
+ '\n'
+ ' Changed in version 2.7: "\\v" and "\\f" added to list '
+ 'of line\n'
+ ' boundaries.\n'
+ '\n'
+ 'str.startswith(prefix[, start[, end]])\n'
+ '\n'
+ ' Return "True" if string starts with the *prefix*, '
+ 'otherwise return\n'
+ ' "False". *prefix* can also be a tuple of prefixes to '
+ 'look for.\n'
+ ' With optional *start*, test string beginning at that '
+ 'position.\n'
+ ' With optional *end*, stop comparing string at that '
+ 'position.\n'
+ '\n'
+ ' Changed in version 2.5: Accept tuples as *prefix*.\n'
+ '\n'
+ 'str.strip([chars])\n'
+ '\n'
+ ' Return a copy of the string with the leading and '
+ 'trailing\n'
+ ' characters removed. The *chars* argument is a string '
+ 'specifying the\n'
+ ' set of characters to be removed. If omitted or "None", '
+ 'the *chars*\n'
+ ' argument defaults to removing whitespace. The *chars* '
+ 'argument is\n'
+ ' not a prefix or suffix; rather, all combinations of its '
+ 'values are\n'
+ ' stripped:\n'
+ '\n'
+ " >>> ' spacious '.strip()\n"
+ " 'spacious'\n"
+ " >>> 'www.example.com'.strip('cmowz.')\n"
+ " 'example'\n"
+ '\n'
+ ' Changed in version 2.2.2: Support for the *chars* '
+ 'argument.\n'
+ '\n'
+ 'str.swapcase()\n'
+ '\n'
+ ' Return a copy of the string with uppercase characters '
+ 'converted to\n'
+ ' lowercase and vice versa.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.title()\n'
+ '\n'
+ ' Return a titlecased version of the string where words '
+ 'start with an\n'
+ ' uppercase character and the remaining characters are '
+ 'lowercase.\n'
+ '\n'
+ ' The algorithm uses a simple language-independent '
+ 'definition of a\n'
+ ' word as groups of consecutive letters. The definition '
+ 'works in\n'
+ ' many contexts but it means that apostrophes in '
+ 'contractions and\n'
+ ' possessives form word boundaries, which may not be the '
+ 'desired\n'
+ ' result:\n'
+ '\n'
+ ' >>> "they\'re bill\'s friends from the UK".title()\n'
+ ' "They\'Re Bill\'S Friends From The Uk"\n'
+ '\n'
+ ' A workaround for apostrophes can be constructed using '
+ 'regular\n'
+ ' expressions:\n'
+ '\n'
+ ' >>> import re\n'
+ ' >>> def titlecase(s):\n'
+ ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n'
+ ' ... lambda mo: '
+ 'mo.group(0)[0].upper() +\n'
+ ' ... '
+ 'mo.group(0)[1:].lower(),\n'
+ ' ... s)\n'
+ ' ...\n'
+ ' >>> titlecase("they\'re bill\'s friends.")\n'
+ ' "They\'re Bill\'s Friends."\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.translate(table[, deletechars])\n'
+ '\n'
+ ' Return a copy of the string where all characters '
+ 'occurring in the\n'
+ ' optional argument *deletechars* are removed, and the '
+ 'remaining\n'
+ ' characters have been mapped through the given '
+ 'translation table,\n'
+ ' which must be a string of length 256.\n'
+ '\n'
+ ' You can use the "maketrans()" helper function in the '
+ '"string"\n'
+ ' module to create a translation table. For string '
+ 'objects, set the\n'
+ ' *table* argument to "None" for translations that only '
+ 'delete\n'
+ ' characters:\n'
+ '\n'
+ " >>> 'read this short text'.translate(None, 'aeiou')\n"
+ " 'rd ths shrt txt'\n"
+ '\n'
+ ' New in version 2.6: Support for a "None" *table* '
+ 'argument.\n'
+ '\n'
+ ' For Unicode objects, the "translate()" method does not '
+ 'accept the\n'
+ ' optional *deletechars* argument. Instead, it returns a '
+ 'copy of the\n'
+ ' *s* where all characters have been mapped through the '
+ 'given\n'
+ ' translation table which must be a mapping of Unicode '
+ 'ordinals to\n'
+ ' Unicode ordinals, Unicode strings or "None". Unmapped '
+ 'characters\n'
+ ' are left untouched. Characters mapped to "None" are '
+ 'deleted. Note,\n'
+ ' a more flexible approach is to create a custom '
+ 'character mapping\n'
+ ' codec using the "codecs" module (see "encodings.cp1251" '
+ 'for an\n'
+ ' example).\n'
+ '\n'
+ 'str.upper()\n'
+ '\n'
+ ' Return a copy of the string with all the cased '
+ 'characters [4]\n'
+ ' converted to uppercase. Note that '
+ '"str.upper().isupper()" might be\n'
+ ' "False" if "s" contains uncased characters or if the '
+ 'Unicode\n'
+ ' category of the resulting character(s) is not "Lu" '
+ '(Letter,\n'
+ ' uppercase), but e.g. "Lt" (Letter, titlecase).\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.zfill(width)\n'
+ '\n'
+ ' Return the numeric string left filled with zeros in a '
+ 'string of\n'
+ ' length *width*. A sign prefix is handled correctly. '
+ 'The original\n'
+ ' string is returned if *width* is less than or equal to '
+ '"len(s)".\n'
+ '\n'
+ ' New in version 2.2.2.\n'
+ '\n'
+ 'The following methods are present only on unicode '
+ 'objects:\n'
+ '\n'
+ 'unicode.isnumeric()\n'
+ '\n'
+ ' Return "True" if there are only numeric characters in '
+ 'S, "False"\n'
+ ' otherwise. Numeric characters include digit characters, '
+ 'and all\n'
+ ' characters that have the Unicode numeric value '
+ 'property, e.g.\n'
+ ' U+2155, VULGAR FRACTION ONE FIFTH.\n'
+ '\n'
+ 'unicode.isdecimal()\n'
+ '\n'
+ ' Return "True" if there are only decimal characters in '
+ 'S, "False"\n'
+ ' otherwise. Decimal characters include digit characters, '
+ 'and all\n'
+ ' characters that can be used to form decimal-radix '
+ 'numbers, e.g.\n'
+ ' U+0660, ARABIC-INDIC DIGIT ZERO.\n',
+ 'strings': '\n'
+ 'String literals\n'
+ '***************\n'
+ '\n'
+ 'String literals are described by the following lexical '
+ 'definitions:\n'
+ '\n'
+ ' stringliteral ::= [stringprefix](shortstring | longstring)\n'
+ ' stringprefix ::= "r" | "u" | "ur" | "R" | "U" | "UR" | "Ur" '
+ '| "uR"\n'
+ ' | "b" | "B" | "br" | "Br" | "bR" | "BR"\n'
+ ' shortstring ::= "\'" shortstringitem* "\'" | \'"\' '
+ 'shortstringitem* \'"\'\n'
+ ' longstring ::= "\'\'\'" longstringitem* "\'\'\'"\n'
+ ' | \'"""\' longstringitem* \'"""\'\n'
+ ' shortstringitem ::= shortstringchar | escapeseq\n'
+ ' longstringitem ::= longstringchar | escapeseq\n'
+ ' shortstringchar ::= <any source character except "\\" or '
+ 'newline or the quote>\n'
+ ' longstringchar ::= <any source character except "\\">\n'
+ ' escapeseq ::= "\\" <any ASCII character>\n'
+ '\n'
+ 'One syntactic restriction not indicated by these productions is '
+ 'that\n'
+ 'whitespace is not allowed between the "stringprefix" and the rest '
+ 'of\n'
+ 'the string literal. The source character set is defined by the\n'
+ 'encoding declaration; it is ASCII if no encoding declaration is '
+ 'given\n'
+ 'in the source file; see section Encoding declarations.\n'
+ '\n'
+ 'In plain English: String literals can be enclosed in matching '
+ 'single\n'
+ 'quotes ("\'") or double quotes ("""). They can also be enclosed '
+ 'in\n'
+ 'matching groups of three single or double quotes (these are '
+ 'generally\n'
+ 'referred to as *triple-quoted strings*). The backslash ("\\")\n'
+ 'character is used to escape characters that otherwise have a '
+ 'special\n'
+ 'meaning, such as newline, backslash itself, or the quote '
+ 'character.\n'
+ 'String literals may optionally be prefixed with a letter "\'r\'" '
+ 'or\n'
+ '"\'R\'"; such strings are called *raw strings* and use different '
+ 'rules\n'
+ 'for interpreting backslash escape sequences. A prefix of "\'u\'" '
+ 'or\n'
+ '"\'U\'" makes the string a Unicode string. Unicode strings use '
+ 'the\n'
+ 'Unicode character set as defined by the Unicode Consortium and '
+ 'ISO\n'
+ '10646. Some additional escape sequences, described below, are\n'
+ 'available in Unicode strings. A prefix of "\'b\'" or "\'B\'" is '
+ 'ignored in\n'
+ 'Python 2; it indicates that the literal should become a bytes '
+ 'literal\n'
+ 'in Python 3 (e.g. when code is automatically converted with '
+ '2to3). A\n'
+ '"\'u\'" or "\'b\'" prefix may be followed by an "\'r\'" prefix.\n'
+ '\n'
+ 'In triple-quoted strings, unescaped newlines and quotes are '
+ 'allowed\n'
+ '(and are retained), except that three unescaped quotes in a row\n'
+ 'terminate the string. (A "quote" is the character used to open '
+ 'the\n'
+ 'string, i.e. either "\'" or """.)\n'
+ '\n'
+ 'Unless an "\'r\'" or "\'R\'" prefix is present, escape sequences '
+ 'in\n'
+ 'strings are interpreted according to rules similar to those used '
+ 'by\n'
+ 'Standard C. The recognized escape sequences are:\n'
+ '\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| Escape Sequence | Meaning | Notes '
+ '|\n'
+ '+===================+===================================+=========+\n'
+ '| "\\newline" | Ignored '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\\\" | Backslash ("\\") '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\\'" | Single quote ("\'") '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\"" | Double quote (""") '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\a" | ASCII Bell (BEL) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\b" | ASCII Backspace (BS) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\f" | ASCII Formfeed (FF) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\n" | ASCII Linefeed (LF) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\N{name}" | Character named *name* in the '
+ '| |\n'
+ '| | Unicode database (Unicode only) | '
+ '|\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\r" | ASCII Carriage Return (CR) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\t" | ASCII Horizontal Tab (TAB) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\uxxxx" | Character with 16-bit hex value | '
+ '(1) |\n'
+ '| | *xxxx* (Unicode only) | '
+ '|\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\Uxxxxxxxx" | Character with 32-bit hex value | '
+ '(2) |\n'
+ '| | *xxxxxxxx* (Unicode only) | '
+ '|\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\v" | ASCII Vertical Tab (VT) '
+ '| |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\ooo" | Character with octal value *ooo* | '
+ '(3,5) |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '| "\\xhh" | Character with hex value *hh* | '
+ '(4,5) |\n'
+ '+-------------------+-----------------------------------+---------+\n'
+ '\n'
+ 'Notes:\n'
+ '\n'
+ '1. Individual code units which form parts of a surrogate pair '
+ 'can\n'
+ ' be encoded using this escape sequence.\n'
+ '\n'
+ '2. Any Unicode character can be encoded this way, but characters\n'
+ ' outside the Basic Multilingual Plane (BMP) will be encoded '
+ 'using a\n'
+ ' surrogate pair if Python is compiled to use 16-bit code units '
+ '(the\n'
+ ' default).\n'
+ '\n'
+ '3. As in Standard C, up to three octal digits are accepted.\n'
+ '\n'
+ '4. Unlike in Standard C, exactly two hex digits are required.\n'
+ '\n'
+ '5. In a string literal, hexadecimal and octal escapes denote the\n'
+ ' byte with the given value; it is not necessary that the byte\n'
+ ' encodes a character in the source character set. In a Unicode\n'
+ ' literal, these escapes denote a Unicode character with the '
+ 'given\n'
+ ' value.\n'
+ '\n'
+ 'Unlike Standard C, all unrecognized escape sequences are left in '
+ 'the\n'
+ 'string unchanged, i.e., *the backslash is left in the string*. '
+ '(This\n'
+ 'behavior is useful when debugging: if an escape sequence is '
+ 'mistyped,\n'
+ 'the resulting output is more easily recognized as broken.) It is '
+ 'also\n'
+ 'important to note that the escape sequences marked as "(Unicode '
+ 'only)"\n'
+ 'in the table above fall into the category of unrecognized escapes '
+ 'for\n'
+ 'non-Unicode string literals.\n'
+ '\n'
+ 'When an "\'r\'" or "\'R\'" prefix is present, a character '
+ 'following a\n'
+ 'backslash is included in the string without change, and *all\n'
+ 'backslashes are left in the string*. For example, the string '
+ 'literal\n'
+ '"r"\\n"" consists of two characters: a backslash and a lowercase '
+ '"\'n\'".\n'
+ 'String quotes can be escaped with a backslash, but the backslash\n'
+ 'remains in the string; for example, "r"\\""" is a valid string '
+ 'literal\n'
+ 'consisting of two characters: a backslash and a double quote; '
+ '"r"\\""\n'
+ 'is not a valid string literal (even a raw string cannot end in an '
+ 'odd\n'
+ 'number of backslashes). Specifically, *a raw string cannot end '
+ 'in a\n'
+ 'single backslash* (since the backslash would escape the '
+ 'following\n'
+ 'quote character). Note also that a single backslash followed by '
+ 'a\n'
+ 'newline is interpreted as those two characters as part of the '
+ 'string,\n'
+ '*not* as a line continuation.\n'
+ '\n'
+ 'When an "\'r\'" or "\'R\'" prefix is used in conjunction with a '
+ '"\'u\'" or\n'
+ '"\'U\'" prefix, then the "\\uXXXX" and "\\UXXXXXXXX" escape '
+ 'sequences are\n'
+ 'processed while *all other backslashes are left in the string*. '
+ 'For\n'
+ 'example, the string literal "ur"\\u0062\\n"" consists of three '
+ 'Unicode\n'
+ "characters: 'LATIN SMALL LETTER B', 'REVERSE SOLIDUS', and "
+ "'LATIN\n"
+ "SMALL LETTER N'. Backslashes can be escaped with a preceding\n"
+ 'backslash; however, both remain in the string. As a result, '
+ '"\\uXXXX"\n'
+ 'escape sequences are only recognized when there are an odd number '
+ 'of\n'
+ 'backslashes.\n',
+ 'subscriptions': '\n'
+ 'Subscriptions\n'
+ '*************\n'
+ '\n'
+ 'A subscription selects an item of a sequence (string, tuple '
+ 'or list)\n'
+ 'or mapping (dictionary) object:\n'
+ '\n'
+ ' subscription ::= primary "[" expression_list "]"\n'
+ '\n'
+ 'The primary must evaluate to an object of a sequence or '
+ 'mapping type.\n'
+ '\n'
+ 'If the primary is a mapping, the expression list must '
+ 'evaluate to an\n'
+ 'object whose value is one of the keys of the mapping, and '
+ 'the\n'
+ 'subscription selects the value in the mapping that '
+ 'corresponds to that\n'
+ 'key. (The expression list is a tuple except if it has '
+ 'exactly one\n'
+ 'item.)\n'
+ '\n'
+ 'If the primary is a sequence, the expression (list) must '
+ 'evaluate to a\n'
+ 'plain integer. If this value is negative, the length of '
+ 'the sequence\n'
+ 'is added to it (so that, e.g., "x[-1]" selects the last '
+ 'item of "x".)\n'
+ 'The resulting value must be a nonnegative integer less than '
+ 'the number\n'
+ 'of items in the sequence, and the subscription selects the '
+ 'item whose\n'
+ 'index is that value (counting from zero).\n'
+ '\n'
+ "A string's items are characters. A character is not a "
+ 'separate data\n'
+ 'type but a string of exactly one character.\n',
+ 'truth': '\n'
+ 'Truth Value Testing\n'
+ '*******************\n'
+ '\n'
+ 'Any object can be tested for truth value, for use in an "if" or\n'
+ '"while" condition or as operand of the Boolean operations below. '
+ 'The\n'
+ 'following values are considered false:\n'
+ '\n'
+ '* "None"\n'
+ '\n'
+ '* "False"\n'
+ '\n'
+ '* zero of any numeric type, for example, "0", "0L", "0.0", "0j".\n'
+ '\n'
+ '* any empty sequence, for example, "\'\'", "()", "[]".\n'
+ '\n'
+ '* any empty mapping, for example, "{}".\n'
+ '\n'
+ '* instances of user-defined classes, if the class defines a\n'
+ ' "__nonzero__()" or "__len__()" method, when that method returns '
+ 'the\n'
+ ' integer zero or "bool" value "False". [1]\n'
+ '\n'
+ 'All other values are considered true --- so objects of many types '
+ 'are\n'
+ 'always true.\n'
+ '\n'
+ 'Operations and built-in functions that have a Boolean result '
+ 'always\n'
+ 'return "0" or "False" for false and "1" or "True" for true, unless\n'
+ 'otherwise stated. (Important exception: the Boolean operations '
+ '"or"\n'
+ 'and "and" always return one of their operands.)\n',
+ 'try': '\n'
+ 'The "try" statement\n'
+ '*******************\n'
+ '\n'
+ 'The "try" statement specifies exception handlers and/or cleanup code\n'
+ 'for a group of statements:\n'
+ '\n'
+ ' try_stmt ::= try1_stmt | try2_stmt\n'
+ ' try1_stmt ::= "try" ":" suite\n'
+ ' ("except" [expression [("as" | ",") identifier]] ":" '
+ 'suite)+\n'
+ ' ["else" ":" suite]\n'
+ ' ["finally" ":" suite]\n'
+ ' try2_stmt ::= "try" ":" suite\n'
+ ' "finally" ":" suite\n'
+ '\n'
+ 'Changed in version 2.5: In previous versions of Python,\n'
+ '"try"..."except"..."finally" did not work. "try"..."except" had to '
+ 'be\n'
+ 'nested in "try"..."finally".\n'
+ '\n'
+ 'The "except" clause(s) specify one or more exception handlers. When '
+ 'no\n'
+ 'exception occurs in the "try" clause, no exception handler is\n'
+ 'executed. When an exception occurs in the "try" suite, a search for '
+ 'an\n'
+ 'exception handler is started. This search inspects the except '
+ 'clauses\n'
+ 'in turn until one is found that matches the exception. An '
+ 'expression-\n'
+ 'less except clause, if present, must be last; it matches any\n'
+ 'exception. For an except clause with an expression, that expression\n'
+ 'is evaluated, and the clause matches the exception if the resulting\n'
+ 'object is "compatible" with the exception. An object is compatible\n'
+ 'with an exception if it is the class or a base class of the '
+ 'exception\n'
+ 'object, or a tuple containing an item compatible with the exception.\n'
+ '\n'
+ 'If no except clause matches the exception, the search for an '
+ 'exception\n'
+ 'handler continues in the surrounding code and on the invocation '
+ 'stack.\n'
+ '[1]\n'
+ '\n'
+ 'If the evaluation of an expression in the header of an except clause\n'
+ 'raises an exception, the original search for a handler is canceled '
+ 'and\n'
+ 'a search starts for the new exception in the surrounding code and on\n'
+ 'the call stack (it is treated as if the entire "try" statement '
+ 'raised\n'
+ 'the exception).\n'
+ '\n'
+ 'When a matching except clause is found, the exception is assigned to\n'
+ 'the target specified in that except clause, if present, and the '
+ 'except\n'
+ "clause's suite is executed. All except clauses must have an\n"
+ 'executable block. When the end of this block is reached, execution\n'
+ 'continues normally after the entire try statement. (This means that\n'
+ 'if two nested handlers exist for the same exception, and the '
+ 'exception\n'
+ 'occurs in the try clause of the inner handler, the outer handler '
+ 'will\n'
+ 'not handle the exception.)\n'
+ '\n'
+ "Before an except clause's suite is executed, details about the\n"
+ 'exception are assigned to three variables in the "sys" module:\n'
+ '"sys.exc_type" receives the object identifying the exception;\n'
+ '"sys.exc_value" receives the exception\'s parameter;\n'
+ '"sys.exc_traceback" receives a traceback object (see section The\n'
+ 'standard type hierarchy) identifying the point in the program where\n'
+ 'the exception occurred. These details are also available through the\n'
+ '"sys.exc_info()" function, which returns a tuple "(exc_type,\n'
+ 'exc_value, exc_traceback)". Use of the corresponding variables is\n'
+ 'deprecated in favor of this function, since their use is unsafe in a\n'
+ 'threaded program. As of Python 1.5, the variables are restored to\n'
+ 'their previous values (before the call) when returning from a '
+ 'function\n'
+ 'that handled an exception.\n'
+ '\n'
+ 'The optional "else" clause is executed if and when control flows off\n'
+ 'the end of the "try" clause. [2] Exceptions in the "else" clause are\n'
+ 'not handled by the preceding "except" clauses.\n'
+ '\n'
+ 'If "finally" is present, it specifies a \'cleanup\' handler. The '
+ '"try"\n'
+ 'clause is executed, including any "except" and "else" clauses. If '
+ 'an\n'
+ 'exception occurs in any of the clauses and is not handled, the\n'
+ 'exception is temporarily saved. The "finally" clause is executed. '
+ 'If\n'
+ 'there is a saved exception, it is re-raised at the end of the\n'
+ '"finally" clause. If the "finally" clause raises another exception '
+ 'or\n'
+ 'executes a "return" or "break" statement, the saved exception is\n'
+ 'discarded:\n'
+ '\n'
+ ' >>> def f():\n'
+ ' ... try:\n'
+ ' ... 1/0\n'
+ ' ... finally:\n'
+ ' ... return 42\n'
+ ' ...\n'
+ ' >>> f()\n'
+ ' 42\n'
+ '\n'
+ 'The exception information is not available to the program during\n'
+ 'execution of the "finally" clause.\n'
+ '\n'
+ 'When a "return", "break" or "continue" statement is executed in the\n'
+ '"try" suite of a "try"..."finally" statement, the "finally" clause '
+ 'is\n'
+ 'also executed \'on the way out.\' A "continue" statement is illegal '
+ 'in\n'
+ 'the "finally" clause. (The reason is a problem with the current\n'
+ 'implementation --- this restriction may be lifted in the future).\n'
+ '\n'
+ 'The return value of a function is determined by the last "return"\n'
+ 'statement executed. Since the "finally" clause always executes, a\n'
+ '"return" statement executed in the "finally" clause will always be '
+ 'the\n'
+ 'last one executed:\n'
+ '\n'
+ ' >>> def foo():\n'
+ ' ... try:\n'
+ " ... return 'try'\n"
+ ' ... finally:\n'
+ " ... return 'finally'\n"
+ ' ...\n'
+ ' >>> foo()\n'
+ " 'finally'\n"
+ '\n'
+ 'Additional information on exceptions can be found in section\n'
+ 'Exceptions, and information on using the "raise" statement to '
+ 'generate\n'
+ 'exceptions may be found in section The raise statement.\n',
+ 'types': '\n'
+ 'The standard type hierarchy\n'
+ '***************************\n'
+ '\n'
+ 'Below is a list of the types that are built into Python. '
+ 'Extension\n'
+ 'modules (written in C, Java, or other languages, depending on the\n'
+ 'implementation) can define additional types. Future versions of\n'
+ 'Python may add types to the type hierarchy (e.g., rational '
+ 'numbers,\n'
+ 'efficiently stored arrays of integers, etc.).\n'
+ '\n'
+ 'Some of the type descriptions below contain a paragraph listing\n'
+ "'special attributes.' These are attributes that provide access to "
+ 'the\n'
+ 'implementation and are not intended for general use. Their '
+ 'definition\n'
+ 'may change in the future.\n'
+ '\n'
+ 'None\n'
+ ' This type has a single value. There is a single object with '
+ 'this\n'
+ ' value. This object is accessed through the built-in name "None". '
+ 'It\n'
+ ' is used to signify the absence of a value in many situations, '
+ 'e.g.,\n'
+ " it is returned from functions that don't explicitly return\n"
+ ' anything. Its truth value is false.\n'
+ '\n'
+ 'NotImplemented\n'
+ ' This type has a single value. There is a single object with '
+ 'this\n'
+ ' value. This object is accessed through the built-in name\n'
+ ' "NotImplemented". Numeric methods and rich comparison methods '
+ 'may\n'
+ ' return this value if they do not implement the operation for '
+ 'the\n'
+ ' operands provided. (The interpreter will then try the '
+ 'reflected\n'
+ ' operation, or some other fallback, depending on the operator.) '
+ 'Its\n'
+ ' truth value is true.\n'
+ '\n'
+ 'Ellipsis\n'
+ ' This type has a single value. There is a single object with '
+ 'this\n'
+ ' value. This object is accessed through the built-in name\n'
+ ' "Ellipsis". It is used to indicate the presence of the "..." '
+ 'syntax\n'
+ ' in a slice. Its truth value is true.\n'
+ '\n'
+ '"numbers.Number"\n'
+ ' These are created by numeric literals and returned as results '
+ 'by\n'
+ ' arithmetic operators and arithmetic built-in functions. '
+ 'Numeric\n'
+ ' objects are immutable; once created their value never changes.\n'
+ ' Python numbers are of course strongly related to mathematical\n'
+ ' numbers, but subject to the limitations of numerical '
+ 'representation\n'
+ ' in computers.\n'
+ '\n'
+ ' Python distinguishes between integers, floating point numbers, '
+ 'and\n'
+ ' complex numbers:\n'
+ '\n'
+ ' "numbers.Integral"\n'
+ ' These represent elements from the mathematical set of '
+ 'integers\n'
+ ' (positive and negative).\n'
+ '\n'
+ ' There are three types of integers:\n'
+ '\n'
+ ' Plain integers\n'
+ ' These represent numbers in the range -2147483648 through\n'
+ ' 2147483647. (The range may be larger on machines with a\n'
+ ' larger natural word size, but not smaller.) When the '
+ 'result\n'
+ ' of an operation would fall outside this range, the result '
+ 'is\n'
+ ' normally returned as a long integer (in some cases, the\n'
+ ' exception "OverflowError" is raised instead). For the\n'
+ ' purpose of shift and mask operations, integers are assumed '
+ 'to\n'
+ " have a binary, 2's complement notation using 32 or more "
+ 'bits,\n'
+ ' and hiding no bits from the user (i.e., all 4294967296\n'
+ ' different bit patterns correspond to different values).\n'
+ '\n'
+ ' Long integers\n'
+ ' These represent numbers in an unlimited range, subject to\n'
+ ' available (virtual) memory only. For the purpose of '
+ 'shift\n'
+ ' and mask operations, a binary representation is assumed, '
+ 'and\n'
+ " negative numbers are represented in a variant of 2's\n"
+ ' complement which gives the illusion of an infinite string '
+ 'of\n'
+ ' sign bits extending to the left.\n'
+ '\n'
+ ' Booleans\n'
+ ' These represent the truth values False and True. The two\n'
+ ' objects representing the values "False" and "True" are '
+ 'the\n'
+ ' only Boolean objects. The Boolean type is a subtype of '
+ 'plain\n'
+ ' integers, and Boolean values behave like the values 0 and '
+ '1,\n'
+ ' respectively, in almost all contexts, the exception being\n'
+ ' that when converted to a string, the strings ""False"" or\n'
+ ' ""True"" are returned, respectively.\n'
+ '\n'
+ ' The rules for integer representation are intended to give '
+ 'the\n'
+ ' most meaningful interpretation of shift and mask operations\n'
+ ' involving negative integers and the least surprises when\n'
+ ' switching between the plain and long integer domains. Any\n'
+ ' operation, if it yields a result in the plain integer '
+ 'domain,\n'
+ ' will yield the same result in the long integer domain or '
+ 'when\n'
+ ' using mixed operands. The switch between domains is '
+ 'transparent\n'
+ ' to the programmer.\n'
+ '\n'
+ ' "numbers.Real" ("float")\n'
+ ' These represent machine-level double precision floating '
+ 'point\n'
+ ' numbers. You are at the mercy of the underlying machine\n'
+ ' architecture (and C or Java implementation) for the accepted\n'
+ ' range and handling of overflow. Python does not support '
+ 'single-\n'
+ ' precision floating point numbers; the savings in processor '
+ 'and\n'
+ ' memory usage that are usually the reason for using these are\n'
+ ' dwarfed by the overhead of using objects in Python, so there '
+ 'is\n'
+ ' no reason to complicate the language with two kinds of '
+ 'floating\n'
+ ' point numbers.\n'
+ '\n'
+ ' "numbers.Complex"\n'
+ ' These represent complex numbers as a pair of machine-level\n'
+ ' double precision floating point numbers. The same caveats '
+ 'apply\n'
+ ' as for floating point numbers. The real and imaginary parts '
+ 'of a\n'
+ ' complex number "z" can be retrieved through the read-only\n'
+ ' attributes "z.real" and "z.imag".\n'
+ '\n'
+ 'Sequences\n'
+ ' These represent finite ordered sets indexed by non-negative\n'
+ ' numbers. The built-in function "len()" returns the number of '
+ 'items\n'
+ ' of a sequence. When the length of a sequence is *n*, the index '
+ 'set\n'
+ ' contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* '
+ 'is\n'
+ ' selected by "a[i]".\n'
+ '\n'
+ ' Sequences also support slicing: "a[i:j]" selects all items with\n'
+ ' index *k* such that *i* "<=" *k* "<" *j*. When used as an\n'
+ ' expression, a slice is a sequence of the same type. This '
+ 'implies\n'
+ ' that the index set is renumbered so that it starts at 0.\n'
+ '\n'
+ ' Some sequences also support "extended slicing" with a third '
+ '"step"\n'
+ ' parameter: "a[i:j:k]" selects all items of *a* with index *x* '
+ 'where\n'
+ ' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n'
+ '\n'
+ ' Sequences are distinguished according to their mutability:\n'
+ '\n'
+ ' Immutable sequences\n'
+ ' An object of an immutable sequence type cannot change once it '
+ 'is\n'
+ ' created. (If the object contains references to other '
+ 'objects,\n'
+ ' these other objects may be mutable and may be changed; '
+ 'however,\n'
+ ' the collection of objects directly referenced by an '
+ 'immutable\n'
+ ' object cannot change.)\n'
+ '\n'
+ ' The following types are immutable sequences:\n'
+ '\n'
+ ' Strings\n'
+ ' The items of a string are characters. There is no '
+ 'separate\n'
+ ' character type; a character is represented by a string of '
+ 'one\n'
+ ' item. Characters represent (at least) 8-bit bytes. The\n'
+ ' built-in functions "chr()" and "ord()" convert between\n'
+ ' characters and nonnegative integers representing the byte\n'
+ ' values. Bytes with the values 0--127 usually represent '
+ 'the\n'
+ ' corresponding ASCII values, but the interpretation of '
+ 'values\n'
+ ' is up to the program. The string data type is also used '
+ 'to\n'
+ ' represent arrays of bytes, e.g., to hold data read from a\n'
+ ' file.\n'
+ '\n'
+ ' (On systems whose native character set is not ASCII, '
+ 'strings\n'
+ ' may use EBCDIC in their internal representation, provided '
+ 'the\n'
+ ' functions "chr()" and "ord()" implement a mapping between\n'
+ ' ASCII and EBCDIC, and string comparison preserves the '
+ 'ASCII\n'
+ ' order. Or perhaps someone can propose a better rule?)\n'
+ '\n'
+ ' Unicode\n'
+ ' The items of a Unicode object are Unicode code units. A\n'
+ ' Unicode code unit is represented by a Unicode object of '
+ 'one\n'
+ ' item and can hold either a 16-bit or 32-bit value\n'
+ ' representing a Unicode ordinal (the maximum value for the\n'
+ ' ordinal is given in "sys.maxunicode", and depends on how\n'
+ ' Python is configured at compile time). Surrogate pairs '
+ 'may\n'
+ ' be present in the Unicode object, and will be reported as '
+ 'two\n'
+ ' separate items. The built-in functions "unichr()" and\n'
+ ' "ord()" convert between code units and nonnegative '
+ 'integers\n'
+ ' representing the Unicode ordinals as defined in the '
+ 'Unicode\n'
+ ' Standard 3.0. Conversion from and to other encodings are\n'
+ ' possible through the Unicode method "encode()" and the '
+ 'built-\n'
+ ' in function "unicode()".\n'
+ '\n'
+ ' Tuples\n'
+ ' The items of a tuple are arbitrary Python objects. Tuples '
+ 'of\n'
+ ' two or more items are formed by comma-separated lists of\n'
+ " expressions. A tuple of one item (a 'singleton') can be\n"
+ ' formed by affixing a comma to an expression (an expression '
+ 'by\n'
+ ' itself does not create a tuple, since parentheses must be\n'
+ ' usable for grouping of expressions). An empty tuple can '
+ 'be\n'
+ ' formed by an empty pair of parentheses.\n'
+ '\n'
+ ' Mutable sequences\n'
+ ' Mutable sequences can be changed after they are created. '
+ 'The\n'
+ ' subscription and slicing notations can be used as the target '
+ 'of\n'
+ ' assignment and "del" (delete) statements.\n'
+ '\n'
+ ' There are currently two intrinsic mutable sequence types:\n'
+ '\n'
+ ' Lists\n'
+ ' The items of a list are arbitrary Python objects. Lists '
+ 'are\n'
+ ' formed by placing a comma-separated list of expressions '
+ 'in\n'
+ ' square brackets. (Note that there are no special cases '
+ 'needed\n'
+ ' to form lists of length 0 or 1.)\n'
+ '\n'
+ ' Byte Arrays\n'
+ ' A bytearray object is a mutable array. They are created '
+ 'by\n'
+ ' the built-in "bytearray()" constructor. Aside from being\n'
+ ' mutable (and hence unhashable), byte arrays otherwise '
+ 'provide\n'
+ ' the same interface and functionality as immutable bytes\n'
+ ' objects.\n'
+ '\n'
+ ' The extension module "array" provides an additional example '
+ 'of a\n'
+ ' mutable sequence type.\n'
+ '\n'
+ 'Set types\n'
+ ' These represent unordered, finite sets of unique, immutable\n'
+ ' objects. As such, they cannot be indexed by any subscript. '
+ 'However,\n'
+ ' they can be iterated over, and the built-in function "len()"\n'
+ ' returns the number of items in a set. Common uses for sets are '
+ 'fast\n'
+ ' membership testing, removing duplicates from a sequence, and\n'
+ ' computing mathematical operations such as intersection, union,\n'
+ ' difference, and symmetric difference.\n'
+ '\n'
+ ' For set elements, the same immutability rules apply as for\n'
+ ' dictionary keys. Note that numeric types obey the normal rules '
+ 'for\n'
+ ' numeric comparison: if two numbers compare equal (e.g., "1" and\n'
+ ' "1.0"), only one of them can be contained in a set.\n'
+ '\n'
+ ' There are currently two intrinsic set types:\n'
+ '\n'
+ ' Sets\n'
+ ' These represent a mutable set. They are created by the '
+ 'built-in\n'
+ ' "set()" constructor and can be modified afterwards by '
+ 'several\n'
+ ' methods, such as "add()".\n'
+ '\n'
+ ' Frozen sets\n'
+ ' These represent an immutable set. They are created by the\n'
+ ' built-in "frozenset()" constructor. As a frozenset is '
+ 'immutable\n'
+ ' and *hashable*, it can be used again as an element of '
+ 'another\n'
+ ' set, or as a dictionary key.\n'
+ '\n'
+ 'Mappings\n'
+ ' These represent finite sets of objects indexed by arbitrary '
+ 'index\n'
+ ' sets. The subscript notation "a[k]" selects the item indexed by '
+ '"k"\n'
+ ' from the mapping "a"; this can be used in expressions and as '
+ 'the\n'
+ ' target of assignments or "del" statements. The built-in '
+ 'function\n'
+ ' "len()" returns the number of items in a mapping.\n'
+ '\n'
+ ' There is currently a single intrinsic mapping type:\n'
+ '\n'
+ ' Dictionaries\n'
+ ' These represent finite sets of objects indexed by nearly\n'
+ ' arbitrary values. The only types of values not acceptable '
+ 'as\n'
+ ' keys are values containing lists or dictionaries or other\n'
+ ' mutable types that are compared by value rather than by '
+ 'object\n'
+ ' identity, the reason being that the efficient implementation '
+ 'of\n'
+ " dictionaries requires a key's hash value to remain constant.\n"
+ ' Numeric types used for keys obey the normal rules for '
+ 'numeric\n'
+ ' comparison: if two numbers compare equal (e.g., "1" and '
+ '"1.0")\n'
+ ' then they can be used interchangeably to index the same\n'
+ ' dictionary entry.\n'
+ '\n'
+ ' Dictionaries are mutable; they can be created by the "{...}"\n'
+ ' notation (see section Dictionary displays).\n'
+ '\n'
+ ' The extension modules "dbm", "gdbm", and "bsddb" provide\n'
+ ' additional examples of mapping types.\n'
+ '\n'
+ 'Callable types\n'
+ ' These are the types to which the function call operation (see\n'
+ ' section Calls) can be applied:\n'
+ '\n'
+ ' User-defined functions\n'
+ ' A user-defined function object is created by a function\n'
+ ' definition (see section Function definitions). It should be\n'
+ ' called with an argument list containing the same number of '
+ 'items\n'
+ " as the function's formal parameter list.\n"
+ '\n'
+ ' Special attributes:\n'
+ '\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | Attribute | Meaning '
+ '| |\n'
+ ' '
+ '+=========================+=================================+=============+\n'
+ ' | "__doc__" "func_doc" | The function\'s documentation '
+ '| Writable |\n'
+ ' | | string, or "None" if '
+ '| |\n'
+ ' | | unavailable. '
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__name__" "func_name" | The function\'s name '
+ '| Writable |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__module__" | The name of the module the | '
+ 'Writable |\n'
+ ' | | function was defined in, or '
+ '| |\n'
+ ' | | "None" if unavailable. '
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__defaults__" | A tuple containing default | '
+ 'Writable |\n'
+ ' | "func_defaults" | argument values for those '
+ '| |\n'
+ ' | | arguments that have defaults, '
+ '| |\n'
+ ' | | or "None" if no arguments have '
+ '| |\n'
+ ' | | a default value. '
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__code__" "func_code" | The code object representing | '
+ 'Writable |\n'
+ ' | | the compiled function body. '
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__globals__" | A reference to the dictionary | '
+ 'Read-only |\n'
+ ' | "func_globals" | that holds the function\'s '
+ '| |\n'
+ ' | | global variables --- the global '
+ '| |\n'
+ ' | | namespace of the module in '
+ '| |\n'
+ ' | | which the function was defined. '
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__dict__" "func_dict" | The namespace supporting | '
+ 'Writable |\n'
+ ' | | arbitrary function attributes. '
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ ' | "__closure__" | "None" or a tuple of cells that | '
+ 'Read-only |\n'
+ ' | "func_closure" | contain bindings for the '
+ '| |\n'
+ " | | function's free variables. "
+ '| |\n'
+ ' '
+ '+-------------------------+---------------------------------+-------------+\n'
+ '\n'
+ ' Most of the attributes labelled "Writable" check the type of '
+ 'the\n'
+ ' assigned value.\n'
+ '\n'
+ ' Changed in version 2.4: "func_name" is now writable.\n'
+ '\n'
+ ' Changed in version 2.6: The double-underscore attributes\n'
+ ' "__closure__", "__code__", "__defaults__", and "__globals__"\n'
+ ' were introduced as aliases for the corresponding "func_*"\n'
+ ' attributes for forwards compatibility with Python 3.\n'
+ '\n'
+ ' Function objects also support getting and setting arbitrary\n'
+ ' attributes, which can be used, for example, to attach '
+ 'metadata\n'
+ ' to functions. Regular attribute dot-notation is used to get '
+ 'and\n'
+ ' set such attributes. *Note that the current implementation '
+ 'only\n'
+ ' supports function attributes on user-defined functions. '
+ 'Function\n'
+ ' attributes on built-in functions may be supported in the\n'
+ ' future.*\n'
+ '\n'
+ " Additional information about a function's definition can be\n"
+ ' retrieved from its code object; see the description of '
+ 'internal\n'
+ ' types below.\n'
+ '\n'
+ ' User-defined methods\n'
+ ' A user-defined method object combines a class, a class '
+ 'instance\n'
+ ' (or "None") and any callable object (normally a user-defined\n'
+ ' function).\n'
+ '\n'
+ ' Special read-only attributes: "im_self" is the class '
+ 'instance\n'
+ ' object, "im_func" is the function object; "im_class" is the\n'
+ ' class of "im_self" for bound methods or the class that asked '
+ 'for\n'
+ ' the method for unbound methods; "__doc__" is the method\'s\n'
+ ' documentation (same as "im_func.__doc__"); "__name__" is the\n'
+ ' method name (same as "im_func.__name__"); "__module__" is '
+ 'the\n'
+ ' name of the module the method was defined in, or "None" if\n'
+ ' unavailable.\n'
+ '\n'
+ ' Changed in version 2.2: "im_self" used to refer to the class\n'
+ ' that defined the method.\n'
+ '\n'
+ ' Changed in version 2.6: For Python 3 forward-compatibility,\n'
+ ' "im_func" is also available as "__func__", and "im_self" as\n'
+ ' "__self__".\n'
+ '\n'
+ ' Methods also support accessing (but not setting) the '
+ 'arbitrary\n'
+ ' function attributes on the underlying function object.\n'
+ '\n'
+ ' User-defined method objects may be created when getting an\n'
+ ' attribute of a class (perhaps via an instance of that class), '
+ 'if\n'
+ ' that attribute is a user-defined function object, an unbound\n'
+ ' user-defined method object, or a class method object. When '
+ 'the\n'
+ ' attribute is a user-defined method object, a new method '
+ 'object\n'
+ ' is only created if the class from which it is being retrieved '
+ 'is\n'
+ ' the same as, or a derived class of, the class stored in the\n'
+ ' original method object; otherwise, the original method object '
+ 'is\n'
+ ' used as it is.\n'
+ '\n'
+ ' When a user-defined method object is created by retrieving a\n'
+ ' user-defined function object from a class, its "im_self"\n'
+ ' attribute is "None" and the method object is said to be '
+ 'unbound.\n'
+ ' When one is created by retrieving a user-defined function '
+ 'object\n'
+ ' from a class via one of its instances, its "im_self" '
+ 'attribute\n'
+ ' is the instance, and the method object is said to be bound. '
+ 'In\n'
+ ' either case, the new method\'s "im_class" attribute is the '
+ 'class\n'
+ ' from which the retrieval takes place, and its "im_func"\n'
+ ' attribute is the original function object.\n'
+ '\n'
+ ' When a user-defined method object is created by retrieving\n'
+ ' another method object from a class or instance, the behaviour '
+ 'is\n'
+ ' the same as for a function object, except that the "im_func"\n'
+ ' attribute of the new instance is not the original method '
+ 'object\n'
+ ' but its "im_func" attribute.\n'
+ '\n'
+ ' When a user-defined method object is created by retrieving a\n'
+ ' class method object from a class or instance, its "im_self"\n'
+ ' attribute is the class itself, and its "im_func" attribute '
+ 'is\n'
+ ' the function object underlying the class method.\n'
+ '\n'
+ ' When an unbound user-defined method object is called, the\n'
+ ' underlying function ("im_func") is called, with the '
+ 'restriction\n'
+ ' that the first argument must be an instance of the proper '
+ 'class\n'
+ ' ("im_class") or of a derived class thereof.\n'
+ '\n'
+ ' When a bound user-defined method object is called, the\n'
+ ' underlying function ("im_func") is called, inserting the '
+ 'class\n'
+ ' instance ("im_self") in front of the argument list. For\n'
+ ' instance, when "C" is a class which contains a definition for '
+ 'a\n'
+ ' function "f()", and "x" is an instance of "C", calling '
+ '"x.f(1)"\n'
+ ' is equivalent to calling "C.f(x, 1)".\n'
+ '\n'
+ ' When a user-defined method object is derived from a class '
+ 'method\n'
+ ' object, the "class instance" stored in "im_self" will '
+ 'actually\n'
+ ' be the class itself, so that calling either "x.f(1)" or '
+ '"C.f(1)"\n'
+ ' is equivalent to calling "f(C,1)" where "f" is the '
+ 'underlying\n'
+ ' function.\n'
+ '\n'
+ ' Note that the transformation from function object to (unbound '
+ 'or\n'
+ ' bound) method object happens each time the attribute is\n'
+ ' retrieved from the class or instance. In some cases, a '
+ 'fruitful\n'
+ ' optimization is to assign the attribute to a local variable '
+ 'and\n'
+ ' call that local variable. Also notice that this '
+ 'transformation\n'
+ ' only happens for user-defined functions; other callable '
+ 'objects\n'
+ ' (and all non-callable objects) are retrieved without\n'
+ ' transformation. It is also important to note that '
+ 'user-defined\n'
+ ' functions which are attributes of a class instance are not\n'
+ ' converted to bound methods; this *only* happens when the\n'
+ ' function is an attribute of the class.\n'
+ '\n'
+ ' Generator functions\n'
+ ' A function or method which uses the "yield" statement (see\n'
+ ' section The yield statement) is called a *generator '
+ 'function*.\n'
+ ' Such a function, when called, always returns an iterator '
+ 'object\n'
+ ' which can be used to execute the body of the function: '
+ 'calling\n'
+ ' the iterator\'s "next()" method will cause the function to\n'
+ ' execute until it provides a value using the "yield" '
+ 'statement.\n'
+ ' When the function executes a "return" statement or falls off '
+ 'the\n'
+ ' end, a "StopIteration" exception is raised and the iterator '
+ 'will\n'
+ ' have reached the end of the set of values to be returned.\n'
+ '\n'
+ ' Built-in functions\n'
+ ' A built-in function object is a wrapper around a C function.\n'
+ ' Examples of built-in functions are "len()" and "math.sin()"\n'
+ ' ("math" is a standard built-in module). The number and type '
+ 'of\n'
+ ' the arguments are determined by the C function. Special '
+ 'read-\n'
+ ' only attributes: "__doc__" is the function\'s documentation\n'
+ ' string, or "None" if unavailable; "__name__" is the '
+ "function's\n"
+ ' name; "__self__" is set to "None" (but see the next item);\n'
+ ' "__module__" is the name of the module the function was '
+ 'defined\n'
+ ' in or "None" if unavailable.\n'
+ '\n'
+ ' Built-in methods\n'
+ ' This is really a different disguise of a built-in function, '
+ 'this\n'
+ ' time containing an object passed to the C function as an\n'
+ ' implicit extra argument. An example of a built-in method is\n'
+ ' "alist.append()", assuming *alist* is a list object. In this\n'
+ ' case, the special read-only attribute "__self__" is set to '
+ 'the\n'
+ ' object denoted by *alist*.\n'
+ '\n'
+ ' Class Types\n'
+ ' Class types, or "new-style classes," are callable. These\n'
+ ' objects normally act as factories for new instances of\n'
+ ' themselves, but variations are possible for class types that\n'
+ ' override "__new__()". The arguments of the call are passed '
+ 'to\n'
+ ' "__new__()" and, in the typical case, to "__init__()" to\n'
+ ' initialize the new instance.\n'
+ '\n'
+ ' Classic Classes\n'
+ ' Class objects are described below. When a class object is\n'
+ ' called, a new class instance (also described below) is '
+ 'created\n'
+ " and returned. This implies a call to the class's "
+ '"__init__()"\n'
+ ' method if it has one. Any arguments are passed on to the\n'
+ ' "__init__()" method. If there is no "__init__()" method, '
+ 'the\n'
+ ' class must be called without arguments.\n'
+ '\n'
+ ' Class instances\n'
+ ' Class instances are described below. Class instances are\n'
+ ' callable only when the class has a "__call__()" method;\n'
+ ' "x(arguments)" is a shorthand for "x.__call__(arguments)".\n'
+ '\n'
+ 'Modules\n'
+ ' Modules are imported by the "import" statement (see section The\n'
+ ' import statement). A module object has a namespace implemented '
+ 'by a\n'
+ ' dictionary object (this is the dictionary referenced by the\n'
+ ' func_globals attribute of functions defined in the module).\n'
+ ' Attribute references are translated to lookups in this '
+ 'dictionary,\n'
+ ' e.g., "m.x" is equivalent to "m.__dict__["x"]". A module object\n'
+ ' does not contain the code object used to initialize the module\n'
+ " (since it isn't needed once the initialization is done).\n"
+ '\n'
+ " Attribute assignment updates the module's namespace dictionary,\n"
+ ' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n'
+ '\n'
+ ' Special read-only attribute: "__dict__" is the module\'s '
+ 'namespace\n'
+ ' as a dictionary object.\n'
+ '\n'
+ ' **CPython implementation detail:** Because of the way CPython\n'
+ ' clears module dictionaries, the module dictionary will be '
+ 'cleared\n'
+ ' when the module falls out of scope even if the dictionary still '
+ 'has\n'
+ ' live references. To avoid this, copy the dictionary or keep '
+ 'the\n'
+ ' module around while using its dictionary directly.\n'
+ '\n'
+ ' Predefined (writable) attributes: "__name__" is the module\'s '
+ 'name;\n'
+ ' "__doc__" is the module\'s documentation string, or "None" if\n'
+ ' unavailable; "__file__" is the pathname of the file from which '
+ 'the\n'
+ ' module was loaded, if it was loaded from a file. The "__file__"\n'
+ ' attribute is not present for C modules that are statically '
+ 'linked\n'
+ ' into the interpreter; for extension modules loaded dynamically '
+ 'from\n'
+ ' a shared library, it is the pathname of the shared library '
+ 'file.\n'
+ '\n'
+ 'Classes\n'
+ ' Both class types (new-style classes) and class objects (old-\n'
+ ' style/classic classes) are typically created by class '
+ 'definitions\n'
+ ' (see section Class definitions). A class has a namespace\n'
+ ' implemented by a dictionary object. Class attribute references '
+ 'are\n'
+ ' translated to lookups in this dictionary, e.g., "C.x" is '
+ 'translated\n'
+ ' to "C.__dict__["x"]" (although for new-style classes in '
+ 'particular\n'
+ ' there are a number of hooks which allow for other means of '
+ 'locating\n'
+ ' attributes). When the attribute name is not found there, the\n'
+ ' attribute search continues in the base classes. For old-style\n'
+ ' classes, the search is depth-first, left-to-right in the order '
+ 'of\n'
+ ' occurrence in the base class list. New-style classes use the '
+ 'more\n'
+ ' complex C3 method resolution order which behaves correctly even '
+ 'in\n'
+ " the presence of 'diamond' inheritance structures where there "
+ 'are\n'
+ ' multiple inheritance paths leading back to a common ancestor.\n'
+ ' Additional details on the C3 MRO used by new-style classes can '
+ 'be\n'
+ ' found in the documentation accompanying the 2.3 release at\n'
+ ' https://www.python.org/download/releases/2.3/mro/.\n'
+ '\n'
+ ' When a class attribute reference (for class "C", say) would '
+ 'yield a\n'
+ ' user-defined function object or an unbound user-defined method\n'
+ ' object whose associated class is either "C" or one of its base\n'
+ ' classes, it is transformed into an unbound user-defined method\n'
+ ' object whose "im_class" attribute is "C". When it would yield a\n'
+ ' class method object, it is transformed into a bound '
+ 'user-defined\n'
+ ' method object whose "im_self" attribute is "C". When it would\n'
+ ' yield a static method object, it is transformed into the object\n'
+ ' wrapped by the static method object. See section Implementing\n'
+ ' Descriptors for another way in which attributes retrieved from '
+ 'a\n'
+ ' class may differ from those actually contained in its '
+ '"__dict__"\n'
+ ' (note that only new-style classes support descriptors).\n'
+ '\n'
+ " Class attribute assignments update the class's dictionary, "
+ 'never\n'
+ ' the dictionary of a base class.\n'
+ '\n'
+ ' A class object can be called (see above) to yield a class '
+ 'instance\n'
+ ' (see below).\n'
+ '\n'
+ ' Special attributes: "__name__" is the class name; "__module__" '
+ 'is\n'
+ ' the module name in which the class was defined; "__dict__" is '
+ 'the\n'
+ ' dictionary containing the class\'s namespace; "__bases__" is a '
+ 'tuple\n'
+ ' (possibly empty or a singleton) containing the base classes, in '
+ 'the\n'
+ ' order of their occurrence in the base class list; "__doc__" is '
+ 'the\n'
+ ' class\'s documentation string, or "None" if undefined.\n'
+ '\n'
+ 'Class instances\n'
+ ' A class instance is created by calling a class object (see '
+ 'above).\n'
+ ' A class instance has a namespace implemented as a dictionary '
+ 'which\n'
+ ' is the first place in which attribute references are searched.\n'
+ " When an attribute is not found there, and the instance's class "
+ 'has\n'
+ ' an attribute by that name, the search continues with the class\n'
+ ' attributes. If a class attribute is found that is a '
+ 'user-defined\n'
+ ' function object or an unbound user-defined method object whose\n'
+ ' associated class is the class (call it "C") of the instance for\n'
+ ' which the attribute reference was initiated or one of its bases, '
+ 'it\n'
+ ' is transformed into a bound user-defined method object whose\n'
+ ' "im_class" attribute is "C" and whose "im_self" attribute is '
+ 'the\n'
+ ' instance. Static method and class method objects are also\n'
+ ' transformed, as if they had been retrieved from class "C"; see\n'
+ ' above under "Classes". See section Implementing Descriptors for\n'
+ ' another way in which attributes of a class retrieved via its\n'
+ ' instances may differ from the objects actually stored in the\n'
+ ' class\'s "__dict__". If no class attribute is found, and the\n'
+ ' object\'s class has a "__getattr__()" method, that is called to\n'
+ ' satisfy the lookup.\n'
+ '\n'
+ " Attribute assignments and deletions update the instance's\n"
+ " dictionary, never a class's dictionary. If the class has a\n"
+ ' "__setattr__()" or "__delattr__()" method, this is called '
+ 'instead\n'
+ ' of updating the instance dictionary directly.\n'
+ '\n'
+ ' Class instances can pretend to be numbers, sequences, or '
+ 'mappings\n'
+ ' if they have methods with certain special names. See section\n'
+ ' Special method names.\n'
+ '\n'
+ ' Special attributes: "__dict__" is the attribute dictionary;\n'
+ ' "__class__" is the instance\'s class.\n'
+ '\n'
+ 'Files\n'
+ ' A file object represents an open file. File objects are created '
+ 'by\n'
+ ' the "open()" built-in function, and also by "os.popen()",\n'
+ ' "os.fdopen()", and the "makefile()" method of socket objects '
+ '(and\n'
+ ' perhaps by other functions or methods provided by extension\n'
+ ' modules). The objects "sys.stdin", "sys.stdout" and '
+ '"sys.stderr"\n'
+ ' are initialized to file objects corresponding to the '
+ "interpreter's\n"
+ ' standard input, output and error streams. See File Objects for\n'
+ ' complete documentation of file objects.\n'
+ '\n'
+ 'Internal types\n'
+ ' A few types used internally by the interpreter are exposed to '
+ 'the\n'
+ ' user. Their definitions may change with future versions of the\n'
+ ' interpreter, but they are mentioned here for completeness.\n'
+ '\n'
+ ' Code objects\n'
+ ' Code objects represent *byte-compiled* executable Python '
+ 'code,\n'
+ ' or *bytecode*. The difference between a code object and a\n'
+ ' function object is that the function object contains an '
+ 'explicit\n'
+ " reference to the function's globals (the module in which it "
+ 'was\n'
+ ' defined), while a code object contains no context; also the\n'
+ ' default argument values are stored in the function object, '
+ 'not\n'
+ ' in the code object (because they represent values calculated '
+ 'at\n'
+ ' run-time). Unlike function objects, code objects are '
+ 'immutable\n'
+ ' and contain no references (directly or indirectly) to '
+ 'mutable\n'
+ ' objects.\n'
+ '\n'
+ ' Special read-only attributes: "co_name" gives the function '
+ 'name;\n'
+ ' "co_argcount" is the number of positional arguments '
+ '(including\n'
+ ' arguments with default values); "co_nlocals" is the number '
+ 'of\n'
+ ' local variables used by the function (including arguments);\n'
+ ' "co_varnames" is a tuple containing the names of the local\n'
+ ' variables (starting with the argument names); "co_cellvars" '
+ 'is a\n'
+ ' tuple containing the names of local variables that are\n'
+ ' referenced by nested functions; "co_freevars" is a tuple\n'
+ ' containing the names of free variables; "co_code" is a '
+ 'string\n'
+ ' representing the sequence of bytecode instructions; '
+ '"co_consts"\n'
+ ' is a tuple containing the literals used by the bytecode;\n'
+ ' "co_names" is a tuple containing the names used by the '
+ 'bytecode;\n'
+ ' "co_filename" is the filename from which the code was '
+ 'compiled;\n'
+ ' "co_firstlineno" is the first line number of the function;\n'
+ ' "co_lnotab" is a string encoding the mapping from bytecode\n'
+ ' offsets to line numbers (for details see the source code of '
+ 'the\n'
+ ' interpreter); "co_stacksize" is the required stack size\n'
+ ' (including local variables); "co_flags" is an integer '
+ 'encoding a\n'
+ ' number of flags for the interpreter.\n'
+ '\n'
+ ' The following flag bits are defined for "co_flags": bit '
+ '"0x04"\n'
+ ' is set if the function uses the "*arguments" syntax to accept '
+ 'an\n'
+ ' arbitrary number of positional arguments; bit "0x08" is set '
+ 'if\n'
+ ' the function uses the "**keywords" syntax to accept '
+ 'arbitrary\n'
+ ' keyword arguments; bit "0x20" is set if the function is a\n'
+ ' generator.\n'
+ '\n'
+ ' Future feature declarations ("from __future__ import '
+ 'division")\n'
+ ' also use bits in "co_flags" to indicate whether a code '
+ 'object\n'
+ ' was compiled with a particular feature enabled: bit "0x2000" '
+ 'is\n'
+ ' set if the function was compiled with future division '
+ 'enabled;\n'
+ ' bits "0x10" and "0x1000" were used in earlier versions of\n'
+ ' Python.\n'
+ '\n'
+ ' Other bits in "co_flags" are reserved for internal use.\n'
+ '\n'
+ ' If a code object represents a function, the first item in\n'
+ ' "co_consts" is the documentation string of the function, or\n'
+ ' "None" if undefined.\n'
+ '\n'
+ ' Frame objects\n'
+ ' Frame objects represent execution frames. They may occur in\n'
+ ' traceback objects (see below).\n'
+ '\n'
+ ' Special read-only attributes: "f_back" is to the previous '
+ 'stack\n'
+ ' frame (towards the caller), or "None" if this is the bottom\n'
+ ' stack frame; "f_code" is the code object being executed in '
+ 'this\n'
+ ' frame; "f_locals" is the dictionary used to look up local\n'
+ ' variables; "f_globals" is used for global variables;\n'
+ ' "f_builtins" is used for built-in (intrinsic) names;\n'
+ ' "f_restricted" is a flag indicating whether the function is\n'
+ ' executing in restricted execution mode; "f_lasti" gives the\n'
+ ' precise instruction (this is an index into the bytecode '
+ 'string\n'
+ ' of the code object).\n'
+ '\n'
+ ' Special writable attributes: "f_trace", if not "None", is a\n'
+ ' function called at the start of each source code line (this '
+ 'is\n'
+ ' used by the debugger); "f_exc_type", "f_exc_value",\n'
+ ' "f_exc_traceback" represent the last exception raised in the\n'
+ ' parent frame provided another exception was ever raised in '
+ 'the\n'
+ ' current frame (in all other cases they are "None"); '
+ '"f_lineno"\n'
+ ' is the current line number of the frame --- writing to this '
+ 'from\n'
+ ' within a trace function jumps to the given line (only for '
+ 'the\n'
+ ' bottom-most frame). A debugger can implement a Jump command\n'
+ ' (aka Set Next Statement) by writing to f_lineno.\n'
+ '\n'
+ ' Traceback objects\n'
+ ' Traceback objects represent a stack trace of an exception. '
+ 'A\n'
+ ' traceback object is created when an exception occurs. When '
+ 'the\n'
+ ' search for an exception handler unwinds the execution stack, '
+ 'at\n'
+ ' each unwound level a traceback object is inserted in front '
+ 'of\n'
+ ' the current traceback. When an exception handler is '
+ 'entered,\n'
+ ' the stack trace is made available to the program. (See '
+ 'section\n'
+ ' The try statement.) It is accessible as "sys.exc_traceback", '
+ 'and\n'
+ ' also as the third item of the tuple returned by\n'
+ ' "sys.exc_info()". The latter is the preferred interface, '
+ 'since\n'
+ ' it works correctly when the program is using multiple '
+ 'threads.\n'
+ ' When the program contains no suitable handler, the stack '
+ 'trace\n'
+ ' is written (nicely formatted) to the standard error stream; '
+ 'if\n'
+ ' the interpreter is interactive, it is also made available to '
+ 'the\n'
+ ' user as "sys.last_traceback".\n'
+ '\n'
+ ' Special read-only attributes: "tb_next" is the next level in '
+ 'the\n'
+ ' stack trace (towards the frame where the exception occurred), '
+ 'or\n'
+ ' "None" if there is no next level; "tb_frame" points to the\n'
+ ' execution frame of the current level; "tb_lineno" gives the '
+ 'line\n'
+ ' number where the exception occurred; "tb_lasti" indicates '
+ 'the\n'
+ ' precise instruction. The line number and last instruction '
+ 'in\n'
+ ' the traceback may differ from the line number of its frame\n'
+ ' object if the exception occurred in a "try" statement with '
+ 'no\n'
+ ' matching except clause or with a finally clause.\n'
+ '\n'
+ ' Slice objects\n'
+ ' Slice objects are used to represent slices when *extended '
+ 'slice\n'
+ ' syntax* is used. This is a slice using two colons, or '
+ 'multiple\n'
+ ' slices or ellipses separated by commas, e.g., "a[i:j:step]",\n'
+ ' "a[i:j, k:l]", or "a[..., i:j]". They are also created by '
+ 'the\n'
+ ' built-in "slice()" function.\n'
+ '\n'
+ ' Special read-only attributes: "start" is the lower bound; '
+ '"stop"\n'
+ ' is the upper bound; "step" is the step value; each is "None" '
+ 'if\n'
+ ' omitted. These attributes can have any type.\n'
+ '\n'
+ ' Slice objects support one method:\n'
+ '\n'
+ ' slice.indices(self, length)\n'
+ '\n'
+ ' This method takes a single integer argument *length* and\n'
+ ' computes information about the extended slice that the '
+ 'slice\n'
+ ' object would describe if applied to a sequence of '
+ '*length*\n'
+ ' items. It returns a tuple of three integers; '
+ 'respectively\n'
+ ' these are the *start* and *stop* indices and the *step* '
+ 'or\n'
+ ' stride length of the slice. Missing or out-of-bounds '
+ 'indices\n'
+ ' are handled in a manner consistent with regular slices.\n'
+ '\n'
+ ' New in version 2.3.\n'
+ '\n'
+ ' Static method objects\n'
+ ' Static method objects provide a way of defeating the\n'
+ ' transformation of function objects to method objects '
+ 'described\n'
+ ' above. A static method object is a wrapper around any other\n'
+ ' object, usually a user-defined method object. When a static\n'
+ ' method object is retrieved from a class or a class instance, '
+ 'the\n'
+ ' object actually returned is the wrapped object, which is not\n'
+ ' subject to any further transformation. Static method objects '
+ 'are\n'
+ ' not themselves callable, although the objects they wrap '
+ 'usually\n'
+ ' are. Static method objects are created by the built-in\n'
+ ' "staticmethod()" constructor.\n'
+ '\n'
+ ' Class method objects\n'
+ ' A class method object, like a static method object, is a '
+ 'wrapper\n'
+ ' around another object that alters the way in which that '
+ 'object\n'
+ ' is retrieved from classes and class instances. The behaviour '
+ 'of\n'
+ ' class method objects upon such retrieval is described above,\n'
+ ' under "User-defined methods". Class method objects are '
+ 'created\n'
+ ' by the built-in "classmethod()" constructor.\n',
+ 'typesfunctions': '\n'
+ 'Functions\n'
+ '*********\n'
+ '\n'
+ 'Function objects are created by function definitions. The '
+ 'only\n'
+ 'operation on a function object is to call it: '
+ '"func(argument-list)".\n'
+ '\n'
+ 'There are really two flavors of function objects: built-in '
+ 'functions\n'
+ 'and user-defined functions. Both support the same '
+ 'operation (to call\n'
+ 'the function), but the implementation is different, hence '
+ 'the\n'
+ 'different object types.\n'
+ '\n'
+ 'See Function definitions for more information.\n',
+ 'typesmapping': '\n'
+ 'Mapping Types --- "dict"\n'
+ '************************\n'
+ '\n'
+ 'A *mapping* object maps *hashable* values to arbitrary '
+ 'objects.\n'
+ 'Mappings are mutable objects. There is currently only one '
+ 'standard\n'
+ 'mapping type, the *dictionary*. (For other containers see '
+ 'the built\n'
+ 'in "list", "set", and "tuple" classes, and the "collections" '
+ 'module.)\n'
+ '\n'
+ "A dictionary's keys are *almost* arbitrary values. Values "
+ 'that are\n'
+ 'not *hashable*, that is, values containing lists, '
+ 'dictionaries or\n'
+ 'other mutable types (that are compared by value rather than '
+ 'by object\n'
+ 'identity) may not be used as keys. Numeric types used for '
+ 'keys obey\n'
+ 'the normal rules for numeric comparison: if two numbers '
+ 'compare equal\n'
+ '(such as "1" and "1.0") then they can be used '
+ 'interchangeably to index\n'
+ 'the same dictionary entry. (Note however, that since '
+ 'computers store\n'
+ 'floating-point numbers as approximations it is usually '
+ 'unwise to use\n'
+ 'them as dictionary keys.)\n'
+ '\n'
+ 'Dictionaries can be created by placing a comma-separated '
+ 'list of "key:\n'
+ 'value" pairs within braces, for example: "{\'jack\': 4098, '
+ "'sjoerd':\n"
+ '4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the '
+ '"dict"\n'
+ 'constructor.\n'
+ '\n'
+ 'class dict(**kwarg)\n'
+ 'class dict(mapping, **kwarg)\n'
+ 'class dict(iterable, **kwarg)\n'
+ '\n'
+ ' Return a new dictionary initialized from an optional '
+ 'positional\n'
+ ' argument and a possibly empty set of keyword arguments.\n'
+ '\n'
+ ' If no positional argument is given, an empty dictionary '
+ 'is created.\n'
+ ' If a positional argument is given and it is a mapping '
+ 'object, a\n'
+ ' dictionary is created with the same key-value pairs as '
+ 'the mapping\n'
+ ' object. Otherwise, the positional argument must be an '
+ '*iterable*\n'
+ ' object. Each item in the iterable must itself be an '
+ 'iterable with\n'
+ ' exactly two objects. The first object of each item '
+ 'becomes a key\n'
+ ' in the new dictionary, and the second object the '
+ 'corresponding\n'
+ ' value. If a key occurs more than once, the last value '
+ 'for that key\n'
+ ' becomes the corresponding value in the new dictionary.\n'
+ '\n'
+ ' If keyword arguments are given, the keyword arguments and '
+ 'their\n'
+ ' values are added to the dictionary created from the '
+ 'positional\n'
+ ' argument. If a key being added is already present, the '
+ 'value from\n'
+ ' the keyword argument replaces the value from the '
+ 'positional\n'
+ ' argument.\n'
+ '\n'
+ ' To illustrate, the following examples all return a '
+ 'dictionary equal\n'
+ ' to "{"one": 1, "two": 2, "three": 3}":\n'
+ '\n'
+ ' >>> a = dict(one=1, two=2, three=3)\n'
+ " >>> b = {'one': 1, 'two': 2, 'three': 3}\n"
+ " >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))\n"
+ " >>> d = dict([('two', 2), ('one', 1), ('three', 3)])\n"
+ " >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n"
+ ' >>> a == b == c == d == e\n'
+ ' True\n'
+ '\n'
+ ' Providing keyword arguments as in the first example only '
+ 'works for\n'
+ ' keys that are valid Python identifiers. Otherwise, any '
+ 'valid keys\n'
+ ' can be used.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' Changed in version 2.3: Support for building a dictionary '
+ 'from\n'
+ ' keyword arguments added.\n'
+ '\n'
+ ' These are the operations that dictionaries support (and '
+ 'therefore,\n'
+ ' custom mapping types should support too):\n'
+ '\n'
+ ' len(d)\n'
+ '\n'
+ ' Return the number of items in the dictionary *d*.\n'
+ '\n'
+ ' d[key]\n'
+ '\n'
+ ' Return the item of *d* with key *key*. Raises a '
+ '"KeyError" if\n'
+ ' *key* is not in the map.\n'
+ '\n'
+ ' If a subclass of dict defines a method "__missing__()" '
+ 'and *key*\n'
+ ' is not present, the "d[key]" operation calls that '
+ 'method with\n'
+ ' the key *key* as argument. The "d[key]" operation '
+ 'then returns\n'
+ ' or raises whatever is returned or raised by the\n'
+ ' "__missing__(key)" call. No other operations or '
+ 'methods invoke\n'
+ ' "__missing__()". If "__missing__()" is not defined, '
+ '"KeyError"\n'
+ ' is raised. "__missing__()" must be a method; it cannot '
+ 'be an\n'
+ ' instance variable:\n'
+ '\n'
+ ' >>> class Counter(dict):\n'
+ ' ... def __missing__(self, key):\n'
+ ' ... return 0\n'
+ ' >>> c = Counter()\n'
+ " >>> c['red']\n"
+ ' 0\n'
+ " >>> c['red'] += 1\n"
+ " >>> c['red']\n"
+ ' 1\n'
+ '\n'
+ ' The example above shows part of the implementation of\n'
+ ' "collections.Counter". A different "__missing__" '
+ 'method is used\n'
+ ' by "collections.defaultdict".\n'
+ '\n'
+ ' New in version 2.5: Recognition of __missing__ methods '
+ 'of dict\n'
+ ' subclasses.\n'
+ '\n'
+ ' d[key] = value\n'
+ '\n'
+ ' Set "d[key]" to *value*.\n'
+ '\n'
+ ' del d[key]\n'
+ '\n'
+ ' Remove "d[key]" from *d*. Raises a "KeyError" if '
+ '*key* is not\n'
+ ' in the map.\n'
+ '\n'
+ ' key in d\n'
+ '\n'
+ ' Return "True" if *d* has a key *key*, else "False".\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' key not in d\n'
+ '\n'
+ ' Equivalent to "not key in d".\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' iter(d)\n'
+ '\n'
+ ' Return an iterator over the keys of the dictionary. '
+ 'This is a\n'
+ ' shortcut for "iterkeys()".\n'
+ '\n'
+ ' clear()\n'
+ '\n'
+ ' Remove all items from the dictionary.\n'
+ '\n'
+ ' copy()\n'
+ '\n'
+ ' Return a shallow copy of the dictionary.\n'
+ '\n'
+ ' fromkeys(seq[, value])\n'
+ '\n'
+ ' Create a new dictionary with keys from *seq* and '
+ 'values set to\n'
+ ' *value*.\n'
+ '\n'
+ ' "fromkeys()" is a class method that returns a new '
+ 'dictionary.\n'
+ ' *value* defaults to "None".\n'
+ '\n'
+ ' New in version 2.3.\n'
+ '\n'
+ ' get(key[, default])\n'
+ '\n'
+ ' Return the value for *key* if *key* is in the '
+ 'dictionary, else\n'
+ ' *default*. If *default* is not given, it defaults to '
+ '"None", so\n'
+ ' that this method never raises a "KeyError".\n'
+ '\n'
+ ' has_key(key)\n'
+ '\n'
+ ' Test for the presence of *key* in the dictionary. '
+ '"has_key()"\n'
+ ' is deprecated in favor of "key in d".\n'
+ '\n'
+ ' items()\n'
+ '\n'
+ ' Return a copy of the dictionary\'s list of "(key, '
+ 'value)" pairs.\n'
+ '\n'
+ ' **CPython implementation detail:** Keys and values are '
+ 'listed in\n'
+ ' an arbitrary order which is non-random, varies across '
+ 'Python\n'
+ " implementations, and depends on the dictionary's "
+ 'history of\n'
+ ' insertions and deletions.\n'
+ '\n'
+ ' If "items()", "keys()", "values()", "iteritems()", '
+ '"iterkeys()",\n'
+ ' and "itervalues()" are called with no intervening '
+ 'modifications\n'
+ ' to the dictionary, the lists will directly '
+ 'correspond. This\n'
+ ' allows the creation of "(value, key)" pairs using '
+ '"zip()":\n'
+ ' "pairs = zip(d.values(), d.keys())". The same '
+ 'relationship\n'
+ ' holds for the "iterkeys()" and "itervalues()" methods: '
+ '"pairs =\n'
+ ' zip(d.itervalues(), d.iterkeys())" provides the same '
+ 'value for\n'
+ ' "pairs". Another way to create the same list is "pairs '
+ '= [(v, k)\n'
+ ' for (k, v) in d.iteritems()]".\n'
+ '\n'
+ ' iteritems()\n'
+ '\n'
+ ' Return an iterator over the dictionary\'s "(key, '
+ 'value)" pairs.\n'
+ ' See the note for "dict.items()".\n'
+ '\n'
+ ' Using "iteritems()" while adding or deleting entries '
+ 'in the\n'
+ ' dictionary may raise a "RuntimeError" or fail to '
+ 'iterate over\n'
+ ' all entries.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' iterkeys()\n'
+ '\n'
+ " Return an iterator over the dictionary's keys. See "
+ 'the note for\n'
+ ' "dict.items()".\n'
+ '\n'
+ ' Using "iterkeys()" while adding or deleting entries in '
+ 'the\n'
+ ' dictionary may raise a "RuntimeError" or fail to '
+ 'iterate over\n'
+ ' all entries.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' itervalues()\n'
+ '\n'
+ " Return an iterator over the dictionary's values. See "
+ 'the note\n'
+ ' for "dict.items()".\n'
+ '\n'
+ ' Using "itervalues()" while adding or deleting entries '
+ 'in the\n'
+ ' dictionary may raise a "RuntimeError" or fail to '
+ 'iterate over\n'
+ ' all entries.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' keys()\n'
+ '\n'
+ " Return a copy of the dictionary's list of keys. See "
+ 'the note\n'
+ ' for "dict.items()".\n'
+ '\n'
+ ' pop(key[, default])\n'
+ '\n'
+ ' If *key* is in the dictionary, remove it and return '
+ 'its value,\n'
+ ' else return *default*. If *default* is not given and '
+ '*key* is\n'
+ ' not in the dictionary, a "KeyError" is raised.\n'
+ '\n'
+ ' New in version 2.3.\n'
+ '\n'
+ ' popitem()\n'
+ '\n'
+ ' Remove and return an arbitrary "(key, value)" pair '
+ 'from the\n'
+ ' dictionary.\n'
+ '\n'
+ ' "popitem()" is useful to destructively iterate over a\n'
+ ' dictionary, as often used in set algorithms. If the '
+ 'dictionary\n'
+ ' is empty, calling "popitem()" raises a "KeyError".\n'
+ '\n'
+ ' setdefault(key[, default])\n'
+ '\n'
+ ' If *key* is in the dictionary, return its value. If '
+ 'not, insert\n'
+ ' *key* with a value of *default* and return *default*. '
+ '*default*\n'
+ ' defaults to "None".\n'
+ '\n'
+ ' update([other])\n'
+ '\n'
+ ' Update the dictionary with the key/value pairs from '
+ '*other*,\n'
+ ' overwriting existing keys. Return "None".\n'
+ '\n'
+ ' "update()" accepts either another dictionary object or '
+ 'an\n'
+ ' iterable of key/value pairs (as tuples or other '
+ 'iterables of\n'
+ ' length two). If keyword arguments are specified, the '
+ 'dictionary\n'
+ ' is then updated with those key/value pairs: '
+ '"d.update(red=1,\n'
+ ' blue=2)".\n'
+ '\n'
+ ' Changed in version 2.4: Allowed the argument to be an '
+ 'iterable\n'
+ ' of key/value pairs and allowed keyword arguments.\n'
+ '\n'
+ ' values()\n'
+ '\n'
+ " Return a copy of the dictionary's list of values. See "
+ 'the note\n'
+ ' for "dict.items()".\n'
+ '\n'
+ ' viewitems()\n'
+ '\n'
+ ' Return a new view of the dictionary\'s items ("(key, '
+ 'value)"\n'
+ ' pairs). See below for documentation of view objects.\n'
+ '\n'
+ ' New in version 2.7.\n'
+ '\n'
+ ' viewkeys()\n'
+ '\n'
+ " Return a new view of the dictionary's keys. See below "
+ 'for\n'
+ ' documentation of view objects.\n'
+ '\n'
+ ' New in version 2.7.\n'
+ '\n'
+ ' viewvalues()\n'
+ '\n'
+ " Return a new view of the dictionary's values. See "
+ 'below for\n'
+ ' documentation of view objects.\n'
+ '\n'
+ ' New in version 2.7.\n'
+ '\n'
+ ' Dictionaries compare equal if and only if they have the '
+ 'same "(key,\n'
+ ' value)" pairs.\n'
+ '\n'
+ '\n'
+ 'Dictionary view objects\n'
+ '=======================\n'
+ '\n'
+ 'The objects returned by "dict.viewkeys()", '
+ '"dict.viewvalues()" and\n'
+ '"dict.viewitems()" are *view objects*. They provide a '
+ 'dynamic view on\n'
+ "the dictionary's entries, which means that when the "
+ 'dictionary\n'
+ 'changes, the view reflects these changes.\n'
+ '\n'
+ 'Dictionary views can be iterated over to yield their '
+ 'respective data,\n'
+ 'and support membership tests:\n'
+ '\n'
+ 'len(dictview)\n'
+ '\n'
+ ' Return the number of entries in the dictionary.\n'
+ '\n'
+ 'iter(dictview)\n'
+ '\n'
+ ' Return an iterator over the keys, values or items '
+ '(represented as\n'
+ ' tuples of "(key, value)") in the dictionary.\n'
+ '\n'
+ ' Keys and values are iterated over in an arbitrary order '
+ 'which is\n'
+ ' non-random, varies across Python implementations, and '
+ 'depends on\n'
+ " the dictionary's history of insertions and deletions. If "
+ 'keys,\n'
+ ' values and items views are iterated over with no '
+ 'intervening\n'
+ ' modifications to the dictionary, the order of items will '
+ 'directly\n'
+ ' correspond. This allows the creation of "(value, key)" '
+ 'pairs using\n'
+ ' "zip()": "pairs = zip(d.values(), d.keys())". Another '
+ 'way to\n'
+ ' create the same list is "pairs = [(v, k) for (k, v) in '
+ 'd.items()]".\n'
+ '\n'
+ ' Iterating views while adding or deleting entries in the '
+ 'dictionary\n'
+ ' may raise a "RuntimeError" or fail to iterate over all '
+ 'entries.\n'
+ '\n'
+ 'x in dictview\n'
+ '\n'
+ ' Return "True" if *x* is in the underlying dictionary\'s '
+ 'keys, values\n'
+ ' or items (in the latter case, *x* should be a "(key, '
+ 'value)"\n'
+ ' tuple).\n'
+ '\n'
+ 'Keys views are set-like since their entries are unique and '
+ 'hashable.\n'
+ 'If all values are hashable, so that (key, value) pairs are '
+ 'unique and\n'
+ 'hashable, then the items view is also set-like. (Values '
+ 'views are not\n'
+ 'treated as set-like since the entries are generally not '
+ 'unique.) Then\n'
+ 'these set operations are available ("other" refers either to '
+ 'another\n'
+ 'view or a set):\n'
+ '\n'
+ 'dictview & other\n'
+ '\n'
+ ' Return the intersection of the dictview and the other '
+ 'object as a\n'
+ ' new set.\n'
+ '\n'
+ 'dictview | other\n'
+ '\n'
+ ' Return the union of the dictview and the other object as '
+ 'a new set.\n'
+ '\n'
+ 'dictview - other\n'
+ '\n'
+ ' Return the difference between the dictview and the other '
+ 'object\n'
+ " (all elements in *dictview* that aren't in *other*) as a "
+ 'new set.\n'
+ '\n'
+ 'dictview ^ other\n'
+ '\n'
+ ' Return the symmetric difference (all elements either in '
+ '*dictview*\n'
+ ' or *other*, but not in both) of the dictview and the '
+ 'other object\n'
+ ' as a new set.\n'
+ '\n'
+ 'An example of dictionary view usage:\n'
+ '\n'
+ " >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, "
+ "'spam': 500}\n"
+ ' >>> keys = dishes.viewkeys()\n'
+ ' >>> values = dishes.viewvalues()\n'
+ '\n'
+ ' >>> # iteration\n'
+ ' >>> n = 0\n'
+ ' >>> for val in values:\n'
+ ' ... n += val\n'
+ ' >>> print(n)\n'
+ ' 504\n'
+ '\n'
+ ' >>> # keys and values are iterated over in the same '
+ 'order\n'
+ ' >>> list(keys)\n'
+ " ['eggs', 'bacon', 'sausage', 'spam']\n"
+ ' >>> list(values)\n'
+ ' [2, 1, 1, 500]\n'
+ '\n'
+ ' >>> # view objects are dynamic and reflect dict changes\n'
+ " >>> del dishes['eggs']\n"
+ " >>> del dishes['sausage']\n"
+ ' >>> list(keys)\n'
+ " ['spam', 'bacon']\n"
+ '\n'
+ ' >>> # set operations\n'
+ " >>> keys & {'eggs', 'bacon', 'salad'}\n"
+ " {'bacon'}\n",
+ 'typesmethods': '\n'
+ 'Methods\n'
+ '*******\n'
+ '\n'
+ 'Methods are functions that are called using the attribute '
+ 'notation.\n'
+ 'There are two flavors: built-in methods (such as "append()" '
+ 'on lists)\n'
+ 'and class instance methods. Built-in methods are described '
+ 'with the\n'
+ 'types that support them.\n'
+ '\n'
+ 'The implementation adds two special read-only attributes to '
+ 'class\n'
+ 'instance methods: "m.im_self" is the object on which the '
+ 'method\n'
+ 'operates, and "m.im_func" is the function implementing the '
+ 'method.\n'
+ 'Calling "m(arg-1, arg-2, ..., arg-n)" is completely '
+ 'equivalent to\n'
+ 'calling "m.im_func(m.im_self, arg-1, arg-2, ..., arg-n)".\n'
+ '\n'
+ 'Class instance methods are either *bound* or *unbound*, '
+ 'referring to\n'
+ 'whether the method was accessed through an instance or a '
+ 'class,\n'
+ 'respectively. When a method is unbound, its "im_self" '
+ 'attribute will\n'
+ 'be "None" and if called, an explicit "self" object must be '
+ 'passed as\n'
+ 'the first argument. In this case, "self" must be an '
+ 'instance of the\n'
+ "unbound method's class (or a subclass of that class), "
+ 'otherwise a\n'
+ '"TypeError" is raised.\n'
+ '\n'
+ 'Like function objects, methods objects support getting '
+ 'arbitrary\n'
+ 'attributes. However, since method attributes are actually '
+ 'stored on\n'
+ 'the underlying function object ("meth.im_func"), setting '
+ 'method\n'
+ 'attributes on either bound or unbound methods is '
+ 'disallowed.\n'
+ 'Attempting to set an attribute on a method results in an\n'
+ '"AttributeError" being raised. In order to set a method '
+ 'attribute,\n'
+ 'you need to explicitly set it on the underlying function '
+ 'object:\n'
+ '\n'
+ ' >>> class C:\n'
+ ' ... def method(self):\n'
+ ' ... pass\n'
+ ' ...\n'
+ ' >>> c = C()\n'
+ " >>> c.method.whoami = 'my name is method' # can't set on "
+ 'the method\n'
+ ' Traceback (most recent call last):\n'
+ ' File "<stdin>", line 1, in <module>\n'
+ " AttributeError: 'instancemethod' object has no attribute "
+ "'whoami'\n"
+ " >>> c.method.im_func.whoami = 'my name is method'\n"
+ ' >>> c.method.whoami\n'
+ " 'my name is method'\n"
+ '\n'
+ 'See The standard type hierarchy for more information.\n',
+ 'typesmodules': '\n'
+ 'Modules\n'
+ '*******\n'
+ '\n'
+ 'The only special operation on a module is attribute access: '
+ '"m.name",\n'
+ 'where *m* is a module and *name* accesses a name defined in '
+ "*m*'s\n"
+ 'symbol table. Module attributes can be assigned to. (Note '
+ 'that the\n'
+ '"import" statement is not, strictly speaking, an operation '
+ 'on a module\n'
+ 'object; "import foo" does not require a module object named '
+ '*foo* to\n'
+ 'exist, rather it requires an (external) *definition* for a '
+ 'module\n'
+ 'named *foo* somewhere.)\n'
+ '\n'
+ 'A special attribute of every module is "__dict__". This is '
+ 'the\n'
+ "dictionary containing the module's symbol table. Modifying "
+ 'this\n'
+ "dictionary will actually change the module's symbol table, "
+ 'but direct\n'
+ 'assignment to the "__dict__" attribute is not possible (you '
+ 'can write\n'
+ '"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but '
+ "you can't\n"
+ 'write "m.__dict__ = {}"). Modifying "__dict__" directly is '
+ 'not\n'
+ 'recommended.\n'
+ '\n'
+ 'Modules built into the interpreter are written like this: '
+ '"<module\n'
+ '\'sys\' (built-in)>". If loaded from a file, they are '
+ 'written as\n'
+ '"<module \'os\' from '
+ '\'/usr/local/lib/pythonX.Y/os.pyc\'>".\n',
+ 'typesseq': '\n'
+ 'Sequence Types --- "str", "unicode", "list", "tuple", '
+ '"bytearray", "buffer", "xrange"\n'
+ '*************************************************************************************\n'
+ '\n'
+ 'There are seven sequence types: strings, Unicode strings, '
+ 'lists,\n'
+ 'tuples, bytearrays, buffers, and xrange objects.\n'
+ '\n'
+ 'For other containers see the built in "dict" and "set" classes, '
+ 'and\n'
+ 'the "collections" module.\n'
+ '\n'
+ 'String literals are written in single or double quotes: '
+ '"\'xyzzy\'",\n'
+ '""frobozz"". See String literals for more about string '
+ 'literals.\n'
+ 'Unicode strings are much like strings, but are specified in the '
+ 'syntax\n'
+ 'using a preceding "\'u\'" character: "u\'abc\'", "u"def"". In '
+ 'addition to\n'
+ 'the functionality described here, there are also '
+ 'string-specific\n'
+ 'methods described in the String Methods section. Lists are '
+ 'constructed\n'
+ 'with square brackets, separating items with commas: "[a, b, '
+ 'c]".\n'
+ 'Tuples are constructed by the comma operator (not within square\n'
+ 'brackets), with or without enclosing parentheses, but an empty '
+ 'tuple\n'
+ 'must have the enclosing parentheses, such as "a, b, c" or "()". '
+ 'A\n'
+ 'single item tuple must have a trailing comma, such as "(d,)".\n'
+ '\n'
+ 'Bytearray objects are created with the built-in function\n'
+ '"bytearray()".\n'
+ '\n'
+ 'Buffer objects are not directly supported by Python syntax, but '
+ 'can be\n'
+ 'created by calling the built-in function "buffer()". They '
+ "don't\n"
+ 'support concatenation or repetition.\n'
+ '\n'
+ 'Objects of type xrange are similar to buffers in that there is '
+ 'no\n'
+ 'specific syntax to create them, but they are created using the\n'
+ '"xrange()" function. They don\'t support slicing, concatenation '
+ 'or\n'
+ 'repetition, and using "in", "not in", "min()" or "max()" on them '
+ 'is\n'
+ 'inefficient.\n'
+ '\n'
+ 'Most sequence types support the following operations. The "in" '
+ 'and\n'
+ '"not in" operations have the same priorities as the comparison\n'
+ 'operations. The "+" and "*" operations have the same priority '
+ 'as the\n'
+ 'corresponding numeric operations. [3] Additional methods are '
+ 'provided\n'
+ 'for Mutable Sequence Types.\n'
+ '\n'
+ 'This table lists the sequence operations sorted in ascending '
+ 'priority.\n'
+ 'In the table, *s* and *t* are sequences of the same type; *n*, '
+ '*i* and\n'
+ '*j* are integers:\n'
+ '\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| Operation | Result | '
+ 'Notes |\n'
+ '+====================+==================================+============+\n'
+ '| "x in s" | "True" if an item of *s* is | '
+ '(1) |\n'
+ '| | equal to *x*, else "False" '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "x not in s" | "False" if an item of *s* is | '
+ '(1) |\n'
+ '| | equal to *x*, else "True" '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s + t" | the concatenation of *s* and *t* | '
+ '(6) |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s * n, n * s" | equivalent to adding *s* to | '
+ '(2) |\n'
+ '| | itself *n* times '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s[i]" | *i*th item of *s*, origin 0 | '
+ '(3) |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s[i:j]" | slice of *s* from *i* to *j* | '
+ '(3)(4) |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s[i:j:k]" | slice of *s* from *i* to *j* | '
+ '(3)(5) |\n'
+ '| | with step *k* '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "len(s)" | length of *s* '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "min(s)" | smallest item of *s* '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "max(s)" | largest item of *s* '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s.index(x)" | index of the first occurrence of '
+ '| |\n'
+ '| | *x* in *s* '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '| "s.count(x)" | total number of occurrences of '
+ '| |\n'
+ '| | *x* in *s* '
+ '| |\n'
+ '+--------------------+----------------------------------+------------+\n'
+ '\n'
+ 'Sequence types also support comparisons. In particular, tuples '
+ 'and\n'
+ 'lists are compared lexicographically by comparing corresponding\n'
+ 'elements. This means that to compare equal, every element must '
+ 'compare\n'
+ 'equal and the two sequences must be of the same type and have '
+ 'the same\n'
+ 'length. (For full details see Comparisons in the language '
+ 'reference.)\n'
+ '\n'
+ 'Notes:\n'
+ '\n'
+ '1. When *s* is a string or Unicode string object the "in" and '
+ '"not\n'
+ ' in" operations act like a substring test. In Python '
+ 'versions\n'
+ ' before 2.3, *x* had to be a string of length 1. In Python 2.3 '
+ 'and\n'
+ ' beyond, *x* may be a string of any length.\n'
+ '\n'
+ '2. Values of *n* less than "0" are treated as "0" (which yields '
+ 'an\n'
+ ' empty sequence of the same type as *s*). Note that items in '
+ 'the\n'
+ ' sequence *s* are not copied; they are referenced multiple '
+ 'times.\n'
+ ' This often haunts new Python programmers; consider:\n'
+ '\n'
+ ' >>> lists = [[]] * 3\n'
+ ' >>> lists\n'
+ ' [[], [], []]\n'
+ ' >>> lists[0].append(3)\n'
+ ' >>> lists\n'
+ ' [[3], [3], [3]]\n'
+ '\n'
+ ' What has happened is that "[[]]" is a one-element list '
+ 'containing\n'
+ ' an empty list, so all three elements of "[[]] * 3" are '
+ 'references\n'
+ ' to this single empty list. Modifying any of the elements of\n'
+ ' "lists" modifies this single list. You can create a list of\n'
+ ' different lists this way:\n'
+ '\n'
+ ' >>> lists = [[] for i in range(3)]\n'
+ ' >>> lists[0].append(3)\n'
+ ' >>> lists[1].append(5)\n'
+ ' >>> lists[2].append(7)\n'
+ ' >>> lists\n'
+ ' [[3], [5], [7]]\n'
+ '\n'
+ ' Further explanation is available in the FAQ entry How do I '
+ 'create a\n'
+ ' multidimensional list?.\n'
+ '\n'
+ '3. If *i* or *j* is negative, the index is relative to the end '
+ 'of\n'
+ ' sequence *s*: "len(s) + i" or "len(s) + j" is substituted. '
+ 'But\n'
+ ' note that "-0" is still "0".\n'
+ '\n'
+ '4. The slice of *s* from *i* to *j* is defined as the sequence '
+ 'of\n'
+ ' items with index *k* such that "i <= k < j". If *i* or *j* '
+ 'is\n'
+ ' greater than "len(s)", use "len(s)". If *i* is omitted or '
+ '"None",\n'
+ ' use "0". If *j* is omitted or "None", use "len(s)". If *i* '
+ 'is\n'
+ ' greater than or equal to *j*, the slice is empty.\n'
+ '\n'
+ '5. The slice of *s* from *i* to *j* with step *k* is defined as '
+ 'the\n'
+ ' sequence of items with index "x = i + n*k" such that "0 <= n '
+ '<\n'
+ ' (j-i)/k". In other words, the indices are "i", "i+k", '
+ '"i+2*k",\n'
+ ' "i+3*k" and so on, stopping when *j* is reached (but never\n'
+ ' including *j*). When *k* is positive, *i* and *j* are '
+ 'reduced to\n'
+ ' "len(s)" if they are greater. When *k* is negative, *i* and '
+ '*j* are\n'
+ ' reduced to "len(s) - 1" if they are greater. If *i* or *j* '
+ 'are\n'
+ ' omitted or "None", they become "end" values (which end '
+ 'depends on\n'
+ ' the sign of *k*). Note, *k* cannot be zero. If *k* is '
+ '"None", it\n'
+ ' is treated like "1".\n'
+ '\n'
+ '6. **CPython implementation detail:** If *s* and *t* are both\n'
+ ' strings, some Python implementations such as CPython can '
+ 'usually\n'
+ ' perform an in-place optimization for assignments of the form '
+ '"s = s\n'
+ ' + t" or "s += t". When applicable, this optimization makes\n'
+ ' quadratic run-time much less likely. This optimization is '
+ 'both\n'
+ ' version and implementation dependent. For performance '
+ 'sensitive\n'
+ ' code, it is preferable to use the "str.join()" method which '
+ 'assures\n'
+ ' consistent linear concatenation performance across versions '
+ 'and\n'
+ ' implementations.\n'
+ '\n'
+ ' Changed in version 2.4: Formerly, string concatenation never\n'
+ ' occurred in-place.\n'
+ '\n'
+ '\n'
+ 'String Methods\n'
+ '==============\n'
+ '\n'
+ 'Below are listed the string methods which both 8-bit strings '
+ 'and\n'
+ 'Unicode objects support. Some of them are also available on\n'
+ '"bytearray" objects.\n'
+ '\n'
+ "In addition, Python's strings support the sequence type methods\n"
+ 'described in the Sequence Types --- str, unicode, list, tuple,\n'
+ 'bytearray, buffer, xrange section. To output formatted strings '
+ 'use\n'
+ 'template strings or the "%" operator described in the String\n'
+ 'Formatting Operations section. Also, see the "re" module for '
+ 'string\n'
+ 'functions based on regular expressions.\n'
+ '\n'
+ 'str.capitalize()\n'
+ '\n'
+ ' Return a copy of the string with its first character '
+ 'capitalized\n'
+ ' and the rest lowercased.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.center(width[, fillchar])\n'
+ '\n'
+ ' Return centered in a string of length *width*. Padding is '
+ 'done\n'
+ ' using the specified *fillchar* (default is a space).\n'
+ '\n'
+ ' Changed in version 2.4: Support for the *fillchar* argument.\n'
+ '\n'
+ 'str.count(sub[, start[, end]])\n'
+ '\n'
+ ' Return the number of non-overlapping occurrences of substring '
+ '*sub*\n'
+ ' in the range [*start*, *end*]. Optional arguments *start* '
+ 'and\n'
+ ' *end* are interpreted as in slice notation.\n'
+ '\n'
+ 'str.decode([encoding[, errors]])\n'
+ '\n'
+ ' Decodes the string using the codec registered for '
+ '*encoding*.\n'
+ ' *encoding* defaults to the default string encoding. *errors* '
+ 'may\n'
+ ' be given to set a different error handling scheme. The '
+ 'default is\n'
+ ' "\'strict\'", meaning that encoding errors raise '
+ '"UnicodeError".\n'
+ ' Other possible values are "\'ignore\'", "\'replace\'" and any '
+ 'other\n'
+ ' name registered via "codecs.register_error()", see section '
+ 'Codec\n'
+ ' Base Classes.\n'
+ '\n'
+ ' New in version 2.2.\n'
+ '\n'
+ ' Changed in version 2.3: Support for other error handling '
+ 'schemes\n'
+ ' added.\n'
+ '\n'
+ ' Changed in version 2.7: Support for keyword arguments added.\n'
+ '\n'
+ 'str.encode([encoding[, errors]])\n'
+ '\n'
+ ' Return an encoded version of the string. Default encoding is '
+ 'the\n'
+ ' current default string encoding. *errors* may be given to '
+ 'set a\n'
+ ' different error handling scheme. The default for *errors* '
+ 'is\n'
+ ' "\'strict\'", meaning that encoding errors raise a '
+ '"UnicodeError".\n'
+ ' Other possible values are "\'ignore\'", "\'replace\'",\n'
+ ' "\'xmlcharrefreplace\'", "\'backslashreplace\'" and any other '
+ 'name\n'
+ ' registered via "codecs.register_error()", see section Codec '
+ 'Base\n'
+ ' Classes. For a list of possible encodings, see section '
+ 'Standard\n'
+ ' Encodings.\n'
+ '\n'
+ ' New in version 2.0.\n'
+ '\n'
+ ' Changed in version 2.3: Support for "\'xmlcharrefreplace\'" '
+ 'and\n'
+ ' "\'backslashreplace\'" and other error handling schemes '
+ 'added.\n'
+ '\n'
+ ' Changed in version 2.7: Support for keyword arguments added.\n'
+ '\n'
+ 'str.endswith(suffix[, start[, end]])\n'
+ '\n'
+ ' Return "True" if the string ends with the specified '
+ '*suffix*,\n'
+ ' otherwise return "False". *suffix* can also be a tuple of '
+ 'suffixes\n'
+ ' to look for. With optional *start*, test beginning at that\n'
+ ' position. With optional *end*, stop comparing at that '
+ 'position.\n'
+ '\n'
+ ' Changed in version 2.5: Accept tuples as *suffix*.\n'
+ '\n'
+ 'str.expandtabs([tabsize])\n'
+ '\n'
+ ' Return a copy of the string where all tab characters are '
+ 'replaced\n'
+ ' by one or more spaces, depending on the current column and '
+ 'the\n'
+ ' given tab size. Tab positions occur every *tabsize* '
+ 'characters\n'
+ ' (default is 8, giving tab positions at columns 0, 8, 16 and '
+ 'so on).\n'
+ ' To expand the string, the current column is set to zero and '
+ 'the\n'
+ ' string is examined character by character. If the character '
+ 'is a\n'
+ ' tab ("\\t"), one or more space characters are inserted in the '
+ 'result\n'
+ ' until the current column is equal to the next tab position. '
+ '(The\n'
+ ' tab character itself is not copied.) If the character is a '
+ 'newline\n'
+ ' ("\\n") or return ("\\r"), it is copied and the current '
+ 'column is\n'
+ ' reset to zero. Any other character is copied unchanged and '
+ 'the\n'
+ ' current column is incremented by one regardless of how the\n'
+ ' character is represented when printed.\n'
+ '\n'
+ " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n"
+ " '01 012 0123 01234'\n"
+ " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n"
+ " '01 012 0123 01234'\n"
+ '\n'
+ 'str.find(sub[, start[, end]])\n'
+ '\n'
+ ' Return the lowest index in the string where substring *sub* '
+ 'is\n'
+ ' found within the slice "s[start:end]". Optional arguments '
+ '*start*\n'
+ ' and *end* are interpreted as in slice notation. Return "-1" '
+ 'if\n'
+ ' *sub* is not found.\n'
+ '\n'
+ ' Note: The "find()" method should be used only if you need to '
+ 'know\n'
+ ' the position of *sub*. To check if *sub* is a substring or '
+ 'not,\n'
+ ' use the "in" operator:\n'
+ '\n'
+ " >>> 'Py' in 'Python'\n"
+ ' True\n'
+ '\n'
+ 'str.format(*args, **kwargs)\n'
+ '\n'
+ ' Perform a string formatting operation. The string on which '
+ 'this\n'
+ ' method is called can contain literal text or replacement '
+ 'fields\n'
+ ' delimited by braces "{}". Each replacement field contains '
+ 'either\n'
+ ' the numeric index of a positional argument, or the name of a\n'
+ ' keyword argument. Returns a copy of the string where each\n'
+ ' replacement field is replaced with the string value of the\n'
+ ' corresponding argument.\n'
+ '\n'
+ ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n'
+ " 'The sum of 1 + 2 is 3'\n"
+ '\n'
+ ' See Format String Syntax for a description of the various\n'
+ ' formatting options that can be specified in format strings.\n'
+ '\n'
+ ' This method of string formatting is the new standard in '
+ 'Python 3,\n'
+ ' and should be preferred to the "%" formatting described in '
+ 'String\n'
+ ' Formatting Operations in new code.\n'
+ '\n'
+ ' New in version 2.6.\n'
+ '\n'
+ 'str.index(sub[, start[, end]])\n'
+ '\n'
+ ' Like "find()", but raise "ValueError" when the substring is '
+ 'not\n'
+ ' found.\n'
+ '\n'
+ 'str.isalnum()\n'
+ '\n'
+ ' Return true if all characters in the string are alphanumeric '
+ 'and\n'
+ ' there is at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isalpha()\n'
+ '\n'
+ ' Return true if all characters in the string are alphabetic '
+ 'and\n'
+ ' there is at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isdigit()\n'
+ '\n'
+ ' Return true if all characters in the string are digits and '
+ 'there is\n'
+ ' at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.islower()\n'
+ '\n'
+ ' Return true if all cased characters [4] in the string are '
+ 'lowercase\n'
+ ' and there is at least one cased character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isspace()\n'
+ '\n'
+ ' Return true if there are only whitespace characters in the '
+ 'string\n'
+ ' and there is at least one character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.istitle()\n'
+ '\n'
+ ' Return true if the string is a titlecased string and there is '
+ 'at\n'
+ ' least one character, for example uppercase characters may '
+ 'only\n'
+ ' follow uncased characters and lowercase characters only cased '
+ 'ones.\n'
+ ' Return false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.isupper()\n'
+ '\n'
+ ' Return true if all cased characters [4] in the string are '
+ 'uppercase\n'
+ ' and there is at least one cased character, false otherwise.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.join(iterable)\n'
+ '\n'
+ ' Return a string which is the concatenation of the strings in\n'
+ ' *iterable*. A "TypeError" will be raised if there are any '
+ 'non-\n'
+ ' string values in *iterable*, including "bytes" objects. The\n'
+ ' separator between elements is the string providing this '
+ 'method.\n'
+ '\n'
+ 'str.ljust(width[, fillchar])\n'
+ '\n'
+ ' Return the string left justified in a string of length '
+ '*width*.\n'
+ ' Padding is done using the specified *fillchar* (default is a\n'
+ ' space). The original string is returned if *width* is less '
+ 'than or\n'
+ ' equal to "len(s)".\n'
+ '\n'
+ ' Changed in version 2.4: Support for the *fillchar* argument.\n'
+ '\n'
+ 'str.lower()\n'
+ '\n'
+ ' Return a copy of the string with all the cased characters '
+ '[4]\n'
+ ' converted to lowercase.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.lstrip([chars])\n'
+ '\n'
+ ' Return a copy of the string with leading characters removed. '
+ 'The\n'
+ ' *chars* argument is a string specifying the set of characters '
+ 'to be\n'
+ ' removed. If omitted or "None", the *chars* argument defaults '
+ 'to\n'
+ ' removing whitespace. The *chars* argument is not a prefix; '
+ 'rather,\n'
+ ' all combinations of its values are stripped:\n'
+ '\n'
+ " >>> ' spacious '.lstrip()\n"
+ " 'spacious '\n"
+ " >>> 'www.example.com'.lstrip('cmowz.')\n"
+ " 'example.com'\n"
+ '\n'
+ ' Changed in version 2.2.2: Support for the *chars* argument.\n'
+ '\n'
+ 'str.partition(sep)\n'
+ '\n'
+ ' Split the string at the first occurrence of *sep*, and return '
+ 'a\n'
+ ' 3-tuple containing the part before the separator, the '
+ 'separator\n'
+ ' itself, and the part after the separator. If the separator '
+ 'is not\n'
+ ' found, return a 3-tuple containing the string itself, '
+ 'followed by\n'
+ ' two empty strings.\n'
+ '\n'
+ ' New in version 2.5.\n'
+ '\n'
+ 'str.replace(old, new[, count])\n'
+ '\n'
+ ' Return a copy of the string with all occurrences of substring '
+ '*old*\n'
+ ' replaced by *new*. If the optional argument *count* is '
+ 'given, only\n'
+ ' the first *count* occurrences are replaced.\n'
+ '\n'
+ 'str.rfind(sub[, start[, end]])\n'
+ '\n'
+ ' Return the highest index in the string where substring *sub* '
+ 'is\n'
+ ' found, such that *sub* is contained within "s[start:end]".\n'
+ ' Optional arguments *start* and *end* are interpreted as in '
+ 'slice\n'
+ ' notation. Return "-1" on failure.\n'
+ '\n'
+ 'str.rindex(sub[, start[, end]])\n'
+ '\n'
+ ' Like "rfind()" but raises "ValueError" when the substring '
+ '*sub* is\n'
+ ' not found.\n'
+ '\n'
+ 'str.rjust(width[, fillchar])\n'
+ '\n'
+ ' Return the string right justified in a string of length '
+ '*width*.\n'
+ ' Padding is done using the specified *fillchar* (default is a\n'
+ ' space). The original string is returned if *width* is less '
+ 'than or\n'
+ ' equal to "len(s)".\n'
+ '\n'
+ ' Changed in version 2.4: Support for the *fillchar* argument.\n'
+ '\n'
+ 'str.rpartition(sep)\n'
+ '\n'
+ ' Split the string at the last occurrence of *sep*, and return '
+ 'a\n'
+ ' 3-tuple containing the part before the separator, the '
+ 'separator\n'
+ ' itself, and the part after the separator. If the separator '
+ 'is not\n'
+ ' found, return a 3-tuple containing two empty strings, '
+ 'followed by\n'
+ ' the string itself.\n'
+ '\n'
+ ' New in version 2.5.\n'
+ '\n'
+ 'str.rsplit([sep[, maxsplit]])\n'
+ '\n'
+ ' Return a list of the words in the string, using *sep* as the\n'
+ ' delimiter string. If *maxsplit* is given, at most *maxsplit* '
+ 'splits\n'
+ ' are done, the *rightmost* ones. If *sep* is not specified '
+ 'or\n'
+ ' "None", any whitespace string is a separator. Except for '
+ 'splitting\n'
+ ' from the right, "rsplit()" behaves like "split()" which is\n'
+ ' described in detail below.\n'
+ '\n'
+ ' New in version 2.4.\n'
+ '\n'
+ 'str.rstrip([chars])\n'
+ '\n'
+ ' Return a copy of the string with trailing characters '
+ 'removed. The\n'
+ ' *chars* argument is a string specifying the set of characters '
+ 'to be\n'
+ ' removed. If omitted or "None", the *chars* argument defaults '
+ 'to\n'
+ ' removing whitespace. The *chars* argument is not a suffix; '
+ 'rather,\n'
+ ' all combinations of its values are stripped:\n'
+ '\n'
+ " >>> ' spacious '.rstrip()\n"
+ " ' spacious'\n"
+ " >>> 'mississippi'.rstrip('ipz')\n"
+ " 'mississ'\n"
+ '\n'
+ ' Changed in version 2.2.2: Support for the *chars* argument.\n'
+ '\n'
+ 'str.split([sep[, maxsplit]])\n'
+ '\n'
+ ' Return a list of the words in the string, using *sep* as the\n'
+ ' delimiter string. If *maxsplit* is given, at most '
+ '*maxsplit*\n'
+ ' splits are done (thus, the list will have at most '
+ '"maxsplit+1"\n'
+ ' elements). If *maxsplit* is not specified or "-1", then '
+ 'there is\n'
+ ' no limit on the number of splits (all possible splits are '
+ 'made).\n'
+ '\n'
+ ' If *sep* is given, consecutive delimiters are not grouped '
+ 'together\n'
+ ' and are deemed to delimit empty strings (for example,\n'
+ ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The '
+ '*sep* argument\n'
+ ' may consist of multiple characters (for example,\n'
+ ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). '
+ 'Splitting an\n'
+ ' empty string with a specified separator returns "[\'\']".\n'
+ '\n'
+ ' If *sep* is not specified or is "None", a different '
+ 'splitting\n'
+ ' algorithm is applied: runs of consecutive whitespace are '
+ 'regarded\n'
+ ' as a single separator, and the result will contain no empty '
+ 'strings\n'
+ ' at the start or end if the string has leading or trailing\n'
+ ' whitespace. Consequently, splitting an empty string or a '
+ 'string\n'
+ ' consisting of just whitespace with a "None" separator returns '
+ '"[]".\n'
+ '\n'
+ ' For example, "\' 1 2 3 \'.split()" returns "[\'1\', '
+ '\'2\', \'3\']", and\n'
+ ' "\' 1 2 3 \'.split(None, 1)" returns "[\'1\', \'2 3 '
+ '\']".\n'
+ '\n'
+ 'str.splitlines([keepends])\n'
+ '\n'
+ ' Return a list of the lines in the string, breaking at line\n'
+ ' boundaries. This method uses the *universal newlines* '
+ 'approach to\n'
+ ' splitting lines. Line breaks are not included in the '
+ 'resulting list\n'
+ ' unless *keepends* is given and true.\n'
+ '\n'
+ ' Python recognizes ""\\r"", ""\\n"", and ""\\r\\n"" as line '
+ 'boundaries\n'
+ ' for 8-bit strings.\n'
+ '\n'
+ ' For example:\n'
+ '\n'
+ " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines()\n"
+ " ['ab c', '', 'de fg', 'kl']\n"
+ " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines(True)\n"
+ " ['ab c\\n', '\\n', 'de fg\\r', 'kl\\r\\n']\n"
+ '\n'
+ ' Unlike "split()" when a delimiter string *sep* is given, '
+ 'this\n'
+ ' method returns an empty list for the empty string, and a '
+ 'terminal\n'
+ ' line break does not result in an extra line:\n'
+ '\n'
+ ' >>> "".splitlines()\n'
+ ' []\n'
+ ' >>> "One line\\n".splitlines()\n'
+ " ['One line']\n"
+ '\n'
+ ' For comparison, "split(\'\\n\')" gives:\n'
+ '\n'
+ " >>> ''.split('\\n')\n"
+ " ['']\n"
+ " >>> 'Two lines\\n'.split('\\n')\n"
+ " ['Two lines', '']\n"
+ '\n'
+ 'unicode.splitlines([keepends])\n'
+ '\n'
+ ' Return a list of the lines in the string, like '
+ '"str.splitlines()".\n'
+ ' However, the Unicode method splits on the following line\n'
+ ' boundaries, which are a superset of the *universal newlines*\n'
+ ' recognized for 8-bit strings.\n'
+ '\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | Representation | Description |\n'
+ ' +=========================+===============================+\n'
+ ' | "\\n" | Line Feed |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\r" | Carriage Return |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\r\\n" | Carriage Return + Line Feed '
+ '|\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\v" or "\\x0b" | Line Tabulation '
+ '|\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\f" or "\\x0c" | Form Feed '
+ '|\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\x1c" | File Separator |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\x1d" | Group Separator |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\x1e" | Record Separator |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\x85" | Next Line (C1 Control Code) |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\u2028" | Line Separator |\n'
+ ' +-------------------------+-------------------------------+\n'
+ ' | "\\u2029" | Paragraph Separator |\n'
+ ' +-------------------------+-------------------------------+\n'
+ '\n'
+ ' Changed in version 2.7: "\\v" and "\\f" added to list of '
+ 'line\n'
+ ' boundaries.\n'
+ '\n'
+ 'str.startswith(prefix[, start[, end]])\n'
+ '\n'
+ ' Return "True" if string starts with the *prefix*, otherwise '
+ 'return\n'
+ ' "False". *prefix* can also be a tuple of prefixes to look '
+ 'for.\n'
+ ' With optional *start*, test string beginning at that '
+ 'position.\n'
+ ' With optional *end*, stop comparing string at that position.\n'
+ '\n'
+ ' Changed in version 2.5: Accept tuples as *prefix*.\n'
+ '\n'
+ 'str.strip([chars])\n'
+ '\n'
+ ' Return a copy of the string with the leading and trailing\n'
+ ' characters removed. The *chars* argument is a string '
+ 'specifying the\n'
+ ' set of characters to be removed. If omitted or "None", the '
+ '*chars*\n'
+ ' argument defaults to removing whitespace. The *chars* '
+ 'argument is\n'
+ ' not a prefix or suffix; rather, all combinations of its '
+ 'values are\n'
+ ' stripped:\n'
+ '\n'
+ " >>> ' spacious '.strip()\n"
+ " 'spacious'\n"
+ " >>> 'www.example.com'.strip('cmowz.')\n"
+ " 'example'\n"
+ '\n'
+ ' Changed in version 2.2.2: Support for the *chars* argument.\n'
+ '\n'
+ 'str.swapcase()\n'
+ '\n'
+ ' Return a copy of the string with uppercase characters '
+ 'converted to\n'
+ ' lowercase and vice versa.\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.title()\n'
+ '\n'
+ ' Return a titlecased version of the string where words start '
+ 'with an\n'
+ ' uppercase character and the remaining characters are '
+ 'lowercase.\n'
+ '\n'
+ ' The algorithm uses a simple language-independent definition '
+ 'of a\n'
+ ' word as groups of consecutive letters. The definition works '
+ 'in\n'
+ ' many contexts but it means that apostrophes in contractions '
+ 'and\n'
+ ' possessives form word boundaries, which may not be the '
+ 'desired\n'
+ ' result:\n'
+ '\n'
+ ' >>> "they\'re bill\'s friends from the UK".title()\n'
+ ' "They\'Re Bill\'S Friends From The Uk"\n'
+ '\n'
+ ' A workaround for apostrophes can be constructed using '
+ 'regular\n'
+ ' expressions:\n'
+ '\n'
+ ' >>> import re\n'
+ ' >>> def titlecase(s):\n'
+ ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n'
+ ' ... lambda mo: mo.group(0)[0].upper() +\n'
+ ' ... mo.group(0)[1:].lower(),\n'
+ ' ... s)\n'
+ ' ...\n'
+ ' >>> titlecase("they\'re bill\'s friends.")\n'
+ ' "They\'re Bill\'s Friends."\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.translate(table[, deletechars])\n'
+ '\n'
+ ' Return a copy of the string where all characters occurring in '
+ 'the\n'
+ ' optional argument *deletechars* are removed, and the '
+ 'remaining\n'
+ ' characters have been mapped through the given translation '
+ 'table,\n'
+ ' which must be a string of length 256.\n'
+ '\n'
+ ' You can use the "maketrans()" helper function in the '
+ '"string"\n'
+ ' module to create a translation table. For string objects, set '
+ 'the\n'
+ ' *table* argument to "None" for translations that only delete\n'
+ ' characters:\n'
+ '\n'
+ " >>> 'read this short text'.translate(None, 'aeiou')\n"
+ " 'rd ths shrt txt'\n"
+ '\n'
+ ' New in version 2.6: Support for a "None" *table* argument.\n'
+ '\n'
+ ' For Unicode objects, the "translate()" method does not accept '
+ 'the\n'
+ ' optional *deletechars* argument. Instead, it returns a copy '
+ 'of the\n'
+ ' *s* where all characters have been mapped through the given\n'
+ ' translation table which must be a mapping of Unicode ordinals '
+ 'to\n'
+ ' Unicode ordinals, Unicode strings or "None". Unmapped '
+ 'characters\n'
+ ' are left untouched. Characters mapped to "None" are deleted. '
+ 'Note,\n'
+ ' a more flexible approach is to create a custom character '
+ 'mapping\n'
+ ' codec using the "codecs" module (see "encodings.cp1251" for '
+ 'an\n'
+ ' example).\n'
+ '\n'
+ 'str.upper()\n'
+ '\n'
+ ' Return a copy of the string with all the cased characters '
+ '[4]\n'
+ ' converted to uppercase. Note that "str.upper().isupper()" '
+ 'might be\n'
+ ' "False" if "s" contains uncased characters or if the Unicode\n'
+ ' category of the resulting character(s) is not "Lu" (Letter,\n'
+ ' uppercase), but e.g. "Lt" (Letter, titlecase).\n'
+ '\n'
+ ' For 8-bit strings, this method is locale-dependent.\n'
+ '\n'
+ 'str.zfill(width)\n'
+ '\n'
+ ' Return the numeric string left filled with zeros in a string '
+ 'of\n'
+ ' length *width*. A sign prefix is handled correctly. The '
+ 'original\n'
+ ' string is returned if *width* is less than or equal to '
+ '"len(s)".\n'
+ '\n'
+ ' New in version 2.2.2.\n'
+ '\n'
+ 'The following methods are present only on unicode objects:\n'
+ '\n'
+ 'unicode.isnumeric()\n'
+ '\n'
+ ' Return "True" if there are only numeric characters in S, '
+ '"False"\n'
+ ' otherwise. Numeric characters include digit characters, and '
+ 'all\n'
+ ' characters that have the Unicode numeric value property, '
+ 'e.g.\n'
+ ' U+2155, VULGAR FRACTION ONE FIFTH.\n'
+ '\n'
+ 'unicode.isdecimal()\n'
+ '\n'
+ ' Return "True" if there are only decimal characters in S, '
+ '"False"\n'
+ ' otherwise. Decimal characters include digit characters, and '
+ 'all\n'
+ ' characters that can be used to form decimal-radix numbers, '
+ 'e.g.\n'
+ ' U+0660, ARABIC-INDIC DIGIT ZERO.\n'
+ '\n'
+ '\n'
+ 'String Formatting Operations\n'
+ '============================\n'
+ '\n'
+ 'String and Unicode objects have one unique built-in operation: '
+ 'the "%"\n'
+ 'operator (modulo). This is also known as the string '
+ '*formatting* or\n'
+ '*interpolation* operator. Given "format % values" (where '
+ '*format* is\n'
+ 'a string or Unicode object), "%" conversion specifications in '
+ '*format*\n'
+ 'are replaced with zero or more elements of *values*. The effect '
+ 'is\n'
+ 'similar to the using "sprintf()" in the C language. If *format* '
+ 'is a\n'
+ 'Unicode object, or if any of the objects being converted using '
+ 'the\n'
+ '"%s" conversion are Unicode objects, the result will also be a '
+ 'Unicode\n'
+ 'object.\n'
+ '\n'
+ 'If *format* requires a single argument, *values* may be a single '
+ 'non-\n'
+ 'tuple object. [5] Otherwise, *values* must be a tuple with '
+ 'exactly\n'
+ 'the number of items specified by the format string, or a single\n'
+ 'mapping object (for example, a dictionary).\n'
+ '\n'
+ 'A conversion specifier contains two or more characters and has '
+ 'the\n'
+ 'following components, which must occur in this order:\n'
+ '\n'
+ '1. The "\'%\'" character, which marks the start of the '
+ 'specifier.\n'
+ '\n'
+ '2. Mapping key (optional), consisting of a parenthesised '
+ 'sequence\n'
+ ' of characters (for example, "(somename)").\n'
+ '\n'
+ '3. Conversion flags (optional), which affect the result of some\n'
+ ' conversion types.\n'
+ '\n'
+ '4. Minimum field width (optional). If specified as an "\'*\'"\n'
+ ' (asterisk), the actual width is read from the next element of '
+ 'the\n'
+ ' tuple in *values*, and the object to convert comes after the\n'
+ ' minimum field width and optional precision.\n'
+ '\n'
+ '5. Precision (optional), given as a "\'.\'" (dot) followed by '
+ 'the\n'
+ ' precision. If specified as "\'*\'" (an asterisk), the actual '
+ 'width\n'
+ ' is read from the next element of the tuple in *values*, and '
+ 'the\n'
+ ' value to convert comes after the precision.\n'
+ '\n'
+ '6. Length modifier (optional).\n'
+ '\n'
+ '7. Conversion type.\n'
+ '\n'
+ 'When the right argument is a dictionary (or other mapping type), '
+ 'then\n'
+ 'the formats in the string *must* include a parenthesised mapping '
+ 'key\n'
+ 'into that dictionary inserted immediately after the "\'%\'" '
+ 'character.\n'
+ 'The mapping key selects the value to be formatted from the '
+ 'mapping.\n'
+ 'For example:\n'
+ '\n'
+ ">>> print '%(language)s has %(number)03d quote types.' % \\\n"
+ '... {"language": "Python", "number": 2}\n'
+ 'Python has 002 quote types.\n'
+ '\n'
+ 'In this case no "*" specifiers may occur in a format (since '
+ 'they\n'
+ 'require a sequential parameter list).\n'
+ '\n'
+ 'The conversion flag characters are:\n'
+ '\n'
+ '+-----------+-----------------------------------------------------------------------+\n'
+ '| Flag | '
+ 'Meaning '
+ '|\n'
+ '+===========+=======================================================================+\n'
+ '| "\'#\'" | The value conversion will use the "alternate '
+ 'form" (where defined |\n'
+ '| | '
+ 'below). '
+ '|\n'
+ '+-----------+-----------------------------------------------------------------------+\n'
+ '| "\'0\'" | The conversion will be zero padded for numeric '
+ 'values. |\n'
+ '+-----------+-----------------------------------------------------------------------+\n'
+ '| "\'-\'" | The converted value is left adjusted (overrides '
+ 'the "\'0\'" conversion |\n'
+ '| | if both are '
+ 'given). |\n'
+ '+-----------+-----------------------------------------------------------------------+\n'
+ '| "\' \'" | (a space) A blank should be left before a '
+ 'positive number (or empty |\n'
+ '| | string) produced by a signed '
+ 'conversion. |\n'
+ '+-----------+-----------------------------------------------------------------------+\n'
+ '| "\'+\'" | A sign character ("\'+\'" or "\'-\'") will '
+ 'precede the conversion |\n'
+ '| | (overrides a "space" '
+ 'flag). |\n'
+ '+-----------+-----------------------------------------------------------------------+\n'
+ '\n'
+ 'A length modifier ("h", "l", or "L") may be present, but is '
+ 'ignored as\n'
+ 'it is not necessary for Python -- so e.g. "%ld" is identical to '
+ '"%d".\n'
+ '\n'
+ 'The conversion types are:\n'
+ '\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| Conversion | '
+ 'Meaning | Notes '
+ '|\n'
+ '+==============+=======================================================+=========+\n'
+ '| "\'d\'" | Signed integer '
+ 'decimal. | |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'i\'" | Signed integer '
+ 'decimal. | |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'o\'" | Signed octal '
+ 'value. | (1) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'u\'" | Obsolete type -- it is identical to '
+ '"\'d\'". | (7) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'x\'" | Signed hexadecimal '
+ '(lowercase). | (2) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'X\'" | Signed hexadecimal '
+ '(uppercase). | (2) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'e\'" | Floating point exponential format '
+ '(lowercase). | (3) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'E\'" | Floating point exponential format '
+ '(uppercase). | (3) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'f\'" | Floating point decimal '
+ 'format. | (3) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'F\'" | Floating point decimal '
+ 'format. | (3) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'g\'" | Floating point format. Uses lowercase '
+ 'exponential | (4) |\n'
+ '| | format if exponent is less than -4 or not less '
+ 'than | |\n'
+ '| | precision, decimal format '
+ 'otherwise. | |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'G\'" | Floating point format. Uses uppercase '
+ 'exponential | (4) |\n'
+ '| | format if exponent is less than -4 or not less '
+ 'than | |\n'
+ '| | precision, decimal format '
+ 'otherwise. | |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'c\'" | Single character (accepts integer or single '
+ 'character | |\n'
+ '| | '
+ 'string). | '
+ '|\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'r\'" | String (converts any Python object using '
+ 'repr()). | (5) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'s\'" | String (converts any Python object using '
+ '"str()"). | (6) |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '| "\'%\'" | No argument is converted, results in a '
+ '"\'%\'" | |\n'
+ '| | character in the '
+ 'result. | |\n'
+ '+--------------+-------------------------------------------------------+---------+\n'
+ '\n'
+ 'Notes:\n'
+ '\n'
+ '1. The alternate form causes a leading zero ("\'0\'") to be '
+ 'inserted\n'
+ ' between left-hand padding and the formatting of the number if '
+ 'the\n'
+ ' leading character of the result is not already a zero.\n'
+ '\n'
+ '2. The alternate form causes a leading "\'0x\'" or "\'0X\'" '
+ '(depending\n'
+ ' on whether the "\'x\'" or "\'X\'" format was used) to be '
+ 'inserted\n'
+ ' before the first digit.\n'
+ '\n'
+ '3. The alternate form causes the result to always contain a '
+ 'decimal\n'
+ ' point, even if no digits follow it.\n'
+ '\n'
+ ' The precision determines the number of digits after the '
+ 'decimal\n'
+ ' point and defaults to 6.\n'
+ '\n'
+ '4. The alternate form causes the result to always contain a '
+ 'decimal\n'
+ ' point, and trailing zeroes are not removed as they would '
+ 'otherwise\n'
+ ' be.\n'
+ '\n'
+ ' The precision determines the number of significant digits '
+ 'before\n'
+ ' and after the decimal point and defaults to 6.\n'
+ '\n'
+ '5. The "%r" conversion was added in Python 2.0.\n'
+ '\n'
+ ' The precision determines the maximal number of characters '
+ 'used.\n'
+ '\n'
+ '6. If the object or format provided is a "unicode" string, the\n'
+ ' resulting string will also be "unicode".\n'
+ '\n'
+ ' The precision determines the maximal number of characters '
+ 'used.\n'
+ '\n'
+ '7. See **PEP 237**.\n'
+ '\n'
+ 'Since Python strings have an explicit length, "%s" conversions '
+ 'do not\n'
+ 'assume that "\'\\0\'" is the end of the string.\n'
+ '\n'
+ 'Changed in version 2.7: "%f" conversions for numbers whose '
+ 'absolute\n'
+ 'value is over 1e50 are no longer replaced by "%g" conversions.\n'
+ '\n'
+ 'Additional string operations are defined in standard modules '
+ '"string"\n'
+ 'and "re".\n'
+ '\n'
+ '\n'
+ 'XRange Type\n'
+ '===========\n'
+ '\n'
+ 'The "xrange" type is an immutable sequence which is commonly '
+ 'used for\n'
+ 'looping. The advantage of the "xrange" type is that an '
+ '"xrange"\n'
+ 'object will always take the same amount of memory, no matter the '
+ 'size\n'
+ 'of the range it represents. There are no consistent '
+ 'performance\n'
+ 'advantages.\n'
+ '\n'
+ 'XRange objects have very little behavior: they only support '
+ 'indexing,\n'
+ 'iteration, and the "len()" function.\n'
+ '\n'
+ '\n'
+ 'Mutable Sequence Types\n'
+ '======================\n'
+ '\n'
+ 'List and "bytearray" objects support additional operations that '
+ 'allow\n'
+ 'in-place modification of the object. Other mutable sequence '
+ 'types\n'
+ '(when added to the language) should also support these '
+ 'operations.\n'
+ 'Strings and tuples are immutable sequence types: such objects '
+ 'cannot\n'
+ 'be modified once created. The following operations are defined '
+ 'on\n'
+ 'mutable sequence types (where *x* is an arbitrary object):\n'
+ '\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| Operation | '
+ 'Result | Notes |\n'
+ '+================================+==================================+=======================+\n'
+ '| "s[i] = x" | item *i* of *s* is replaced '
+ 'by | |\n'
+ '| | '
+ '*x* | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s[i:j] = t" | slice of *s* from *i* to *j* '
+ 'is | |\n'
+ '| | replaced by the contents of '
+ 'the | |\n'
+ '| | iterable '
+ '*t* | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "del s[i:j]" | same as "s[i:j] = '
+ '[]" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s[i:j:k] = t" | the elements of "s[i:j:k]" '
+ 'are | (1) |\n'
+ '| | replaced by those of '
+ '*t* | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "del s[i:j:k]" | removes the elements '
+ 'of | |\n'
+ '| | "s[i:j:k]" from the '
+ 'list | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.append(x)" | same as "s[len(s):len(s)] = '
+ '[x]" | (2) |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.extend(t)" or "s += t" | for the most part the same '
+ 'as | (3) |\n'
+ '| | "s[len(s):len(s)] = '
+ 't" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s *= n" | updates *s* with its '
+ 'contents | (11) |\n'
+ '| | repeated *n* '
+ 'times | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.count(x)" | return number of *i*\'s for '
+ 'which | |\n'
+ '| | "s[i] == '
+ 'x" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.index(x[, i[, j]])" | return smallest *k* such '
+ 'that | (4) |\n'
+ '| | "s[k] == x" and "i <= k < '
+ 'j" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.insert(i, x)" | same as "s[i:i] = '
+ '[x]" | (5) |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.pop([i])" | same as "x = s[i]; del '
+ 's[i]; | (6) |\n'
+ '| | return '
+ 'x" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.remove(x)" | same as "del '
+ 's[s.index(x)]" | (4) |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.reverse()" | reverses the items of *s* '
+ 'in | (7) |\n'
+ '| | '
+ 'place | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.sort([cmp[, key[, | sort the items of *s* in '
+ 'place | (7)(8)(9)(10) |\n'
+ '| reverse]]])" '
+ '| | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '\n'
+ 'Notes:\n'
+ '\n'
+ '1. *t* must have the same length as the slice it is replacing.\n'
+ '\n'
+ '2. The C implementation of Python has historically accepted\n'
+ ' multiple parameters and implicitly joined them into a tuple; '
+ 'this\n'
+ ' no longer works in Python 2.0. Use of this misfeature has '
+ 'been\n'
+ ' deprecated since Python 1.4.\n'
+ '\n'
+ '3. *t* can be any iterable object.\n'
+ '\n'
+ '4. Raises "ValueError" when *x* is not found in *s*. When a\n'
+ ' negative index is passed as the second or third parameter to '
+ 'the\n'
+ ' "index()" method, the list length is added, as for slice '
+ 'indices.\n'
+ ' If it is still negative, it is truncated to zero, as for '
+ 'slice\n'
+ ' indices.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, "index()" didn\'t have '
+ 'arguments\n'
+ ' for specifying start and stop positions.\n'
+ '\n'
+ '5. When a negative index is passed as the first parameter to '
+ 'the\n'
+ ' "insert()" method, the list length is added, as for slice '
+ 'indices.\n'
+ ' If it is still negative, it is truncated to zero, as for '
+ 'slice\n'
+ ' indices.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, all negative indices '
+ 'were\n'
+ ' truncated to zero.\n'
+ '\n'
+ '6. The "pop()" method\'s optional argument *i* defaults to "-1", '
+ 'so\n'
+ ' that by default the last item is removed and returned.\n'
+ '\n'
+ '7. The "sort()" and "reverse()" methods modify the list in '
+ 'place\n'
+ ' for economy of space when sorting or reversing a large list. '
+ 'To\n'
+ " remind you that they operate by side effect, they don't "
+ 'return the\n'
+ ' sorted or reversed list.\n'
+ '\n'
+ '8. The "sort()" method takes optional arguments for controlling '
+ 'the\n'
+ ' comparisons.\n'
+ '\n'
+ ' *cmp* specifies a custom comparison function of two arguments '
+ '(list\n'
+ ' items) which should return a negative, zero or positive '
+ 'number\n'
+ ' depending on whether the first argument is considered smaller '
+ 'than,\n'
+ ' equal to, or larger than the second argument: "cmp=lambda '
+ 'x,y:\n'
+ ' cmp(x.lower(), y.lower())". The default value is "None".\n'
+ '\n'
+ ' *key* specifies a function of one argument that is used to '
+ 'extract\n'
+ ' a comparison key from each list element: "key=str.lower". '
+ 'The\n'
+ ' default value is "None".\n'
+ '\n'
+ ' *reverse* is a boolean value. If set to "True", then the '
+ 'list\n'
+ ' elements are sorted as if each comparison were reversed.\n'
+ '\n'
+ ' In general, the *key* and *reverse* conversion processes are '
+ 'much\n'
+ ' faster than specifying an equivalent *cmp* function. This '
+ 'is\n'
+ ' because *cmp* is called multiple times for each list element '
+ 'while\n'
+ ' *key* and *reverse* touch each element only once. Use\n'
+ ' "functools.cmp_to_key()" to convert an old-style *cmp* '
+ 'function to\n'
+ ' a *key* function.\n'
+ '\n'
+ ' Changed in version 2.3: Support for "None" as an equivalent '
+ 'to\n'
+ ' omitting *cmp* was added.\n'
+ '\n'
+ ' Changed in version 2.4: Support for *key* and *reverse* was '
+ 'added.\n'
+ '\n'
+ '9. Starting with Python 2.3, the "sort()" method is guaranteed '
+ 'to\n'
+ ' be stable. A sort is stable if it guarantees not to change '
+ 'the\n'
+ ' relative order of elements that compare equal --- this is '
+ 'helpful\n'
+ ' for sorting in multiple passes (for example, sort by '
+ 'department,\n'
+ ' then by salary grade).\n'
+ '\n'
+ '10. **CPython implementation detail:** While a list is being\n'
+ ' sorted, the effect of attempting to mutate, or even inspect, '
+ 'the\n'
+ ' list is undefined. The C implementation of Python 2.3 and '
+ 'newer\n'
+ ' makes the list appear empty for the duration, and raises\n'
+ ' "ValueError" if it can detect that the list has been '
+ 'mutated\n'
+ ' during a sort.\n'
+ '\n'
+ '11. The value *n* is an integer, or an object implementing\n'
+ ' "__index__()". Zero and negative values of *n* clear the\n'
+ ' sequence. Items in the sequence are not copied; they are\n'
+ ' referenced multiple times, as explained for "s * n" under '
+ 'Sequence\n'
+ ' Types --- str, unicode, list, tuple, bytearray, buffer, '
+ 'xrange.\n',
+ 'typesseq-mutable': '\n'
+ 'Mutable Sequence Types\n'
+ '**********************\n'
+ '\n'
+ 'List and "bytearray" objects support additional '
+ 'operations that allow\n'
+ 'in-place modification of the object. Other mutable '
+ 'sequence types\n'
+ '(when added to the language) should also support these '
+ 'operations.\n'
+ 'Strings and tuples are immutable sequence types: such '
+ 'objects cannot\n'
+ 'be modified once created. The following operations are '
+ 'defined on\n'
+ 'mutable sequence types (where *x* is an arbitrary '
+ 'object):\n'
+ '\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| Operation | '
+ 'Result | Notes '
+ '|\n'
+ '+================================+==================================+=======================+\n'
+ '| "s[i] = x" | item *i* of *s* is '
+ 'replaced by | |\n'
+ '| | '
+ '*x* | '
+ '|\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s[i:j] = t" | slice of *s* from *i* '
+ 'to *j* is | |\n'
+ '| | replaced by the '
+ 'contents of the | |\n'
+ '| | iterable '
+ '*t* | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "del s[i:j]" | same as "s[i:j] = '
+ '[]" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s[i:j:k] = t" | the elements of '
+ '"s[i:j:k]" are | (1) |\n'
+ '| | replaced by those of '
+ '*t* | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "del s[i:j:k]" | removes the elements '
+ 'of | |\n'
+ '| | "s[i:j:k]" from the '
+ 'list | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.append(x)" | same as '
+ '"s[len(s):len(s)] = [x]" | (2) |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.extend(t)" or "s += t" | for the most part the '
+ 'same as | (3) |\n'
+ '| | "s[len(s):len(s)] = '
+ 't" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s *= n" | updates *s* with its '
+ 'contents | (11) |\n'
+ '| | repeated *n* '
+ 'times | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.count(x)" | return number of '
+ "*i*'s for which | |\n"
+ '| | "s[i] == '
+ 'x" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.index(x[, i[, j]])" | return smallest *k* '
+ 'such that | (4) |\n'
+ '| | "s[k] == x" and "i <= '
+ 'k < j" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.insert(i, x)" | same as "s[i:i] = '
+ '[x]" | (5) |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.pop([i])" | same as "x = s[i]; '
+ 'del s[i]; | (6) |\n'
+ '| | return '
+ 'x" | |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.remove(x)" | same as "del '
+ 's[s.index(x)]" | (4) |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.reverse()" | reverses the items of '
+ '*s* in | (7) |\n'
+ '| | '
+ 'place | '
+ '|\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '| "s.sort([cmp[, key[, | sort the items of *s* '
+ 'in place | (7)(8)(9)(10) |\n'
+ '| reverse]]])" '
+ '| '
+ '| |\n'
+ '+--------------------------------+----------------------------------+-----------------------+\n'
+ '\n'
+ 'Notes:\n'
+ '\n'
+ '1. *t* must have the same length as the slice it is '
+ 'replacing.\n'
+ '\n'
+ '2. The C implementation of Python has historically '
+ 'accepted\n'
+ ' multiple parameters and implicitly joined them into a '
+ 'tuple; this\n'
+ ' no longer works in Python 2.0. Use of this '
+ 'misfeature has been\n'
+ ' deprecated since Python 1.4.\n'
+ '\n'
+ '3. *t* can be any iterable object.\n'
+ '\n'
+ '4. Raises "ValueError" when *x* is not found in *s*. '
+ 'When a\n'
+ ' negative index is passed as the second or third '
+ 'parameter to the\n'
+ ' "index()" method, the list length is added, as for '
+ 'slice indices.\n'
+ ' If it is still negative, it is truncated to zero, as '
+ 'for slice\n'
+ ' indices.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, "index()" didn\'t '
+ 'have arguments\n'
+ ' for specifying start and stop positions.\n'
+ '\n'
+ '5. When a negative index is passed as the first '
+ 'parameter to the\n'
+ ' "insert()" method, the list length is added, as for '
+ 'slice indices.\n'
+ ' If it is still negative, it is truncated to zero, as '
+ 'for slice\n'
+ ' indices.\n'
+ '\n'
+ ' Changed in version 2.3: Previously, all negative '
+ 'indices were\n'
+ ' truncated to zero.\n'
+ '\n'
+ '6. The "pop()" method\'s optional argument *i* defaults '
+ 'to "-1", so\n'
+ ' that by default the last item is removed and '
+ 'returned.\n'
+ '\n'
+ '7. The "sort()" and "reverse()" methods modify the list '
+ 'in place\n'
+ ' for economy of space when sorting or reversing a '
+ 'large list. To\n'
+ ' remind you that they operate by side effect, they '
+ "don't return the\n"
+ ' sorted or reversed list.\n'
+ '\n'
+ '8. The "sort()" method takes optional arguments for '
+ 'controlling the\n'
+ ' comparisons.\n'
+ '\n'
+ ' *cmp* specifies a custom comparison function of two '
+ 'arguments (list\n'
+ ' items) which should return a negative, zero or '
+ 'positive number\n'
+ ' depending on whether the first argument is considered '
+ 'smaller than,\n'
+ ' equal to, or larger than the second argument: '
+ '"cmp=lambda x,y:\n'
+ ' cmp(x.lower(), y.lower())". The default value is '
+ '"None".\n'
+ '\n'
+ ' *key* specifies a function of one argument that is '
+ 'used to extract\n'
+ ' a comparison key from each list element: '
+ '"key=str.lower". The\n'
+ ' default value is "None".\n'
+ '\n'
+ ' *reverse* is a boolean value. If set to "True", then '
+ 'the list\n'
+ ' elements are sorted as if each comparison were '
+ 'reversed.\n'
+ '\n'
+ ' In general, the *key* and *reverse* conversion '
+ 'processes are much\n'
+ ' faster than specifying an equivalent *cmp* function. '
+ 'This is\n'
+ ' because *cmp* is called multiple times for each list '
+ 'element while\n'
+ ' *key* and *reverse* touch each element only once. '
+ 'Use\n'
+ ' "functools.cmp_to_key()" to convert an old-style '
+ '*cmp* function to\n'
+ ' a *key* function.\n'
+ '\n'
+ ' Changed in version 2.3: Support for "None" as an '
+ 'equivalent to\n'
+ ' omitting *cmp* was added.\n'
+ '\n'
+ ' Changed in version 2.4: Support for *key* and '
+ '*reverse* was added.\n'
+ '\n'
+ '9. Starting with Python 2.3, the "sort()" method is '
+ 'guaranteed to\n'
+ ' be stable. A sort is stable if it guarantees not to '
+ 'change the\n'
+ ' relative order of elements that compare equal --- '
+ 'this is helpful\n'
+ ' for sorting in multiple passes (for example, sort by '
+ 'department,\n'
+ ' then by salary grade).\n'
+ '\n'
+ '10. **CPython implementation detail:** While a list is '
+ 'being\n'
+ ' sorted, the effect of attempting to mutate, or even '
+ 'inspect, the\n'
+ ' list is undefined. The C implementation of Python '
+ '2.3 and newer\n'
+ ' makes the list appear empty for the duration, and '
+ 'raises\n'
+ ' "ValueError" if it can detect that the list has been '
+ 'mutated\n'
+ ' during a sort.\n'
+ '\n'
+ '11. The value *n* is an integer, or an object '
+ 'implementing\n'
+ ' "__index__()". Zero and negative values of *n* '
+ 'clear the\n'
+ ' sequence. Items in the sequence are not copied; '
+ 'they are\n'
+ ' referenced multiple times, as explained for "s * n" '
+ 'under Sequence\n'
+ ' Types --- str, unicode, list, tuple, bytearray, '
+ 'buffer, xrange.\n',
+ 'unary': '\n'
+ 'Unary arithmetic and bitwise operations\n'
+ '***************************************\n'
+ '\n'
+ 'All unary arithmetic and bitwise operations have the same '
+ 'priority:\n'
+ '\n'
+ ' u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n'
+ '\n'
+ 'The unary "-" (minus) operator yields the negation of its numeric\n'
+ 'argument.\n'
+ '\n'
+ 'The unary "+" (plus) operator yields its numeric argument '
+ 'unchanged.\n'
+ '\n'
+ 'The unary "~" (invert) operator yields the bitwise inversion of '
+ 'its\n'
+ 'plain or long integer argument. The bitwise inversion of "x" is\n'
+ 'defined as "-(x+1)". It only applies to integral numbers.\n'
+ '\n'
+ 'In all three cases, if the argument does not have the proper type, '
+ 'a\n'
+ '"TypeError" exception is raised.\n',
+ 'while': '\n'
+ 'The "while" statement\n'
+ '*********************\n'
+ '\n'
+ 'The "while" statement is used for repeated execution as long as an\n'
+ 'expression is true:\n'
+ '\n'
+ ' while_stmt ::= "while" expression ":" suite\n'
+ ' ["else" ":" suite]\n'
+ '\n'
+ 'This repeatedly tests the expression and, if it is true, executes '
+ 'the\n'
+ 'first suite; if the expression is false (which may be the first '
+ 'time\n'
+ 'it is tested) the suite of the "else" clause, if present, is '
+ 'executed\n'
+ 'and the loop terminates.\n'
+ '\n'
+ 'A "break" statement executed in the first suite terminates the '
+ 'loop\n'
+ 'without executing the "else" clause\'s suite. A "continue" '
+ 'statement\n'
+ 'executed in the first suite skips the rest of the suite and goes '
+ 'back\n'
+ 'to testing the expression.\n',
+ 'with': '\n'
+ 'The "with" statement\n'
+ '********************\n'
+ '\n'
+ 'New in version 2.5.\n'
+ '\n'
+ 'The "with" statement is used to wrap the execution of a block with\n'
+ 'methods defined by a context manager (see section With Statement\n'
+ 'Context Managers). This allows common "try"..."except"..."finally"\n'
+ 'usage patterns to be encapsulated for convenient reuse.\n'
+ '\n'
+ ' with_stmt ::= "with" with_item ("," with_item)* ":" suite\n'
+ ' with_item ::= expression ["as" target]\n'
+ '\n'
+ 'The execution of the "with" statement with one "item" proceeds as\n'
+ 'follows:\n'
+ '\n'
+ '1. The context expression (the expression given in the "with_item")\n'
+ ' is evaluated to obtain a context manager.\n'
+ '\n'
+ '2. The context manager\'s "__exit__()" is loaded for later use.\n'
+ '\n'
+ '3. The context manager\'s "__enter__()" method is invoked.\n'
+ '\n'
+ '4. If a target was included in the "with" statement, the return\n'
+ ' value from "__enter__()" is assigned to it.\n'
+ '\n'
+ ' Note: The "with" statement guarantees that if the "__enter__()"\n'
+ ' method returns without an error, then "__exit__()" will always '
+ 'be\n'
+ ' called. Thus, if an error occurs during the assignment to the\n'
+ ' target list, it will be treated the same as an error occurring\n'
+ ' within the suite would be. See step 6 below.\n'
+ '\n'
+ '5. The suite is executed.\n'
+ '\n'
+ '6. The context manager\'s "__exit__()" method is invoked. If an\n'
+ ' exception caused the suite to be exited, its type, value, and\n'
+ ' traceback are passed as arguments to "__exit__()". Otherwise, '
+ 'three\n'
+ ' "None" arguments are supplied.\n'
+ '\n'
+ ' If the suite was exited due to an exception, and the return '
+ 'value\n'
+ ' from the "__exit__()" method was false, the exception is '
+ 'reraised.\n'
+ ' If the return value was true, the exception is suppressed, and\n'
+ ' execution continues with the statement following the "with"\n'
+ ' statement.\n'
+ '\n'
+ ' If the suite was exited for any reason other than an exception, '
+ 'the\n'
+ ' return value from "__exit__()" is ignored, and execution '
+ 'proceeds\n'
+ ' at the normal location for the kind of exit that was taken.\n'
+ '\n'
+ 'With more than one item, the context managers are processed as if\n'
+ 'multiple "with" statements were nested:\n'
+ '\n'
+ ' with A() as a, B() as b:\n'
+ ' suite\n'
+ '\n'
+ 'is equivalent to\n'
+ '\n'
+ ' with A() as a:\n'
+ ' with B() as b:\n'
+ ' suite\n'
+ '\n'
+ 'Note: In Python 2.5, the "with" statement is only allowed when the\n'
+ ' "with_statement" feature has been enabled. It is always enabled '
+ 'in\n'
+ ' Python 2.6.\n'
+ '\n'
+ 'Changed in version 2.7: Support for multiple context expressions.\n'
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 343** - The "with" statement\n'
+ ' The specification, background, and examples for the Python '
+ '"with"\n'
+ ' statement.\n',
+ 'yield': '\n'
+ 'The "yield" statement\n'
+ '*********************\n'
+ '\n'
+ ' yield_stmt ::= yield_expression\n'
+ '\n'
+ 'The "yield" statement is only used when defining a generator '
+ 'function,\n'
+ 'and is only used in the body of the generator function. Using a\n'
+ '"yield" statement in a function definition is sufficient to cause '
+ 'that\n'
+ 'definition to create a generator function instead of a normal\n'
+ 'function.\n'
+ '\n'
+ 'When a generator function is called, it returns an iterator known '
+ 'as a\n'
+ 'generator iterator, or more commonly, a generator. The body of '
+ 'the\n'
+ "generator function is executed by calling the generator's "
+ '"next()"\n'
+ 'method repeatedly until it raises an exception.\n'
+ '\n'
+ 'When a "yield" statement is executed, the state of the generator '
+ 'is\n'
+ 'frozen and the value of "expression_list" is returned to '
+ '"next()"\'s\n'
+ 'caller. By "frozen" we mean that all local state is retained,\n'
+ 'including the current bindings of local variables, the instruction\n'
+ 'pointer, and the internal evaluation stack: enough information is\n'
+ 'saved so that the next time "next()" is invoked, the function can\n'
+ 'proceed exactly as if the "yield" statement were just another '
+ 'external\n'
+ 'call.\n'
+ '\n'
+ 'As of Python version 2.5, the "yield" statement is now allowed in '
+ 'the\n'
+ '"try" clause of a "try" ... "finally" construct. If the generator '
+ 'is\n'
+ 'not resumed before it is finalized (by reaching a zero reference '
+ 'count\n'
+ "or by being garbage collected), the generator-iterator's "
+ '"close()"\n'
+ 'method will be called, allowing any pending "finally" clauses to\n'
+ 'execute.\n'
+ '\n'
+ 'For full details of "yield" semantics, refer to the Yield '
+ 'expressions\n'
+ 'section.\n'
+ '\n'
+ 'Note: In Python 2.2, the "yield" statement was only allowed when '
+ 'the\n'
+ ' "generators" feature has been enabled. This "__future__" import\n'
+ ' statement was used to enable the feature:\n'
+ '\n'
+ ' from __future__ import generators\n'
+ '\n'
+ 'See also:\n'
+ '\n'
+ ' **PEP 255** - Simple Generators\n'
+ ' The proposal for adding generators and the "yield" statement '
+ 'to\n'
+ ' Python.\n'
+ '\n'
+ ' **PEP 342** - Coroutines via Enhanced Generators\n'
+ ' The proposal that, among other generator enhancements, '
+ 'proposed\n'
+ ' allowing "yield" to appear inside a "try" ... "finally" '
+ 'block.\n'}
diff --git a/lib-python/2.7/random.py b/lib-python/2.7/random.py
index ff78219f37..a09819e1b0 100644
--- a/lib-python/2.7/random.py
+++ b/lib-python/2.7/random.py
@@ -97,12 +97,14 @@ class Random(_random.Random):
self.gauss_next = None
def seed(self, a=None):
- """Initialize internal state from hashable object.
+ """Initialize internal state of the random number generator.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
- If a is not None or an int or long, hash(a) is used instead.
+ If a is not None or is an int or long, hash(a) is used instead.
+ Hash values for some types are nondeterministic when the
+ PYTHONHASHSEED environment variable is enabled.
"""
if a is None:
diff --git a/lib-python/2.7/robotparser.py b/lib-python/2.7/robotparser.py
index a7137a3064..4e13f7f780 100644
--- a/lib-python/2.7/robotparser.py
+++ b/lib-python/2.7/robotparser.py
@@ -160,7 +160,10 @@ class RobotFileParser:
def __str__(self):
- return ''.join([str(entry) + "\n" for entry in self.entries])
+ entries = self.entries
+ if self.default_entry is not None:
+ entries = entries + [self.default_entry]
+ return '\n'.join(map(str, entries)) + '\n'
class RuleLine:
diff --git a/lib-python/2.7/shutil.py b/lib-python/2.7/shutil.py
index 83a554de2c..a45436cebc 100644
--- a/lib-python/2.7/shutil.py
+++ b/lib-python/2.7/shutil.py
@@ -13,6 +13,20 @@ import collections
import errno
try:
+ import zlib
+ del zlib
+ _ZLIB_SUPPORTED = True
+except ImportError:
+ _ZLIB_SUPPORTED = False
+
+try:
+ import bz2
+ del bz2
+ _BZ2_SUPPORTED = True
+except ImportError:
+ _BZ2_SUPPORTED = False
+
+try:
from pwd import getpwnam
except ImportError:
getpwnam = None
@@ -91,7 +105,13 @@ def copymode(src, dst):
os.chmod(dst, mode)
def copystat(src, dst):
- """Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
+ """Copy file metadata
+
+ Copy the permission bits, last access time, last modification time, and
+ flags from `src` to `dst`. On Linux, copystat() also copies the "extended
+ attributes" where possible. The file contents, owner, and group are
+ unaffected. `src` and `dst` are path names given as strings.
+ """
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
@@ -120,7 +140,10 @@ def copy(src, dst):
copymode(src, dst)
def copy2(src, dst):
- """Copy data and all stat info ("cp -p src dst").
+ """Copy data and metadata. Return the file's destination.
+
+ Metadata is copied with copystat(). Please see the copystat function
+ for more information.
The destination may be a directory.
@@ -351,15 +374,18 @@ def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
Returns the output filename.
"""
- tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', None: ''}
- compress_ext = {'gzip': '.gz', 'bzip2': '.bz2'}
-
- # flags for compression program, each element of list will be an argument
- if compress is not None and compress not in compress_ext.keys():
- raise ValueError, \
- ("bad value for 'compress': must be None, 'gzip' or 'bzip2'")
+ if compress is None:
+ tar_compression = ''
+ elif _ZLIB_SUPPORTED and compress == 'gzip':
+ tar_compression = 'gz'
+ elif _BZ2_SUPPORTED and compress == 'bzip2':
+ tar_compression = 'bz2'
+ else:
+ raise ValueError("bad value for 'compress', or compression format not "
+ "supported : {0}".format(compress))
- archive_name = base_name + '.tar' + compress_ext.get(compress, '')
+ compress_ext = '.' + tar_compression if compress else ''
+ archive_name = base_name + '.tar' + compress_ext
archive_dir = os.path.dirname(archive_name)
if archive_dir and not os.path.exists(archive_dir):
@@ -388,7 +414,7 @@ def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
return tarinfo
if not dry_run:
- tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
+ tar = tarfile.open(archive_name, 'w|%s' % tar_compression)
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
@@ -439,6 +465,7 @@ def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
+ import zlib
import zipfile
except ImportError:
zipfile = None
@@ -474,11 +501,17 @@ def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
return zip_filename
_ARCHIVE_FORMATS = {
- 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
- 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
- 'zip': (_make_zipfile, [],"ZIP file")
- }
+ 'zip': (_make_zipfile, [], "ZIP file")
+}
+
+if _ZLIB_SUPPORTED:
+ _ARCHIVE_FORMATS['gztar'] = (_make_tarball, [('compress', 'gzip')],
+ "gzip'ed tar-file")
+
+if _BZ2_SUPPORTED:
+ _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
+ "bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
@@ -519,8 +552,8 @@ def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
- extension; 'format' is the archive format: one of "zip", "tar", "bztar"
- or "gztar".
+ extension; 'format' is the archive format: one of "zip", "tar", "gztar",
+ or "bztar". Or any other registered format.
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
diff --git a/lib-python/2.7/smtplib.py b/lib-python/2.7/smtplib.py
index e1651c0a8b..0c61065432 100755
--- a/lib-python/2.7/smtplib.py
+++ b/lib-python/2.7/smtplib.py
@@ -255,6 +255,7 @@ class SMTP:
if host:
(code, msg) = self.connect(host, port)
if code != 220:
+ self.close()
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
diff --git a/lib-python/2.7/sqlite3/test/factory.py b/lib-python/2.7/sqlite3/test/factory.py
index b9e9cd7fd7..b8e0f645a0 100644
--- a/lib-python/2.7/sqlite3/test/factory.py
+++ b/lib-python/2.7/sqlite3/test/factory.py
@@ -159,19 +159,24 @@ class RowFactoryTests(unittest.TestCase):
row_1 = self.con.execute("select 1 as a, 2 as b").fetchone()
row_2 = self.con.execute("select 1 as a, 2 as b").fetchone()
row_3 = self.con.execute("select 1 as a, 3 as b").fetchone()
+ row_4 = self.con.execute("select 1 as b, 2 as a").fetchone()
+ row_5 = self.con.execute("select 2 as b, 1 as a").fetchone()
- self.assertEqual(row_1, row_1)
- self.assertEqual(row_1, row_2)
- self.assertTrue(row_2 != row_3)
+ self.assertTrue(row_1 == row_1)
+ self.assertTrue(row_1 == row_2)
+ self.assertFalse(row_1 == row_3)
+ self.assertFalse(row_1 == row_4)
+ self.assertFalse(row_1 == row_5)
+ self.assertFalse(row_1 == object())
self.assertFalse(row_1 != row_1)
self.assertFalse(row_1 != row_2)
- self.assertFalse(row_2 == row_3)
+ self.assertTrue(row_1 != row_3)
+ self.assertTrue(row_1 != row_4)
+ self.assertTrue(row_1 != row_5)
+ self.assertTrue(row_1 != object())
- self.assertEqual(row_1, row_2)
self.assertEqual(hash(row_1), hash(row_2))
- self.assertNotEqual(row_1, row_3)
- self.assertNotEqual(hash(row_1), hash(row_3))
def CheckSqliteRowAsSequence(self):
""" Checks if the row object can act like a sequence """
diff --git a/lib-python/2.7/sqlite3/test/regression.py b/lib-python/2.7/sqlite3/test/regression.py
index e4fc66a354..42fc7c278b 100644
--- a/lib-python/2.7/sqlite3/test/regression.py
+++ b/lib-python/2.7/sqlite3/test/regression.py
@@ -24,7 +24,8 @@
import datetime
import unittest
import sqlite3 as sqlite
-from test import test_support
+import weakref
+from test import support
class RegressionTests(unittest.TestCase):
def setUp(self):
@@ -176,6 +177,9 @@ class RegressionTests(unittest.TestCase):
pass
except:
self.fail("should have raised ProgrammingError")
+ with self.assertRaisesRegexp(sqlite.ProgrammingError,
+ r'^Base Cursor\.__init__ not called\.$'):
+ cur.close()
def CheckConnectionConstructorCallCheck(self):
"""
@@ -242,24 +246,6 @@ class RegressionTests(unittest.TestCase):
cur.execute("pragma page_size")
row = cur.fetchone()
- def CheckSetDict(self):
- """
- See http://bugs.python.org/issue7478
-
- It was possible to successfully register callbacks that could not be
- hashed. Return codes of PyDict_SetItem were not checked properly.
- """
- class NotHashable:
- def __call__(self, *args, **kw):
- pass
- def __hash__(self):
- raise TypeError()
- var = NotHashable()
- self.assertRaises(TypeError, self.con.create_function, var)
- self.assertRaises(TypeError, self.con.create_aggregate, var)
- self.assertRaises(TypeError, self.con.set_authorizer, var)
- self.assertRaises(TypeError, self.con.set_progress_handler, var)
-
def CheckConnectionCall(self):
"""
Call a connection with a non-string SQL request: check error handling
@@ -267,28 +253,6 @@ class RegressionTests(unittest.TestCase):
"""
self.assertRaises(sqlite.Warning, self.con, 1)
- def CheckUpdateDescriptionNone(self):
- """
- Call Cursor.update with an UPDATE query and check that it sets the
- cursor's description to be None.
- """
- cur = self.con.cursor()
- cur.execute("CREATE TABLE foo (id INTEGER)")
- cur.execute("UPDATE foo SET id = 3 WHERE id = 1")
- self.assertEqual(cur.description, None)
-
- def CheckStatementCache(self):
- cur = self.con.cursor()
- cur.execute("CREATE TABLE foo (id INTEGER)")
- values = [(i,) for i in xrange(5)]
- cur.executemany("INSERT INTO foo (id) VALUES (?)", values)
-
- cur.execute("SELECT id FROM foo")
- self.assertEqual(list(cur), values)
- self.con.commit()
- cur.execute("SELECT id FROM foo")
- self.assertEqual(list(cur), values)
-
def CheckRecursiveCursorUse(self):
"""
http://bugs.python.org/issue10811
@@ -381,10 +345,94 @@ class RegressionTests(unittest.TestCase):
counter += 1
self.assertEqual(counter, 3, "should have returned exactly three rows")
+ def CheckBpo31770(self):
+ """
+ The interpreter shouldn't crash in case Cursor.__init__() is called
+ more than once.
+ """
+ def callback(*args):
+ pass
+ con = sqlite.connect(":memory:")
+ cur = sqlite.Cursor(con)
+ ref = weakref.ref(cur, callback)
+ cur.__init__(con)
+ del cur
+ # The interpreter shouldn't crash when ref is collected.
+ del ref
+ support.gc_collect()
+
+ def CheckDelIsolation_levelSegfault(self):
+ with self.assertRaises(AttributeError):
+ del self.con.isolation_level
+
+
+class UnhashableFunc:
+ def __hash__(self):
+ raise TypeError('unhashable type')
+
+ def __init__(self, return_value=None):
+ self.calls = 0
+ self.return_value = return_value
+
+ def __call__(self, *args, **kwargs):
+ self.calls += 1
+ return self.return_value
+
+
+class UnhashableCallbacksTestCase(unittest.TestCase):
+ """
+ https://bugs.python.org/issue34052
+
+ Registering unhashable callbacks raises TypeError, callbacks are not
+ registered in SQLite after such registration attempt.
+ """
+ def setUp(self):
+ self.con = sqlite.connect(':memory:')
+
+ def tearDown(self):
+ self.con.close()
+
+ def test_progress_handler(self):
+ f = UnhashableFunc(return_value=0)
+ with self.assertRaisesRegexp(TypeError, 'unhashable type'):
+ self.con.set_progress_handler(f, 1)
+ self.con.execute('SELECT 1')
+ self.assertFalse(f.calls)
+
+ def test_func(self):
+ func_name = 'func_name'
+ f = UnhashableFunc()
+ with self.assertRaisesRegexp(TypeError, 'unhashable type'):
+ self.con.create_function(func_name, 0, f)
+ msg = 'no such function: %s' % func_name
+ with self.assertRaisesRegexp(sqlite.OperationalError, msg):
+ self.con.execute('SELECT %s()' % func_name)
+ self.assertFalse(f.calls)
+
+ def test_authorizer(self):
+ f = UnhashableFunc(return_value=sqlite.SQLITE_DENY)
+ with self.assertRaisesRegexp(TypeError, 'unhashable type'):
+ self.con.set_authorizer(f)
+ self.con.execute('SELECT 1')
+ self.assertFalse(f.calls)
+
+ def test_aggr(self):
+ class UnhashableType(type):
+ __hash__ = None
+ aggr_name = 'aggr_name'
+ with self.assertRaisesRegexp(TypeError, 'unhashable type'):
+ self.con.create_aggregate(aggr_name, 0, UnhashableType('Aggr', (), {}))
+ msg = 'no such function: %s' % aggr_name
+ with self.assertRaisesRegexp(sqlite.OperationalError, msg):
+ self.con.execute('SELECT %s()' % aggr_name)
+
def suite():
regression_suite = unittest.makeSuite(RegressionTests, "Check")
- return unittest.TestSuite((regression_suite,))
+ return unittest.TestSuite((
+ regression_suite,
+ unittest.makeSuite(UnhashableCallbacksTestCase),
+ ))
def test():
runner = unittest.TextTestRunner()
diff --git a/lib-python/2.7/sqlite3/test/types.py b/lib-python/2.7/sqlite3/test/types.py
index a31446eaa6..fdc21aee72 100644
--- a/lib-python/2.7/sqlite3/test/types.py
+++ b/lib-python/2.7/sqlite3/test/types.py
@@ -383,8 +383,7 @@ class DateTimeTests(unittest.TestCase):
if sqlite.sqlite_version_info < (3, 1):
return
- # SQLite's current_timestamp uses UTC time, while datetime.datetime.now() uses local time.
- now = datetime.datetime.now()
+ now = datetime.datetime.utcnow()
self.cur.execute("insert into test(ts) values (current_timestamp)")
self.cur.execute("select ts from test")
ts = self.cur.fetchone()[0]
diff --git a/lib-python/2.7/sre_compile.py b/lib-python/2.7/sre_compile.py
index c5a7e89d07..b6689fa7a7 100644
--- a/lib-python/2.7/sre_compile.py
+++ b/lib-python/2.7/sre_compile.py
@@ -435,7 +435,7 @@ def _compile_info(code, pattern, flags):
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
- if lo == 0:
+ if not lo and hi:
return # not worth it
# look for a literal prefix
prefix = []
diff --git a/lib-python/2.7/sre_parse.py b/lib-python/2.7/sre_parse.py
index 8702450134..55a0906b3f 100644
--- a/lib-python/2.7/sre_parse.py
+++ b/lib-python/2.7/sre_parse.py
@@ -29,6 +29,7 @@ DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
+ASCIILETTERS = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
WHITESPACE = set(" \t\n\r\v\f")
@@ -239,7 +240,7 @@ def isname(name):
return False
return True
-def _class_escape(source, escape):
+def _class_escape(source, escape, nested):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
@@ -266,12 +267,21 @@ def _class_escape(source, escape):
elif c in DIGITS:
raise error, "bogus escape: %s" % repr(escape)
if len(escape) == 2:
+ if sys.py3kwarning and c in ASCIILETTERS:
+ import warnings
+ if c in 'Uu':
+ warnings.warn('bad escape %s; Unicode escapes are '
+ 'supported only since Python 3.3' % escape,
+ FutureWarning, stacklevel=nested + 6)
+ else:
+ warnings.warnpy3k('bad escape %s' % escape,
+ DeprecationWarning, stacklevel=nested + 6)
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
-def _escape(source, escape, state):
+def _escape(source, escape, state, nested):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
@@ -311,23 +321,32 @@ def _escape(source, escape, state):
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
- RuntimeWarning)
+ RuntimeWarning, stacklevel=nested + 6)
return GROUPREF, group
raise ValueError
if len(escape) == 2:
+ if sys.py3kwarning and c in ASCIILETTERS:
+ import warnings
+ if c in 'Uu':
+ warnings.warn('bad escape %s; Unicode escapes are '
+ 'supported only since Python 3.3' % escape,
+ FutureWarning, stacklevel=nested + 6)
+ else:
+ warnings.warnpy3k('bad escape %s' % escape,
+ DeprecationWarning, stacklevel=nested + 6)
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
-def _parse_sub(source, state, nested=1):
+def _parse_sub(source, state, nested):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
- itemsappend(_parse(source, state))
+ itemsappend(_parse(source, state, nested + 1))
if sourcematch("|"):
continue
if not nested:
@@ -379,10 +398,10 @@ def _parse_sub(source, state, nested=1):
subpattern.append((BRANCH, (None, items)))
return subpattern
-def _parse_sub_cond(source, state, condgroup):
- item_yes = _parse(source, state)
+def _parse_sub_cond(source, state, condgroup, nested):
+ item_yes = _parse(source, state, nested + 1)
if source.match("|"):
- item_no = _parse(source, state)
+ item_no = _parse(source, state, nested + 1)
if source.match("|"):
raise error, "conditional backref with more than two branches"
else:
@@ -398,7 +417,7 @@ _ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
-def _parse(source, state):
+def _parse(source, state, nested):
# parse a simple pattern
subpattern = SubPattern(state)
@@ -449,7 +468,7 @@ def _parse(source, state):
if this == "]" and set != start:
break
elif this and this[0] == "\\":
- code1 = _class_escape(source, this)
+ code1 = _class_escape(source, this, nested + 1)
elif this:
code1 = LITERAL, ord(this)
else:
@@ -465,7 +484,7 @@ def _parse(source, state):
break
elif this:
if this[0] == "\\":
- code2 = _class_escape(source, this)
+ code2 = _class_escape(source, this, nested + 1)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
@@ -595,7 +614,7 @@ def _parse(source, state):
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
- RuntimeWarning)
+ RuntimeWarning, stacklevel=nested + 6)
subpatternappend((GROUPREF, gid))
continue
else:
@@ -625,7 +644,7 @@ def _parse(source, state):
dir = -1 # lookbehind
char = sourceget()
state.lookbehind += 1
- p = _parse_sub(source, state)
+ p = _parse_sub(source, state, nested + 1)
if dir < 0:
state.lookbehind -= 1
if not sourcematch(")"):
@@ -662,7 +681,7 @@ def _parse(source, state):
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
- RuntimeWarning)
+ RuntimeWarning, stacklevel=nested + 6)
else:
# flags
if not source.next in FLAGS:
@@ -677,9 +696,9 @@ def _parse(source, state):
else:
group = state.opengroup(name)
if condgroup:
- p = _parse_sub_cond(source, state, condgroup)
+ p = _parse_sub_cond(source, state, condgroup, nested + 1)
else:
- p = _parse_sub(source, state)
+ p = _parse_sub(source, state, nested + 1)
if not sourcematch(")"):
raise error, "unbalanced parenthesis"
if group is not None:
@@ -701,7 +720,7 @@ def _parse(source, state):
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
- code = _escape(source, this, state)
+ code = _escape(source, this, state, nested + 1)
subpatternappend(code)
else:
@@ -720,6 +739,12 @@ def parse(str, flags=0, pattern=None):
pattern.str = str
p = _parse_sub(source, pattern, 0)
+ if (sys.py3kwarning and
+ (p.pattern.flags & SRE_FLAG_LOCALE) and
+ (p.pattern.flags & SRE_FLAG_UNICODE)):
+ import warnings
+ warnings.warnpy3k("LOCALE and UNICODE flags are incompatible",
+ DeprecationWarning, stacklevel=5)
tail = source.get()
if tail == ")":
@@ -807,7 +832,10 @@ def parse_template(source, pattern):
try:
this = makechar(ESCAPES[this][1])
except KeyError:
- pass
+ if sys.py3kwarning and c in ASCIILETTERS:
+ import warnings
+ warnings.warnpy3k('bad escape %s' % this,
+ DeprecationWarning, stacklevel=4)
literal(this)
else:
literal(this)
diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py
index cc484d9af7..b7e8f6fc96 100644
--- a/lib-python/2.7/ssl.py
+++ b/lib-python/2.7/ssl.py
@@ -123,7 +123,7 @@ _import_symbols('SSL_ERROR_')
_import_symbols('PROTOCOL_')
_import_symbols('VERIFY_')
-from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN
+from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN, HAS_TLSv1_3
from _ssl import _OPENSSL_API_VERSION
@@ -157,6 +157,7 @@ else:
# (OpenSSL's default setting is 'DEFAULT:!aNULL:!eNULL')
# Enable a better set of ciphers by default
# This list has been explicitly chosen to:
+# * TLS 1.3 ChaCha20 and AES-GCM cipher suites
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer AEAD over CBC for better performance and security
@@ -168,6 +169,8 @@ else:
# * Disable NULL authentication, NULL encryption, 3DES and MD5 MACs
# for security reasons
_DEFAULT_CIPHERS = (
+ 'TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:'
+ 'TLS13-AES-128-GCM-SHA256:'
'ECDH+AESGCM:ECDH+CHACHA20:DH+AESGCM:DH+CHACHA20:ECDH+AES256:DH+AES256:'
'ECDH+AES128:DH+AES:ECDH+HIGH:DH+HIGH:RSA+AESGCM:RSA+AES:RSA+HIGH:'
'!aNULL:!eNULL:!MD5:!3DES'
@@ -175,6 +178,7 @@ _DEFAULT_CIPHERS = (
# Restricted and more secure ciphers for the server side
# This list has been explicitly chosen to:
+# * TLS 1.3 ChaCha20 and AES-GCM cipher suites
# * Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE)
# * Prefer ECDHE over DHE for better performance
# * Prefer AEAD over CBC for better performance and security
@@ -185,6 +189,8 @@ _DEFAULT_CIPHERS = (
# * Disable NULL authentication, NULL encryption, MD5 MACs, DSS, RC4, and
# 3DES for security reasons
_RESTRICTED_SERVER_CIPHERS = (
+ 'TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:'
+ 'TLS13-AES-128-GCM-SHA256:'
'ECDH+AESGCM:ECDH+CHACHA20:DH+AESGCM:DH+CHACHA20:ECDH+AES256:DH+AES256:'
'ECDH+AES128:DH+AES:ECDH+HIGH:DH+HIGH:RSA+AESGCM:RSA+AES:RSA+HIGH:'
'!aNULL:!eNULL:!MD5:!DSS:!RC4:!3DES'
@@ -418,32 +424,16 @@ def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None,
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
+ # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
+ # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
+ # by default.
context = SSLContext(PROTOCOL_TLS)
- # SSLv2 considered harmful.
- context.options |= OP_NO_SSLv2
-
- # SSLv3 has problematic security and is only required for really old
- # clients such as IE6 on Windows XP
- context.options |= OP_NO_SSLv3
-
- # disable compression to prevent CRIME attacks (OpenSSL 1.0+)
- context.options |= getattr(_ssl, "OP_NO_COMPRESSION", 0)
-
if purpose == Purpose.SERVER_AUTH:
# verify certs and host name in client mode
context.verify_mode = CERT_REQUIRED
context.check_hostname = True
elif purpose == Purpose.CLIENT_AUTH:
- # Prefer the server's ciphers by default so that we get stronger
- # encryption
- context.options |= getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
-
- # Use single use keys in order to improve forward secrecy
- context.options |= getattr(_ssl, "OP_SINGLE_DH_USE", 0)
- context.options |= getattr(_ssl, "OP_SINGLE_ECDH_USE", 0)
-
- # disallow ciphers with known vulnerabilities
context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)
if cafile or capath or cadata:
@@ -469,12 +459,10 @@ def _create_unverified_context(protocol=PROTOCOL_TLS, cert_reqs=None,
if not isinstance(purpose, _ASN1Object):
raise TypeError(purpose)
+ # SSLContext sets OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION,
+ # OP_CIPHER_SERVER_PREFERENCE, OP_SINGLE_DH_USE and OP_SINGLE_ECDH_USE
+ # by default.
context = SSLContext(protocol)
- # SSLv2 considered harmful.
- context.options |= OP_NO_SSLv2
- # SSLv3 has problematic security and is only required for really old
- # clients such as IE6 on Windows XP
- context.options |= OP_NO_SSLv3
if cert_reqs is not None:
context.verify_mode = cert_reqs
@@ -622,8 +610,8 @@ class SSLSocket(socket):
self._sslobj.context = ctx
def dup(self):
- raise NotImplemented("Can't dup() %s instances" %
- self.__class__.__name__)
+ raise NotImplementedError("Can't dup() %s instances" %
+ self.__class__.__name__)
def _checkClosed(self, msg=None):
# raise an exception here if you wish to check for spurious closes
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
index 8c85d58c22..344da03b57 100644
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -71,6 +71,10 @@ if mswindows:
else:
import select
_has_poll = hasattr(select, 'poll')
+ try:
+ import threading
+ except ImportError:
+ threading = None
import fcntl
import pickle
@@ -516,6 +520,7 @@ class Popen(object):
c2pread, c2pwrite = None, None
errread, errwrite = None, None
+ # ispread, ispwrite changes are pypy-specific
ispread = False
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
@@ -525,7 +530,7 @@ class Popen(object):
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
ispread = True
- elif isinstance(stdin, int):
+ elif isinstance(stdin, (int, long)):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
@@ -545,7 +550,7 @@ class Popen(object):
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
ispwrite = True
- elif isinstance(stdout, int):
+ elif isinstance(stdout, (int, long)):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
@@ -567,7 +572,7 @@ class Popen(object):
ispwrite = True
elif stderr == STDOUT:
errwrite = c2pwrite
- elif isinstance(stderr, int):
+ elif isinstance(stderr, (int, long)):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
@@ -749,10 +754,11 @@ class Popen(object):
if e.errno == errno.EPIPE:
# communicate() should ignore broken pipe error
pass
- elif (e.errno == errno.EINVAL
- and self.poll() is not None):
- # Issue #19612: stdin.write() fails with EINVAL
- # if the process already exited before the write
+ elif e.errno == errno.EINVAL:
+ # bpo-19612, bpo-30418: On Windows, stdin.write()
+ # fails with EINVAL if the child process exited or
+ # if the child process is still running but closed
+ # the pipe.
pass
else:
raise
@@ -829,7 +835,7 @@ class Popen(object):
elif stdin == PIPE:
p2cread, p2cwrite = self.pipe_cloexec()
to_close.update((p2cread, p2cwrite))
- elif isinstance(stdin, int):
+ elif isinstance(stdin, (int, long)):
p2cread = stdin
else:
# Assuming file-like object
@@ -840,7 +846,7 @@ class Popen(object):
elif stdout == PIPE:
c2pread, c2pwrite = self.pipe_cloexec()
to_close.update((c2pread, c2pwrite))
- elif isinstance(stdout, int):
+ elif isinstance(stdout, (int, long)):
c2pwrite = stdout
else:
# Assuming file-like object
@@ -856,7 +862,7 @@ class Popen(object):
errwrite = c2pwrite
else: # child's stdout is not set, use parent's stdout
errwrite = sys.__stdout__.fileno()
- elif isinstance(stderr, int):
+ elif isinstance(stderr, (int, long)):
errwrite = stderr
else:
# Assuming file-like object
@@ -906,6 +912,21 @@ class Popen(object):
pass
+ # Used as a bandaid workaround for https://bugs.python.org/issue27448
+ # to prevent multiple simultaneous subprocess launches from interfering
+ # with one another and leaving gc disabled.
+ if threading:
+ _disabling_gc_lock = threading.Lock()
+ else:
+ class _noop_context_manager(object):
+ # A dummy context manager that does nothing for the rare
+ # user of a --without-threads build.
+ def __enter__(self): pass
+ def __exit__(self, *args): pass
+
+ _disabling_gc_lock = _noop_context_manager()
+
+
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
@@ -937,10 +958,12 @@ class Popen(object):
errpipe_read, errpipe_write = self.pipe_cloexec()
try:
try:
- gc_was_enabled = gc.isenabled()
- # Disable gc to avoid bug where gc -> file_dealloc ->
- # write to stderr -> hang. http://bugs.python.org/issue1336
- gc.disable()
+ with self._disabling_gc_lock:
+ gc_was_enabled = gc.isenabled()
+ # Disable gc to avoid bug where gc -> file_dealloc ->
+ # write to stderr -> hang.
+ # https://bugs.python.org/issue1336
+ gc.disable()
try:
self.pid = os.fork()
except:
@@ -1003,6 +1026,10 @@ class Popen(object):
if env is None:
os.execvp(executable, args)
else:
+ for k, v in env.items():
+ if '=' in k:
+ raise ValueError(
+ "illegal environment variable name")
os.execvpe(executable, args, env)
except:
@@ -1014,9 +1041,10 @@ class Popen(object):
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
- # This exitcode won't be reported to applications, so it
- # really doesn't matter what we return.
- os._exit(255)
+ finally:
+ # This exitcode won't be reported to applications, so it
+ # really doesn't matter what we return.
+ os._exit(255)
# Parent
if gc_was_enabled:
@@ -1055,13 +1083,16 @@ class Popen(object):
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
- _WEXITSTATUS=os.WEXITSTATUS):
+ _WEXITSTATUS=os.WEXITSTATUS, _WIFSTOPPED=os.WIFSTOPPED,
+ _WSTOPSIG=os.WSTOPSIG):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
+ elif _WIFSTOPPED(sts):
+ self.returncode = -_WSTOPSIG(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py
index 2e5448d344..44d721db24 100644
--- a/lib-python/2.7/sysconfig.py
+++ b/lib-python/2.7/sysconfig.py
@@ -137,6 +137,11 @@ if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
+# PC/VS9.0/amd64
+if (os.name == "nt"
+ and os.path.basename(os.path.dirname(os.path.dirname(_PROJECT_BASE))).lower() == "pc"
+ and os.path.basename(os.path.dirname(_PROJECT_BASE)).lower() == "vs9.0"):
+ _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
diff --git a/lib-python/2.7/tabnanny.py b/lib-python/2.7/tabnanny.py
index 76665ac91a..e48b72a229 100755
--- a/lib-python/2.7/tabnanny.py
+++ b/lib-python/2.7/tabnanny.py
@@ -59,7 +59,7 @@ def main():
class NannyNag(Exception):
"""
- Raised by tokeneater() if detecting an ambiguous indent.
+ Raised by process_tokens() if detecting an ambiguous indent.
Captured and handled in check().
"""
def __init__(self, lineno, msg, line):
diff --git a/lib-python/2.7/telnetlib.py b/lib-python/2.7/telnetlib.py
index 2eaa8e3709..d0246c0aea 100644
--- a/lib-python/2.7/telnetlib.py
+++ b/lib-python/2.7/telnetlib.py
@@ -317,7 +317,7 @@ class Telnet:
ready = poller.poll(None if timeout is None
else 1000 * call_timeout)
except select.error as e:
- if e.errno == errno.EINTR:
+ if e[0] == errno.EINTR:
if timeout is not None:
elapsed = time() - time_start
call_timeout = timeout-elapsed
@@ -688,7 +688,7 @@ class Telnet:
ready = poller.poll(None if timeout is None
else 1000 * call_timeout)
except select.error as e:
- if e.errno == errno.EINTR:
+ if e[0] == errno.EINTR:
if timeout is not None:
elapsed = time() - time_start
call_timeout = timeout-elapsed
diff --git a/lib-python/2.7/test/__main__.py b/lib-python/2.7/test/__main__.py
new file mode 100644
index 0000000000..d5fbe159d7
--- /dev/null
+++ b/lib-python/2.7/test/__main__.py
@@ -0,0 +1,3 @@
+from test import regrtest
+
+regrtest.main_in_temp_cwd()
diff --git a/lib-python/2.7/test/allsans.pem b/lib-python/2.7/test/allsans.pem
index 3ee4f59513..6eebde7a57 100644
--- a/lib-python/2.7/test/allsans.pem
+++ b/lib-python/2.7/test/allsans.pem
@@ -1,37 +1,81 @@
-----BEGIN PRIVATE KEY-----
-MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAOoy7/QOtTjQ0niE
-6uDcTwtkC0R2Tvy1AjVnXohCntZfdzbTGDoYTgXSOLsP8A697jUiJ8VCePGH50xG
-Z4DKnAF3a9O3a9nr2pLXb0iY3XOMv+YEBii7CfI+3oxFYgCl0sMgHzDD2ZTVYAsm
-DWgLUVsE2gHEccRwrM2tPf2EgR+FAgMBAAECgYEA3qyfyYVSeTrTYxO93x6ZaVMu
-A2IZp9zSxMQL9bKiI2GRj+cV2ebSCGbg2btFnD6qBor7FWsmYz+8g6FNN/9sY4az
-61rMqMtQvLBe+7L8w70FeTze4qQ4Y1oQri0qD6tBWhDVlpnbI5Py9bkZKD67yVUk
-elcEA/5x4PrYXkuqsAECQQD80NjT0mDvaY0JOOaQFSEpMv6QiUA8GGX8Xli7IoKb
-tAolPG8rQBa+qSpcWfDMTrWw/aWHuMEEQoP/bVDH9W4FAkEA7SYQbBAKnojZ5A3G
-kOHdV7aeivRQxQk/JN8Fb8oKB9Csvpv/BsuGxPKXHdhFa6CBTTsNRtHQw/szPo4l
-xMIjgQJAPoMxqibR+0EBM6+TKzteSL6oPXsCnBl4Vk/J5vPgkbmR7KUl4+7j8N8J
-b2554TrxKEN/w7CGYZRE6UrRd7ATNQJAWD7Yz41sli+wfPdPU2xo1BHljyl4wMk/
-EPZYbI/PCbdyAH/F935WyQTIjNeEhZc1Zkq6FwdOWw8ns3hrv3rKgQJAHXv1BqUa
-czGPIFxX2TNoqtcl6/En4vrxVB1wzsfzkkDAg98kBl7qsF+S3qujSzKikjeaVbI2
-/CyWR2P3yLtOmA==
+MIIG/QIBADANBgkqhkiG9w0BAQEFAASCBucwggbjAgEAAoIBgQCg/pM6dP7BTFNc
+qe6wIJIBB7HjwL42bp0vjcCVl4Z3MRWFswYpfxy+o+8+PguMp4K6zndA5fwNkK/H
+3HmtanncUfPqnV0usN0NHQGh/f9xRoNmB1q2L7kTuO99o0KLQgvonRT2snf8rq9n
+tPRzhHUGYhog7zzNxetYV309PHpPr19BcKepDtM5RMk2aBnoN5vtItorjXiDosFm
+6o5wQHrcupcVydszba6P75BEbc1XIWvq2Fv8muaw4pCe81QYINyLqgcPNO/nF3Os
+5EI4HKjCNRSCOhOcWqYctXLXN9lBdMBBvQc3zDmYzh1eIZewzZXPVEQT33xPkhxz
+HNmhcIctpWX4LTRF6FulkcbeuZDga3gkZYJf/M6IpU1WYXr6q8sNxbgmRRX/NuHo
+V9oDwBzLG07rKUiqRHfjGqoCRmmVeVYpryvXUNjHGH0nlVzz/8lTUxAnJorO3Fdc
+I+6zKLUPICdAlvz51AH6yopgPFhrdgA0pVzPO6L5G8SRQCxKhAUCAwEAAQKCAYAa
+2jtOTcNMFGH3G7TfFZ+kolbuaPCQ/aQkEV2k1dAswzgWw8RsWXI+7fLyi8C7Zhks
+9VD4tyNyU8at7D0zSoYm1Fh9sl+fcQp9rG/gSBA6IYu7EdD0gEM7YeY4K2nm9k4s
+Lz8W4q+WqsBA6PK47cfjF6vKAH1AyRk28+jEtPiln9egf5zHWtyqOanh9D0V+Wh9
+hgmjqAYI1rWxZ7/4Qxj7Bfg7Px7blhi+kzOZ5kKQnNd2JT46hM+jgzah/G3zVE+R
+FFW6ksmJgZ+dCuSbE7HEJmKms1CWq/1Cll0A3uy4JTDZOrK4KcZQ9UjjWJWvlXQm
+uNXSSAp1k287DLVUm9c22SDeXpb9PyKmzyvJvVmMqqBx6QzHZ/L7WPzpUWAoLcU+
+ZHT7vggDymkIO+fcRbUzv8s5R7RnLbcBga51/5OCUvAWDoJXNw0qwYZOIbfTnQgs
+8xbCmbMzllyYM/dK3GxQAwfn8Hzk+DbS/NObMjHLCWLfYeUvutXJSNly6Ny+ZcEC
+gcEAzo5Y1UFOfBX4MZLIZ69LfgaXj9URobMwqlEwKil8pWQMa951ga3moLt91nOe
+SAQz3meFTBX/VAb2ZHLeIf3FoNkiIx48PkxsR/hhLHpvl26zEg3yXs3tv0IFBx2R
+EEnLNpQaAQFR9S1yDOaG2rsb17ZDKyp9isDpAENHAmEnT/XJn+Dc0SOH1EVDjUeM
+JqToAF/fjIx/RF4oUJCAgOPBMlRy5ywLQk8uDi6ft0NCzzCi0eCuk1Ty3KzWFGwx
+7cYRAoHBAMeIPCzHG3No4JGUFunslVwo5TuC7maO6qYKbq0OyvwWfL4b7gjrMBR9
+d5WyZlp/Vf40O463dg8x8qPNOFWp49f3hxTvvfnt2/m3+CQuDOLfqBbHufZApP1J
+U9MubUNnDFHHeJ9l0tg2nhiLw24GHeMARZhA/BimMQPY0OpZPpLVxAUArM2EB7hI
+glQpYCtdXhqwl1pl0u3TZ08y3BXYNg9BycdpGRMWSsAwsApJRgNuI/dfDKu0uMYF
+/pUhXVPatQKBwGgLpAun3dT7bA3sli5ESo6s22OEPGFrVbQ1OUHDrBnTj742TJKJ
++oY0a2q+ypgUJdx94NM2sWquJybqBaKxpf8j4OI3tLjc3h5SqwAwnE13YZRSmifP
+K1cP9mBjMFM4GLjhWUfwVkxeG/kLlhpP7fJ2yNbRjHN8QOH1AavdLGRGts1mA1UF
+xMHUMfbUd3Bv2L13ja/KhcD2fPA4GcLS9tpXV5nCwdkg8V4LdkBmDR04rotx1f44
+6Czokt2usmfHQQKBwFkufxbUd2SB/72Rnxw27hse/DY5My0Lu70y9HzNG9TIiEDA
+YwgBdp/x5D04W58fQuQ3nFcRkOcBwB2OYBuJr5ibvfiRnyvSMHvQykwBeSj+Jjbo
+VinGgvfiimDdY2C48jyrFzLHZBHXd5oo/dRzT3Bicri2cvbhcQ7zHY1hDiK7AL3r
+q1DALmMjpXzQcXdwZ9suCrgQwtIhpw8zAEOTO7ZeBT3nr5lkYUy9djFixrRJyjGK
+fjNQtzVrAHrPStNr8QKBwQDCC0zhsCnTv4sAJmW7LL6Ayd5rbWhUZ6px1xY0yHMA
+hehj+xbaiC6cfVr5Rg0ncvaa8AExu4kXpVsupTyNwvC4NgzLHtfBw6WUdOnd1awE
+kSrDtDReBt2wByAcQwttQsrJ1/Pt6zcNJJI4Z9s8G4NTcQWJwUhU20N55JQKR//l
+OQJqhq9NVhte/ctDjVwOHs/OhDNvxsAWxdjnf/O2up0os+M2bFkmHuaVW0vQbqTQ
+mw7Vbzk2Ff5oT6E3kbC8Ur4=
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-MIIDcjCCAtugAwIBAgIJAN5dc9TOWjB7MA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV
+MIIHMDCCBZigAwIBAgIJALVVA6v9zJS5MA0GCSqGSIb3DQEBCwUAMF0xCzAJBgNV
BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
-IFNvZnR3YXJlIEZvdW5kYXRpb24xEDAOBgNVBAMMB2FsbHNhbnMwHhcNMTYwODA1
-MTAyMTExWhcNMjYwODAzMTAyMTExWjBdMQswCQYDVQQGEwJYWTEXMBUGA1UEBwwO
+IFNvZnR3YXJlIEZvdW5kYXRpb24xEDAOBgNVBAMMB2FsbHNhbnMwHhcNMTgwODI5
+MTQyMzE3WhcNMjgwODI2MTQyMzE3WjBdMQswCQYDVQQGEwJYWTEXMBUGA1UEBwwO
Q2FzdGxlIEFudGhyYXgxIzAhBgNVBAoMGlB5dGhvbiBTb2Z0d2FyZSBGb3VuZGF0
-aW9uMRAwDgYDVQQDDAdhbGxzYW5zMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
-gQDqMu/0DrU40NJ4hOrg3E8LZAtEdk78tQI1Z16IQp7WX3c20xg6GE4F0ji7D/AO
-ve41IifFQnjxh+dMRmeAypwBd2vTt2vZ69qS129ImN1zjL/mBAYouwnyPt6MRWIA
-pdLDIB8ww9mU1WALJg1oC1FbBNoBxHHEcKzNrT39hIEfhQIDAQABo4IBODCCATQw
-ggEwBgNVHREEggEnMIIBI4IHYWxsc2Fuc6AeBgMqAwSgFwwVc29tZSBvdGhlciBp
-ZGVudGlmaWVyoDUGBisGAQUCAqArMCmgEBsOS0VSQkVST1MuUkVBTE2hFTAToAMC
-AQGhDDAKGwh1c2VybmFtZYEQdXNlckBleGFtcGxlLm9yZ4IPd3d3LmV4YW1wbGUu
-b3JnpGcwZTELMAkGA1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMw
-IQYDVQQKDBpQeXRob24gU29mdHdhcmUgRm91bmRhdGlvbjEYMBYGA1UEAwwPZGly
-bmFtZSBleGFtcGxlhhdodHRwczovL3d3dy5weXRob24ub3JnL4cEfwAAAYcQAAAA
-AAAAAAAAAAAAAAAAAYgEKgMEBTANBgkqhkiG9w0BAQsFAAOBgQAy16h+F+nOmeiT
-VWR0fc8F/j6FcadbLseAUaogcC15OGxCl4UYpLV88HBkABOoGCpP155qwWTwOrdG
-iYPGJSusf1OnJEbvzFejZf6u078bPd9/ZL4VWLjv+FPGkjd+N+/OaqMvgj8Lu99f
-3Y/C4S7YbHxxwff6C6l2Xli+q6gnuQ==
+aW9uMRAwDgYDVQQDDAdhbGxzYW5zMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIB
+igKCAYEAoP6TOnT+wUxTXKnusCCSAQex48C+Nm6dL43AlZeGdzEVhbMGKX8cvqPv
+Pj4LjKeCus53QOX8DZCvx9x5rWp53FHz6p1dLrDdDR0Bof3/cUaDZgdati+5E7jv
+faNCi0IL6J0U9rJ3/K6vZ7T0c4R1BmIaIO88zcXrWFd9PTx6T69fQXCnqQ7TOUTJ
+NmgZ6Deb7SLaK414g6LBZuqOcEB63LqXFcnbM22uj++QRG3NVyFr6thb/JrmsOKQ
+nvNUGCDci6oHDzTv5xdzrORCOByowjUUgjoTnFqmHLVy1zfZQXTAQb0HN8w5mM4d
+XiGXsM2Vz1REE998T5IccxzZoXCHLaVl+C00RehbpZHG3rmQ4Gt4JGWCX/zOiKVN
+VmF6+qvLDcW4JkUV/zbh6FfaA8AcyxtO6ylIqkR34xqqAkZplXlWKa8r11DYxxh9
+J5Vc8//JU1MQJyaKztxXXCPusyi1DyAnQJb8+dQB+sqKYDxYa3YANKVczzui+RvE
+kUAsSoQFAgMBAAGjggLxMIIC7TCCATAGA1UdEQSCAScwggEjggdhbGxzYW5zoB4G
+AyoDBKAXDBVzb21lIG90aGVyIGlkZW50aWZpZXKgNQYGKwYBBQICoCswKaAQGw5L
+RVJCRVJPUy5SRUFMTaEVMBOgAwIBAaEMMAobCHVzZXJuYW1lgRB1c2VyQGV4YW1w
+bGUub3Jngg93d3cuZXhhbXBsZS5vcmekZzBlMQswCQYDVQQGEwJYWTEXMBUGA1UE
+BwwOQ2FzdGxlIEFudGhyYXgxIzAhBgNVBAoMGlB5dGhvbiBTb2Z0d2FyZSBGb3Vu
+ZGF0aW9uMRgwFgYDVQQDDA9kaXJuYW1lIGV4YW1wbGWGF2h0dHBzOi8vd3d3LnB5
+dGhvbi5vcmcvhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABiAQqAwQFMA4GA1UdDwEB
+/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/
+BAIwADAdBgNVHQ4EFgQUoLHAHNTWrHkSCUYkhn5NH0S40CAwgY8GA1UdIwSBhzCB
+hIAUoLHAHNTWrHkSCUYkhn5NH0S40CChYaRfMF0xCzAJBgNVBAYTAlhZMRcwFQYD
+VQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9uIFNvZnR3YXJlIEZv
+dW5kYXRpb24xEDAOBgNVBAMMB2FsbHNhbnOCCQC1VQOr/cyUuTCBgwYIKwYBBQUH
+AQEEdzB1MDwGCCsGAQUFBzAChjBodHRwOi8vdGVzdGNhLnB5dGhvbnRlc3QubmV0
+L3Rlc3RjYS9weWNhY2VydC5jZXIwNQYIKwYBBQUHMAGGKWh0dHA6Ly90ZXN0Y2Eu
+cHl0aG9udGVzdC5uZXQvdGVzdGNhL29jc3AvMEMGA1UdHwQ8MDowOKA2oDSGMmh0
+dHA6Ly90ZXN0Y2EucHl0aG9udGVzdC5uZXQvdGVzdGNhL3Jldm9jYXRpb24uY3Js
+MA0GCSqGSIb3DQEBCwUAA4IBgQAeKJKycO2DES98gyR2e/GzPYEw87cCS0cEpiiP
+3CEUgzfEbF0X89GDKEey4H3Irvosbvt2hEcf2RNpahLUL/fUv53bDmHNmL8qJg5E
+UJVMOHvOpSOjqoqeRuSyG0GnnAuUwcxdrZY6UzLdslhuq9F8UjgHr6KSMx56G9uK
+LmTy5njMab0in2xL/YRX/0nogK3BHqpUHrfCdEYZkciRxtAa+OPpWn4dcZi+Fpf7
+ZYSgPLNt+djtFDMIAk5Bo+XDaQdW3dhF0w44enrGAOV0xPE+/jOuenNhKBafjuNb
+lkeSr45+QZsi1rd18ny8z3uuaGqIAziFgmllZOH2D8giTn6+5jZcCNZCoGKUkPI9
+l/GMWwxg4HQYYlZcsZzTCem9Rb2XcrasAbmhFapMtR+QAwSed5vKE7ZdtQhj74kB
+7Q0E7Lkgpp6BaObb2As8/f0K/UlSVSvrYk+i3JT9wK/qqkRGxsTFEF7N9t0rKu8y
+4JdQDtZCI552MsFvYW6m+IOYgxg=
-----END CERTIFICATE-----
diff --git a/lib-python/2.7/test/bisect_cmd.py b/lib-python/2.7/test/bisect_cmd.py
new file mode 100755
index 0000000000..5028ed214f
--- /dev/null
+++ b/lib-python/2.7/test/bisect_cmd.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python2
+"""
+Command line tool to bisect failing CPython tests.
+
+Find the test_os test method which alters the environment:
+
+ ./python -m test.bisect_cmd --fail-env-changed test_os
+
+Find a reference leak in "test_os", write the list of failing tests into the
+"bisect" file:
+
+ ./python -m test.bisect_cmd -o bisect -R 3:3 test_os
+
+Load an existing list of tests from a file using -i option:
+
+ ./python -m test --list-cases -m FileTests test_os > tests
+ ./python -m test.bisect_cmd -i tests test_os
+"""
+from __future__ import print_function
+
+import argparse
+import datetime
+import os.path
+import math
+import random
+import subprocess
+import sys
+import tempfile
+import time
+
+
+def write_tests(filename, tests):
+ with open(filename, "w") as fp:
+ for name in tests:
+ print(name, file=fp)
+ fp.flush()
+
+
+def write_output(filename, tests):
+ if not filename:
+ return
+ print("Write %s tests into %s" % (len(tests), filename))
+ write_tests(filename, tests)
+ return filename
+
+
+def format_shell_args(args):
+ return ' '.join(args)
+
+
+def list_cases(args):
+ cmd = [sys.executable, '-m', 'test', '--list-cases']
+ cmd.extend(args.test_args)
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ universal_newlines=True)
+ try:
+ stdout = proc.communicate()[0]
+ except:
+ proc.stdout.close()
+ proc.kill()
+ proc.wait()
+ raise
+ exitcode = proc.wait()
+ if exitcode:
+ cmd = format_shell_args(cmd)
+ print("Failed to list tests: %s failed with exit code %s"
+ % (cmd, exitcode))
+ sys.exit(exitcode)
+ tests = stdout.splitlines()
+ return tests
+
+
+def run_tests(args, tests, huntrleaks=None):
+ tmp = tempfile.mktemp()
+ try:
+ write_tests(tmp, tests)
+
+ cmd = [sys.executable, '-m', 'test', '--matchfile', tmp]
+ cmd.extend(args.test_args)
+ print("+ %s" % format_shell_args(cmd))
+ proc = subprocess.Popen(cmd)
+ try:
+ exitcode = proc.wait()
+ except:
+ proc.kill()
+ proc.wait()
+ raise
+ return exitcode
+ finally:
+ if os.path.exists(tmp):
+ os.unlink(tmp)
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-i', '--input',
+ help='Test names produced by --list-tests written '
+ 'into a file. If not set, run --list-tests')
+ parser.add_argument('-o', '--output',
+ help='Result of the bisection')
+ parser.add_argument('-n', '--max-tests', type=int, default=1,
+ help='Maximum number of tests to stop the bisection '
+ '(default: 1)')
+ parser.add_argument('-N', '--max-iter', type=int, default=100,
+ help='Maximum number of bisection iterations '
+ '(default: 100)')
+ # FIXME: document that following arguments are test arguments
+
+ args, test_args = parser.parse_known_args()
+ args.test_args = test_args
+ return args
+
+
+def main():
+ args = parse_args()
+
+ if args.input:
+ with open(args.input) as fp:
+ tests = [line.strip() for line in fp]
+ else:
+ tests = list_cases(args)
+
+ print("Start bisection with %s tests" % len(tests))
+ print("Test arguments: %s" % format_shell_args(args.test_args))
+ print("Bisection will stop when getting %s or less tests "
+ "(-n/--max-tests option), or after %s iterations "
+ "(-N/--max-iter option)"
+ % (args.max_tests, args.max_iter))
+ output = write_output(args.output, tests)
+ print()
+
+ start_time = time.time()
+ iteration = 1
+ try:
+ while len(tests) > args.max_tests and iteration <= args.max_iter:
+ ntest = len(tests)
+ ntest = max(ntest // 2, 1)
+ subtests = random.sample(tests, ntest)
+
+ print("[+] Iteration %s: run %s tests/%s"
+ % (iteration, len(subtests), len(tests)))
+ print()
+
+ exitcode = run_tests(args, subtests)
+
+ print("ran %s tests/%s" % (ntest, len(tests)))
+ print("exit", exitcode)
+ if exitcode:
+ print("Tests failed: use this new subtest")
+ tests = subtests
+ output = write_output(args.output, tests)
+ else:
+ print("Tests succeeded: skip this subtest, try a new subbset")
+ print()
+ iteration += 1
+ except KeyboardInterrupt:
+ print()
+ print("Bisection interrupted!")
+ print()
+
+ print("Tests (%s):" % len(tests))
+ for test in tests:
+ print("* %s" % test)
+ print()
+
+ if output:
+ print("Output written into %s" % output)
+
+ dt = math.ceil(time.time() - start_time)
+ if len(tests) <= args.max_tests:
+ print("Bisection completed in %s iterations and %s"
+ % (iteration, datetime.timedelta(seconds=dt)))
+ sys.exit(1)
+ else:
+ print("Bisection failed after %s iterations and %s"
+ % (iteration, datetime.timedelta(seconds=dt)))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib-python/2.7/test/crashers/warnings_del_crasher.py b/lib-python/2.7/test/crashers/warnings_del_crasher.py
new file mode 100644
index 0000000000..02e6805202
--- /dev/null
+++ b/lib-python/2.7/test/crashers/warnings_del_crasher.py
@@ -0,0 +1,29 @@
+"""
+Expose a race in the _warnings module, which is the C backend for the
+warnings module. The "_warnings" module tries to access attributes of the
+"warnings" module (because of the API it has to support), but doing so
+during interpreter shutdown is problematic. Specifically, the call to
+PyImport_GetModuleDict() in Python/_warnings.c:get_warnings_attr will
+abort() if the modules dict has already been cleaned up.
+
+This crasher is timing-dependent, and more threads (NUM_THREADS) may be
+necessary to expose it reliably on different systems.
+"""
+
+import threading
+import warnings
+
+NUM_THREADS = 10
+
+class WarnOnDel(object):
+ def __del__(self):
+ warnings.warn("oh no something went wrong", UserWarning)
+
+def do_work():
+ while True:
+ w = WarnOnDel()
+
+for i in range(NUM_THREADS):
+ t = threading.Thread(target=do_work)
+ t.setDaemon(1)
+ t.start()
diff --git a/lib-python/2.7/test/dh1024.pem b/lib-python/2.7/test/dh1024.pem
deleted file mode 100644
index a391176b5f..0000000000
--- a/lib-python/2.7/test/dh1024.pem
+++ /dev/null
@@ -1,7 +0,0 @@
------BEGIN DH PARAMETERS-----
-MIGHAoGBAIbzw1s9CT8SV5yv6L7esdAdZYZjPi3qWFs61CYTFFQnf2s/d09NYaJt
-rrvJhIzWavqnue71qXCf83/J3nz3FEwUU/L0mGyheVbsSHiI64wUo3u50wK5Igo0
-RNs/LD0irs7m0icZ//hijafTU+JOBiuA8zMI+oZfU7BGuc9XrUprAgEC
------END DH PARAMETERS-----
-
-Generated with: openssl dhparam -out dh1024.pem 1024
diff --git a/lib-python/2.7/test/ffdh3072.pem b/lib-python/2.7/test/ffdh3072.pem
new file mode 100644
index 0000000000..ad69bac8d0
--- /dev/null
+++ b/lib-python/2.7/test/ffdh3072.pem
@@ -0,0 +1,41 @@
+ DH Parameters: (3072 bit)
+ prime:
+ 00:ff:ff:ff:ff:ff:ff:ff:ff:ad:f8:54:58:a2:bb:
+ 4a:9a:af:dc:56:20:27:3d:3c:f1:d8:b9:c5:83:ce:
+ 2d:36:95:a9:e1:36:41:14:64:33:fb:cc:93:9d:ce:
+ 24:9b:3e:f9:7d:2f:e3:63:63:0c:75:d8:f6:81:b2:
+ 02:ae:c4:61:7a:d3:df:1e:d5:d5:fd:65:61:24:33:
+ f5:1f:5f:06:6e:d0:85:63:65:55:3d:ed:1a:f3:b5:
+ 57:13:5e:7f:57:c9:35:98:4f:0c:70:e0:e6:8b:77:
+ e2:a6:89:da:f3:ef:e8:72:1d:f1:58:a1:36:ad:e7:
+ 35:30:ac:ca:4f:48:3a:79:7a:bc:0a:b1:82:b3:24:
+ fb:61:d1:08:a9:4b:b2:c8:e3:fb:b9:6a:da:b7:60:
+ d7:f4:68:1d:4f:42:a3:de:39:4d:f4:ae:56:ed:e7:
+ 63:72:bb:19:0b:07:a7:c8:ee:0a:6d:70:9e:02:fc:
+ e1:cd:f7:e2:ec:c0:34:04:cd:28:34:2f:61:91:72:
+ fe:9c:e9:85:83:ff:8e:4f:12:32:ee:f2:81:83:c3:
+ fe:3b:1b:4c:6f:ad:73:3b:b5:fc:bc:2e:c2:20:05:
+ c5:8e:f1:83:7d:16:83:b2:c6:f3:4a:26:c1:b2:ef:
+ fa:88:6b:42:38:61:1f:cf:dc:de:35:5b:3b:65:19:
+ 03:5b:bc:34:f4:de:f9:9c:02:38:61:b4:6f:c9:d6:
+ e6:c9:07:7a:d9:1d:26:91:f7:f7:ee:59:8c:b0:fa:
+ c1:86:d9:1c:ae:fe:13:09:85:13:92:70:b4:13:0c:
+ 93:bc:43:79:44:f4:fd:44:52:e2:d7:4d:d3:64:f2:
+ e2:1e:71:f5:4b:ff:5c:ae:82:ab:9c:9d:f6:9e:e8:
+ 6d:2b:c5:22:36:3a:0d:ab:c5:21:97:9b:0d:ea:da:
+ 1d:bf:9a:42:d5:c4:48:4e:0a:bc:d0:6b:fa:53:dd:
+ ef:3c:1b:20:ee:3f:d5:9d:7c:25:e4:1d:2b:66:c6:
+ 2e:37:ff:ff:ff:ff:ff:ff:ff:ff
+ generator: 2 (0x2)
+ recommended-private-length: 276 bits
+-----BEGIN DH PARAMETERS-----
+MIIBjAKCAYEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
++8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
+87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
+YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
+7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
+ssbzSibBsu/6iGtCOGEfz9zeNVs7ZRkDW7w09N75nAI4YbRvydbmyQd62R0mkff3
+7lmMsPrBhtkcrv4TCYUTknC0EwyTvEN5RPT9RFLi103TZPLiHnH1S/9croKrnJ32
+nuhtK8UiNjoNq8Uhl5sN6todv5pC1cRITgq80Gv6U93vPBsg7j/VnXwl5B0rZsYu
+N///////////AgECAgIBFA==
+-----END DH PARAMETERS-----
diff --git a/lib-python/2.7/test/fork_wait.py b/lib-python/2.7/test/fork_wait.py
index 2646cbd581..b900463da6 100644
--- a/lib-python/2.7/test/fork_wait.py
+++ b/lib-python/2.7/test/fork_wait.py
@@ -13,8 +13,9 @@ the same application, the present example should work just fine. DC
"""
import os, sys, time, unittest
-import test.test_support as test_support
-thread = test_support.import_module('thread')
+import test.support as support
+
+threading = support.import_module('threading')
LONGSLEEP = 2
SHORTSLEEP = 0.5
@@ -23,8 +24,19 @@ NUM_THREADS = 4
class ForkWait(unittest.TestCase):
def setUp(self):
+ self._threading_key = support.threading_setup()
self.alive = {}
self.stop = 0
+ self.threads = []
+
+ def tearDown(self):
+ # Stop threads
+ self.stop = 1
+ for thread in self.threads:
+ thread.join()
+ thread = None
+ del self.threads[:]
+ support.threading_cleanup(*self._threading_key)
def f(self, id):
while not self.stop:
@@ -48,7 +60,9 @@ class ForkWait(unittest.TestCase):
def test_wait(self):
for i in range(NUM_THREADS):
- thread.start_new(self.f, (i,))
+ thread = threading.Thread(target=self.f, args=(i,))
+ thread.start()
+ self.threads.append(thread)
time.sleep(LONGSLEEP)
@@ -74,6 +88,3 @@ class ForkWait(unittest.TestCase):
else:
# Parent
self.wait_impl(cpid)
- # Tell threads to die
- self.stop = 1
- time.sleep(2*SHORTSLEEP) # Wait for threads to die
diff --git a/lib-python/2.7/test/inspect_fodder.py b/lib-python/2.7/test/inspect_fodder.py
index 5c87ae6f82..548765c797 100644
--- a/lib-python/2.7/test/inspect_fodder.py
+++ b/lib-python/2.7/test/inspect_fodder.py
@@ -56,3 +56,9 @@ class ParrotDroppings:
class FesteringGob(MalodorousPervert, ParrotDroppings):
pass
+
+currentframe = inspect.currentframe()
+try:
+ raise Exception()
+except:
+ tb = sys.exc_info()[2]
diff --git a/lib-python/2.7/test/keycert.passwd.pem b/lib-python/2.7/test/keycert.passwd.pem
index c330c36d8f..cbb3c3bccd 100644
--- a/lib-python/2.7/test/keycert.passwd.pem
+++ b/lib-python/2.7/test/keycert.passwd.pem
@@ -1,45 +1,45 @@
------BEGIN ENCRYPTED PRIVATE KEY-----
-MIIHbTBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQwwHAQIhD+rJdxqb6ECAggA
-MAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBDTdyjCP3riOSUfxix4aXEvBIIH
-ECGkbsFabrcFMZcplw5jHMaOlG7rYjUzwDJ80JM8uzbv2Jb8SvNlns2+xmnEvH/M
-mNvRmnXmplbVjH3XBMK8o2Psnr2V/a0j7/pgqpRxHykG+koOY4gzdt3MAg8JPbS2
-hymSl+Y5EpciO3xLfz4aFL1ZNqspQbO/TD13Ij7DUIy7xIRBMp4taoZCrP0cEBAZ
-+wgu9m23I4dh3E8RUBzWyFFNic2MVVHrui6JbHc4dIHfyKLtXJDhUcS0vIC9PvcV
-jhorh3UZC4lM+/jjXV5AhzQ0VrJ2tXAUX2dA144XHzkSH2QmwfnajPsci7BL2CGC
-rjyTy4NfB/lDwU+55dqJZQSKXMxAapJMrtgw7LD5CKQcN6zmfhXGssJ7HQUXKkaX
-I1YOFzuUD7oo56BVCnVswv0jX9RxrE5QYNreMlOP9cS+kIYH65N+PAhlURuQC14K
-PgDkHn5knSa2UQA5tc5f7zdHOZhGRUfcjLP+KAWA3nh+/2OKw/X3zuPx75YT/FKe
-tACPw5hjEpl62m9Xa0eWepZXwqkIOkzHMmCyNCsbC0mmRoEjmvfnslfsmnh4Dg/c
-4YsTYMOLLIeCa+WIc38aA5W2lNO9lW0LwLhX1rP+GRVPv+TVHXlfoyaI+jp0iXrJ
-t3xxT0gaiIR/VznyS7Py68QV/zB7VdqbsNzS7LdquHK1k8+7OYiWjY3gqyU40Iu2
-d1eSnIoDvQJwyYp7XYXbOlXNLY+s1Qb7yxcW3vXm0Bg3gKT8r1XHWJ9rj+CxAn5r
-ysfkPs1JsesxzzQjwTiDNvHnBnZnwxuxfBr26ektEHmuAXSl8V6dzLN/aaPjpTj4
-CkE7KyqX3U9bLkp+ztl4xWKEmW44nskzm0+iqrtrxMyTfvvID4QrABjZL4zmWIqc
-e3ZfA3AYk9VDIegk/YKGC5VZ8YS7ZXQ0ASK652XqJ7QlMKTxxV7zda6Fp4uW6/qN
-ezt5wgbGGhZQXj2wDQmWNQYyG/juIgYTpCUA54U5XBIjuR6pg+Ytm0UrvNjsUoAC
-wGelyqaLDq8U8jdIFYVTJy9aJjQOYXjsUJ0dZN2aGHSlju0ZGIZc49cTIVQ9BTC5
-Yc0Vlwzpl+LuA25DzKZNSb/ci0lO/cQGJ2uXQQgaNgdsHlu8nukENGJhnIzx4fzK
-wEh3yHxhTRCzPPwDfXmx0IHXrPqJhSpAgaXBVIm8OjvmMxO+W75W4uLfNY/B7e2H
-3cjklGuvkofOf7sEOrGUYf4cb6Obg8FpvHgpKo5Twwmoh/qvEKckBFqNhZXDDl88
-GbGlSEgyaAV1Ig8s1NJKBolWFa0juyPAwJ8vT1T4iwW7kQ7KXKt2UNn96K/HxkLu
-pikvukz8oRHMlfVHa0R48UB1fFHwZLzPmwkpu6ancIxk3uO3yfhf6iDk3bmnyMlz
-g3k/b6MrLYaOVByRxay85jH3Vvgqfgn6wa6BJ7xQ81eZ8B45gFuTH0J5JtLL7SH8
-darRPLCYfA+Ums9/H6pU5EXfd3yfjMIbvhCXHkJrrljkZ+th3p8dyto6wmYqIY6I
-qR9sU+o6DhRaiP8tCICuhHxQpXylUM6WeJkJwduTJ8KWIvzsj4mReIKOl/oC2jSd
-gIdKhb9Q3zj9ce4N5m6v66tyvjxGZ+xf3BvUPDD+LwZeXgf7OBsNVbXzQbzto594
-nbCzPocFi3gERE50ru4K70eQCy08TPG5NpOz+DDdO5vpAuMLYEuI7O3L+3GjW40Q
-G5bu7H5/i7o/RWR67qhG/7p9kPw3nkUtYgnvnWaPMIuTfb4c2d069kjlfgWjIbbI
-tpSKmm5DHlqTE4/ECAbIEDtSaw9dXHCdL3nh5+n428xDdGbjN4lT86tfu17EYKzl
-ydH1RJ1LX3o3TEj9UkmDPt7LnftvwybMFEcP7hM2xD4lC++wKQs7Alg6dTkBnJV4
-5xU78WRntJkJTU7kFkpPKA0QfyCuSF1fAMoukDBkqUdOj6jE0BlJQlHk5iwgnJlt
-uEdkTjHZEjIUxWC6llPcAzaPNlmnD45AgfEW+Jn21IvutmJiQAz5lm9Z9PXaR0C8
-hXB6owRY67C0YKQwXhoNf6xQun2xGBGYy5rPEEezX1S1tUH5GR/KW1Lh+FzFqHXI
-ZEb5avfDqHKehGAjPON+Br7akuQ125M9LLjKuSyPaQzeeCAy356Xd7XzVwbPddbm
-9S9WSPqzaPgh10chIHoNoC8HMd33dB5j9/Q6jrbU/oPlptu/GlorWblvJdcTuBGI
-IVn45RFnkG8hCz0GJSNzW7+70YdESQbfJW79vssWMaiSjFE0pMyFXrFR5lBywBTx
-PiGEUWtvrKG94X1TMlGUzDzDJOQNZ9dT94bonNe9pVmP5BP4/DzwwiWh6qrzWk6p
-j8OE4cfCSh2WvHnhJbH7/N0v+JKjtxeIeJ16jx/K2oK5
------END ENCRYPTED PRIVATE KEY-----
+-----BEGIN RSA PRIVATE KEY-----
+Proc-Type: 4,ENCRYPTED
+DEK-Info: DES-EDE3-CBC,D134E931C96D9DEC
+
+nuGFEej7vIjkYWSMz5OJeVTNntDRQi6ZM4DBm3g8T7i/0odr3WFqGMMKZcIhLYQf
+rgRq7RSKtrJ1y5taVucMV+EuCjyfzDo0TsYt+ZrXv/D08eZhjRmkhoHnGVF0TqQm
+nQEXM/ERT4J2RM78dnG+homMkI76qOqxgGbRqQqJo6AiVRcAZ45y8s96bru2TAB8
++pWjO/v0Je7AFVdwSU52N8OOY6uoSAygW+0UY1WVxbVGJF2XfRsNpPX+YQHYl6e+
+3xM5XBVCgr6kmdAyub5qUJ38X3TpdVGoR0i+CVS9GTr2pSRib1zURAeeHnlqiUZM
+4m0Gn9s72nJevU1wxED8pwOhR8fnHEmMKGD2HPhKoOCbzDhwwBZO27TNa1uWeM3f
+M5oixKDi2PqMn3y2cDx1NjJtP661688EcJ5a2Ih9BgO9xpnhSyzBWEKcAn0tJB0H
+/56M0FW6cdOOIzMveGGL7sHW5E+iOdI1n5e7C6KJUzew78Y9qJnhS53EdI6qTz9R
+wsIsj1i070Fk6RbPo6zpLlF6w7Zj8GlZaZA7OZZv9wo5VEV/0ST8gmiiBOBc4C6Y
+u9hyLIIu4dFEBKyQHRvBnQSLNpKx6or1OGFDVBay2In9Yh2BHh1+vOj/OIz/wq48
+EHOIV27fRJxLu4jeK5LIGDhuPnMJ8AJYQ0bQOUP6fd7p+TxWkAQZPB/Dx/cs3hxr
+nFEdzx+eO+IAsObx/b1EGZyEJyETBslu4GwYX7/KK3HsJhDJ1bdZ//28jOCaoir6
+ZOMT72GRwmVoQTJ0XpccfjHfKJDRLT7C1xvzo4Eibth0hpTZkA75IUYUp6qK/PuJ
+kH/qdiC7QIkRKtsrawW4vEDna3YtxIYhQqz9+KwO6u/0gzooZtv1RU4U3ifMDB5u
+5P5GAzACRqlY8QYBkM869lvWqzQPHvybC4ak9Yx6/heMO9ddjdIW9BaK8BLxvN/6
+UCD936Y4fWltt09jHZIoxWFykouBwmd7bXooNYXmDRNmjTdVhKJuOEOQw8hDzx7e
+pWFJ9Z/V4Qm1tvXbCD7QFqMCDoY3qFvVG8DBqXpmxe1yPfz21FWrT7IuqDXAD3ns
+vxfN/2a+Cy04U9FBNVCvWqWIs5AgNpdCMJC2FlXKTy+H3/7rIjNyFyvbX0vxIXtK
+liOVNXiyVM++KZXqktqMUDlsJENmIHV9B046luqbgW018fHkyEYlL3iRZGbYegwr
+XO9VVIKVPw1BEvJ8VNdGFGuZGepd8qX2ezfYADrNR+4t85HDm8inbjTobSjWuljs
+ftUNkOeCHqAvWCFQTLCfdykvV08EJfVY79y7yFPtfRV2gxYokXFifjo3su9sVQr1
+UiIS5ZAsIC1hBXWeXoBN7QVTkFi7Yto6E1q2k10LiT3obpUUUQ/oclhrJOCJVjrS
+oRcj2QBy8OT4T9slJr5maTWdgd7Lt6+I6cGQXPaDvjGOJl0eBYM14vhx4rRQWytJ
+k07hhHFO4+9CGCuHS8AAy2gR6acYFWt2ZiiNZ0z/iPIHNK4YEyy9aLf6uZH/KQjE
+jmHToo7XD6QvCAEC5qTHby3o3LfHIhyZi/4L+AhS4FKUHF6M0peeyYt4z3HaK2d2
+N6mHLPdjwNjra7GOmcns4gzcrdfoF+R293KpPal4PjknvR3dZL4kKP/ougTAM5zv
+qDIvRbkHzjP8ChTpoLcJsNVXykNcNkjcSi0GHtIpYjh6QX6P2uvR/S4+Bbb9p9rn
+hIy/ovu9tWN2hiPxGPe6torF6BulAxsTYlDercC204AyzsrdA0pr6HBgJH9C6ML1
+TchwodbFJqn9rSv91i1liusAGoOvE81AGBdrXY7LxfSNhYY1IK6yR/POJPTd53sA
+uX2/j6Rtoksd/2BHPM6AUnI/2B9slhuzWX2aCtWLeuwvXDS6rYuTigaQmLkzTRfM
+dlMI3s9KLXxgi5YVumUZleJWXwBNP7KiKajd+VTSD+7WAhyhM5FIG5wVOaxmy4G2
+TyqZ/Ax9d2VEjTQHWvQlLPQ4Mp0EIz0aEl94K/S8CK8bJRH6+PRkar+dJi1xqlL+
+BYb42At9mEJ8odLlFikvNi1+t7jqXk5jRi5C0xFKx3nTtzoH2zNUeuA3R6vSocVK
+45jnze9IkKmxMlJ4loR5sgszdpDCD3kXqjtCcbMTmcrGyzJek3HSOTpiEORoTFOe
+Rhg6jH5lm+QcC263oipojS0qEQcnsWJP2CylNYMYHR9O/9NQxT3o2lsRHqZTMELV
+uQa/SFH+paQNbZOj8MRwPSqqiIxJFuLswKte1R+W7LKn1yBSM7Pp39lNbzGvJD2E
+YRfnCwFpJ54voVAuQ4jXJvigCW2qeCjXlxeD6K2j4eGJEEOmIjIW1wjubyBY6OI3
+-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIIEWTCCAsGgAwIBAgIJAJinz4jHSjLtMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
@@ -66,4 +66,3 @@ jMqTFlmO7kpf/jpCSmamp3/JSEE1BJKHwQ6Ql4nzRA2N1mnvWH7Zxcv043gkHeAu
9Wc2uXpw9xF8itV4Uvcdr3dwqByvIqn7iI/gB+4l41e0u8OmH2MKOx4Nxlly5TNW
HcVKQHyOeyvnINuBAQ==
-----END CERTIFICATE-----
-
diff --git a/lib-python/2.7/test/list_tests.py b/lib-python/2.7/test/list_tests.py
index cd103e0900..166b88a9fe 100644
--- a/lib-python/2.7/test/list_tests.py
+++ b/lib-python/2.7/test/list_tests.py
@@ -45,14 +45,15 @@ class CommonTest(seq_tests.CommonTest):
self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
+ def test_repr_deep(self):
if test_support.check_impl_detail():
depth = sys.getrecursionlimit() + 100
else:
depth = 1000 * 1000 # should be enough to exhaust the stack
- l0 = []
+ a = self.type2test([])
for i in xrange(depth):
- l0 = [l0]
- self.assertRaises(RuntimeError, repr, l0)
+ a = self.type2test([a])
+ self.assertRaises(RuntimeError, repr, a)
def test_print(self):
d = self.type2test(xrange(200))
diff --git a/lib-python/2.7/test/lock_tests.py b/lib-python/2.7/test/lock_tests.py
index efc464f403..836f3e9a3d 100644
--- a/lib-python/2.7/test/lock_tests.py
+++ b/lib-python/2.7/test/lock_tests.py
@@ -347,22 +347,40 @@ class ConditionTests(BaseTestCase):
self.assertRaises(RuntimeError, cond.notify)
def _check_notify(self, cond):
+ # Note that this test is sensitive to timing. If the worker threads
+ # don't execute in a timely fashion, the main thread may think they
+ # are further along then they are. The main thread therefore issues
+ # _wait() statements to try to make sure that it doesn't race ahead
+ # of the workers.
+ # Secondly, this test assumes that condition variables are not subject
+ # to spurious wakeups. The absence of spurious wakeups is an implementation
+ # detail of Condition Cariables in current CPython, but in general, not
+ # a guaranteed property of condition variables as a programming
+ # construct. In particular, it is possible that this can no longer
+ # be conveniently guaranteed should their implementation ever change.
N = 5
+ ready = []
results1 = []
results2 = []
phase_num = 0
def f():
cond.acquire()
+ ready.append(phase_num)
cond.wait()
cond.release()
results1.append(phase_num)
cond.acquire()
+ ready.append(phase_num)
cond.wait()
cond.release()
results2.append(phase_num)
b = Bunch(f, N)
b.wait_for_started()
- _wait()
+ # first wait, to ensure all workers settle into cond.wait() before
+ # we continue. See issues #8799 and #30727.
+ while len(ready) < 5:
+ _wait()
+ ready = []
self.assertEqual(results1, [])
# Notify 3 threads at first
cond.acquire()
@@ -374,6 +392,9 @@ class ConditionTests(BaseTestCase):
_wait()
self.assertEqual(results1, [1] * 3)
self.assertEqual(results2, [])
+ # make sure all awaken workers settle into cond.wait()
+ while len(ready) < 3:
+ _wait()
# Notify 5 threads: they might be in their first or second wait
cond.acquire()
cond.notify(5)
@@ -384,6 +405,9 @@ class ConditionTests(BaseTestCase):
_wait()
self.assertEqual(results1, [1] * 3 + [2] * 2)
self.assertEqual(results2, [2] * 3)
+ # make sure all workers settle into cond.wait()
+ while len(ready) < 5:
+ _wait()
# Notify all threads: they are all in their second wait
cond.acquire()
cond.notify_all()
diff --git a/lib-python/2.7/test/make_ssl_certs.py b/lib-python/2.7/test/make_ssl_certs.py
index a1f298de34..ca2c12cada 100644
--- a/lib-python/2.7/test/make_ssl_certs.py
+++ b/lib-python/2.7/test/make_ssl_certs.py
@@ -50,7 +50,7 @@ req_template = """
dir = cadir
database = $dir/index.txt
crlnumber = $dir/crl.txt
- default_md = sha1
+ default_md = sha256
default_days = 3600
default_crl_days = 3600
certificate = pycacert.pem
@@ -88,7 +88,9 @@ req_template = """
here = os.path.abspath(os.path.dirname(__file__))
-def make_cert_key(hostname, sign=False, extra_san=''):
+
+def make_cert_key(hostname, sign=False, extra_san='',
+ ext='req_x509_extensions_full', key='rsa:3072'):
print("creating cert for " + hostname)
tempnames = []
for i in range(3):
@@ -146,7 +148,7 @@ def make_ca():
t.flush()
with tempfile.NamedTemporaryFile() as f:
args = ['req', '-new', '-days', '3650', '-extensions', 'v3_ca', '-nodes',
- '-newkey', 'rsa:2048', '-keyout', 'pycakey.pem',
+ '-newkey', 'rsa:3072', '-keyout', 'pycakey.pem',
'-out', f.name,
'-subj', '/C=XY/L=Castle Anthrax/O=Python Software Foundation CA/CN=our-ca-server']
check_call(['openssl'] + args)
diff --git a/lib-python/2.7/test/mapping_tests.py b/lib-python/2.7/test/mapping_tests.py
index 12d9f25632..59a9634820 100644
--- a/lib-python/2.7/test/mapping_tests.py
+++ b/lib-python/2.7/test/mapping_tests.py
@@ -2,6 +2,7 @@
import unittest
import UserDict
import test_support
+import sys
class BasicTestMappingProtocol(unittest.TestCase):
@@ -648,6 +649,14 @@ class TestHashMappingProtocol(TestMappingProtocol):
d = self._full_mapping({1: BadRepr()})
self.assertRaises(Exc, repr, d)
+ def test_repr_deep(self):
+ d = self._empty_mapping()
+ for i in range(sys.getrecursionlimit() + 100):
+ d0 = d
+ d = self._empty_mapping()
+ d[1] = d0
+ self.assertRaises(RuntimeError, repr, d)
+
def test_le(self):
self.assertTrue(not (self._empty_mapping() < self._empty_mapping()))
self.assertTrue(not (self._full_mapping({1: 2}) < self._full_mapping({1L: 2L})))
diff --git a/lib-python/2.7/test/test_multibytecodec_support.py b/lib-python/2.7/test/multibytecodec_support.py
index f15986ab64..fe44fe9c85 100644
--- a/lib-python/2.7/test/test_multibytecodec_support.py
+++ b/lib-python/2.7/test/multibytecodec_support.py
@@ -1,4 +1,4 @@
-# test_multibytecodec_support.py
+# multibytecodec_support.py
# Common Unittest Routines for CJK codecs
#
diff --git a/lib-python/2.7/test/pickletester.py b/lib-python/2.7/test/pickletester.py
index b6514439ee..c622ec16dd 100644
--- a/lib-python/2.7/test/pickletester.py
+++ b/lib-python/2.7/test/pickletester.py
@@ -149,6 +149,17 @@ class E(C):
class H(object):
pass
+class MyErr(Exception):
+ def __init__(self):
+ pass
+
+class I:
+ def __init__(self, *args, **kwargs):
+ raise MyErr()
+
+ def __getinitargs__(self):
+ return ()
+
# Hashable mutable key
class K(object):
def __init__(self, value):
@@ -167,6 +178,8 @@ __main__.E = E
E.__module__ = "__main__"
__main__.H = H
H.__module__ = "__main__"
+__main__.I = I
+I.__module__ = "__main__"
__main__.K = K
K.__module__ = "__main__"
@@ -625,6 +638,36 @@ class AbstractUnpickleTests(unittest.TestCase):
'q\x00oq\x01}q\x02b.').replace('X', xname)
self.assert_is_copy(X(*args), self.loads(pickle2))
+ def test_load_classic_instance_error(self):
+ # Issue #28925.
+ # Protocol 0 (text mode pickle):
+ """
+ 0: ( MARK
+ 1: i INST '__main__ I' (MARK at 0)
+ 13: ( MARK
+ 14: d DICT (MARK at 13)
+ 15: b BUILD
+ 16: . STOP
+ """
+ pickle0 = ('(i__main__\n'
+ 'I\n'
+ '(db.')
+ self.assertRaises(MyErr, self.loads, pickle0)
+
+ # Protocol 1 (binary mode pickle)
+ """
+ 0: ( MARK
+ 1: c GLOBAL '__main__ I'
+ 13: o OBJ (MARK at 0)
+ 14: } EMPTY_DICT
+ 15: b BUILD
+ 16: . STOP
+ """
+ pickle1 = ('(c__main__\n'
+ 'I\n'
+ 'o}b.')
+ self.assertRaises(MyErr, self.loads, pickle1)
+
def test_load_str(self):
# From Python 2: pickle.dumps('a\x00\xa0', protocol=0)
self.assertEqual(self.loads("S'a\\x00\\xa0'\n."), 'a\x00\xa0')
diff --git a/lib-python/2.7/test/pycacert.pem b/lib-python/2.7/test/pycacert.pem
index 09b1f3e08a..73150c960f 100644
--- a/lib-python/2.7/test/pycacert.pem
+++ b/lib-python/2.7/test/pycacert.pem
@@ -1,78 +1,99 @@
Certificate:
Data:
Version: 3 (0x2)
- Serial Number: 12723342612721443280 (0xb09264b1f2da21d0)
- Signature Algorithm: sha1WithRSAEncryption
+ Serial Number:
+ cb:2d:80:99:5a:69:52:5b
+ Signature Algorithm: sha256WithRSAEncryption
Issuer: C=XY, O=Python Software Foundation CA, CN=our-ca-server
Validity
- Not Before: Jan 4 19:47:07 2013 GMT
- Not After : Jan 2 19:47:07 2023 GMT
+ Not Before: Aug 29 14:23:16 2018 GMT
+ Not After : Aug 26 14:23:16 2028 GMT
Subject: C=XY, O=Python Software Foundation CA, CN=our-ca-server
Subject Public Key Info:
Public Key Algorithm: rsaEncryption
- Public-Key: (2048 bit)
+ Public-Key: (3072 bit)
Modulus:
- 00:e7:de:e9:e3:0c:9f:00:b6:a1:fd:2b:5b:96:d2:
- 6f:cc:e0:be:86:b9:20:5e:ec:03:7a:55:ab:ea:a4:
- e9:f9:49:85:d2:66:d5:ed:c7:7a:ea:56:8e:2d:8f:
- e7:42:e2:62:28:a9:9f:d6:1b:8e:eb:b5:b4:9c:9f:
- 14:ab:df:e6:94:8b:76:1d:3e:6d:24:61:ed:0c:bf:
- 00:8a:61:0c:df:5c:c8:36:73:16:00:cd:47:ba:6d:
- a4:a4:74:88:83:23:0a:19:fc:09:a7:3c:4a:4b:d3:
- e7:1d:2d:e4:ea:4c:54:21:f3:26:db:89:37:18:d4:
- 02:bb:40:32:5f:a4:ff:2d:1c:f7:d4:bb:ec:8e:cf:
- 5c:82:ac:e6:7c:08:6c:48:85:61:07:7f:25:e0:5c:
- e0:bc:34:5f:e0:b9:04:47:75:c8:47:0b:8d:bc:d6:
- c8:68:5f:33:83:62:d2:20:44:35:b1:ad:81:1a:8a:
- cd:bc:35:b0:5c:8b:47:d6:18:e9:9c:18:97:cc:01:
- 3c:29:cc:e8:1e:e4:e4:c1:b8:de:e7:c2:11:18:87:
- 5a:93:34:d8:a6:25:f7:14:71:eb:e4:21:a2:d2:0f:
- 2e:2e:d4:62:00:35:d3:d6:ef:5c:60:4b:4c:a9:14:
- e2:dd:15:58:46:37:33:26:b7:e7:2e:5d:ed:42:e4:
- c5:4d
+ 00:97:ed:55:41:ba:36:17:95:db:71:1c:d3:e1:61:
+ ac:58:73:e3:c6:96:cf:2b:1f:b8:08:f5:9d:4b:4b:
+ c7:30:f6:b8:0b:b3:52:72:a0:bb:c9:4d:3b:8e:df:
+ 22:8e:01:57:81:c9:92:73:cc:00:c6:ec:70:b0:3a:
+ 17:40:c1:df:f2:8c:36:4c:c4:a7:81:e7:b6:24:68:
+ e2:a0:7e:35:07:2f:a0:5b:f9:45:46:f7:1e:f0:46:
+ 11:fe:ca:1a:3c:50:f1:26:a9:5f:9c:22:9c:f8:41:
+ e1:df:4f:12:95:19:2f:5c:90:01:17:6e:7e:3e:7d:
+ cf:e9:09:af:25:f8:f8:42:77:2d:6d:5f:36:f2:78:
+ 1e:7d:4a:87:68:63:6c:06:71:1b:8d:fa:25:fe:d4:
+ d3:f5:a5:17:b1:ef:ea:17:cb:54:c8:27:99:80:cb:
+ 3c:45:f1:2c:52:1c:dd:1f:51:45:20:50:1e:5e:ab:
+ 57:73:1b:41:78:96:de:84:a4:7a:dd:8f:30:85:36:
+ 58:79:76:a0:d2:61:c8:1b:a9:94:99:63:c6:ee:f8:
+ 14:bf:b4:52:56:31:97:fa:eb:ac:53:9e:95:ce:4c:
+ c4:5a:4a:b7:ca:03:27:5b:35:57:ce:02:dc:ec:ca:
+ 69:f8:8a:5a:39:cb:16:20:15:03:24:61:6c:f4:7a:
+ fc:b6:48:e5:59:10:5c:49:d0:23:9f:fb:71:5e:3a:
+ e9:68:9f:34:72:80:27:b6:3f:4c:b1:d9:db:63:7f:
+ 67:68:4a:6e:11:f8:e8:c0:f4:5a:16:39:53:0b:68:
+ de:77:fa:45:e7:f8:91:cd:78:cd:28:94:97:71:54:
+ fb:cf:f0:37:de:c9:26:c5:dc:1b:9e:89:6d:09:ac:
+ c8:44:71:cb:6d:f1:97:31:d5:4c:20:33:bf:75:4a:
+ a0:e0:dc:69:11:ed:2a:b4:64:10:11:30:8b:0e:b0:
+ a7:10:d8:8a:c5:aa:1b:c8:26:8a:25:e7:66:9f:a5:
+ 6a:1a:2f:7c:5f:83:c6:78:4f:1f
Exponent: 65537 (0x10001)
X509v3 extensions:
X509v3 Subject Key Identifier:
- BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B
+ DD:BF:CA:DA:E6:D1:34:BA:37:75:21:CA:6F:9A:08:28:F2:35:B6:48
X509v3 Authority Key Identifier:
- keyid:BC:DD:62:D9:76:DA:1B:D2:54:6B:CF:E0:66:9B:1E:1E:7B:56:0C:0B
+ keyid:DD:BF:CA:DA:E6:D1:34:BA:37:75:21:CA:6F:9A:08:28:F2:35:B6:48
X509v3 Basic Constraints:
CA:TRUE
- Signature Algorithm: sha1WithRSAEncryption
- 7d:0a:f5:cb:8d:d3:5d:bd:99:8e:f8:2b:0f:ba:eb:c2:d9:a6:
- 27:4f:2e:7b:2f:0e:64:d8:1c:35:50:4e:ee:fc:90:b9:8d:6d:
- a8:c5:c6:06:b0:af:f3:2d:bf:3b:b8:42:07:dd:18:7d:6d:95:
- 54:57:85:18:60:47:2f:eb:78:1b:f9:e8:17:fd:5a:0d:87:17:
- 28:ac:4c:6a:e6:bc:29:f4:f4:55:70:29:42:de:85:ea:ab:6c:
- 23:06:64:30:75:02:8e:53:bc:5e:01:33:37:cc:1e:cd:b8:a4:
- fd:ca:e4:5f:65:3b:83:1c:86:f1:55:02:a0:3a:8f:db:91:b7:
- 40:14:b4:e7:8d:d2:ee:73:ba:e3:e5:34:2d:bc:94:6f:4e:24:
- 06:f7:5f:8b:0e:a7:8e:6b:de:5e:75:f4:32:9a:50:b1:44:33:
- 9a:d0:05:e2:78:82:ff:db:da:8a:63:eb:a9:dd:d1:bf:a0:61:
- ad:e3:9e:8a:24:5d:62:0e:e7:4c:91:7f:ef:df:34:36:3b:2f:
- 5d:f5:84:b2:2f:c4:6d:93:96:1a:6f:30:28:f1:da:12:9a:64:
- b4:40:33:1d:bd:de:2b:53:a8:ea:be:d6:bc:4e:96:f5:44:fb:
- 32:18:ae:d5:1f:f6:69:af:b6:4e:7b:1d:58:ec:3b:a9:53:a3:
- 5e:58:c8:9e
+ Signature Algorithm: sha256WithRSAEncryption
+ 33:6a:54:d3:6b:c0:d7:01:5f:9d:f4:05:c1:93:66:90:50:d0:
+ b7:18:e9:b0:1e:4a:a0:b6:da:76:93:af:84:db:ad:15:54:31:
+ 15:13:e4:de:7e:4e:0c:d5:09:1c:34:35:b6:e5:4c:d6:6f:65:
+ 7d:32:5f:eb:fc:a9:6b:07:f7:49:82:e5:81:7e:07:80:9a:63:
+ f8:2c:c3:40:bc:8f:d4:2a:da:3e:d1:ee:08:b7:4d:a7:84:ca:
+ f4:3f:a1:98:45:be:b1:05:69:e7:df:d7:99:ab:1b:ee:8b:30:
+ cc:f7:fc:e7:d4:0b:17:ae:97:bf:e4:7b:fd:0f:a7:b4:85:79:
+ e3:59:e2:16:87:bf:1f:29:45:2c:23:93:76:be:c0:87:1d:de:
+ ec:2b:42:6a:e5:bb:c8:f4:0a:4a:08:0a:8c:5c:d8:7d:4d:d1:
+ b8:bf:d5:f7:29:ed:92:d1:94:04:e8:35:06:57:7f:2c:23:97:
+ 87:a5:35:8d:26:d3:1a:47:f2:16:d7:d9:c6:d4:1f:23:43:d3:
+ 26:99:39:ca:20:f4:71:23:6f:0c:4a:76:76:f7:76:1f:b3:fe:
+ bf:47:b0:fc:2a:56:81:e1:d2:dd:ee:08:d8:f4:ff:5a:dc:25:
+ 61:8a:91:02:b9:86:1c:f2:50:73:76:25:35:fc:b6:25:26:15:
+ cb:eb:c4:2b:61:0c:1c:e7:ee:2f:17:9b:ec:f0:d4:a1:84:e7:
+ d2:af:de:e4:1b:24:14:a7:01:87:e3:ab:29:58:46:a0:d9:c0:
+ 0a:e0:8d:d7:59:d3:1b:f8:54:20:3e:78:a5:a5:c8:4f:8b:03:
+ c4:96:9f:ec:fb:47:cf:76:2d:8d:65:34:27:bf:fa:ae:01:05:
+ 8a:f3:92:0a:dd:89:6c:97:a1:c7:e7:60:51:e7:ac:eb:4b:7d:
+ 2c:b8:65:c9:fe:5d:6a:48:55:8e:e4:c7:f9:6a:40:e1:b8:64:
+ 45:e9:b5:59:29:a5:5f:cf:7d:58:7d:64:79:e5:a4:09:ac:1e:
+ 76:65:3d:94:c4:68
-----BEGIN CERTIFICATE-----
-MIIDbTCCAlWgAwIBAgIJALCSZLHy2iHQMA0GCSqGSIb3DQEBBQUAME0xCzAJBgNV
+MIIEbTCCAtWgAwIBAgIJAMstgJlaaVJbMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNV
BAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUgRm91bmRhdGlvbiBDQTEW
-MBQGA1UEAwwNb3VyLWNhLXNlcnZlcjAeFw0xMzAxMDQxOTQ3MDdaFw0yMzAxMDIx
-OTQ3MDdaME0xCzAJBgNVBAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUg
-Rm91bmRhdGlvbiBDQTEWMBQGA1UEAwwNb3VyLWNhLXNlcnZlcjCCASIwDQYJKoZI
-hvcNAQEBBQADggEPADCCAQoCggEBAOfe6eMMnwC2of0rW5bSb8zgvoa5IF7sA3pV
-q+qk6flJhdJm1e3HeupWji2P50LiYiipn9Ybjuu1tJyfFKvf5pSLdh0+bSRh7Qy/
-AIphDN9cyDZzFgDNR7ptpKR0iIMjChn8Cac8SkvT5x0t5OpMVCHzJtuJNxjUArtA
-Ml+k/y0c99S77I7PXIKs5nwIbEiFYQd/JeBc4Lw0X+C5BEd1yEcLjbzWyGhfM4Ni
-0iBENbGtgRqKzbw1sFyLR9YY6ZwYl8wBPCnM6B7k5MG43ufCERiHWpM02KYl9xRx
-6+QhotIPLi7UYgA109bvXGBLTKkU4t0VWEY3Mya35y5d7ULkxU0CAwEAAaNQME4w
-HQYDVR0OBBYEFLzdYtl22hvSVGvP4GabHh57VgwLMB8GA1UdIwQYMBaAFLzdYtl2
-2hvSVGvP4GabHh57VgwLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
-AH0K9cuN0129mY74Kw+668LZpidPLnsvDmTYHDVQTu78kLmNbajFxgawr/Mtvzu4
-QgfdGH1tlVRXhRhgRy/reBv56Bf9Wg2HFyisTGrmvCn09FVwKULeheqrbCMGZDB1
-Ao5TvF4BMzfMHs24pP3K5F9lO4MchvFVAqA6j9uRt0AUtOeN0u5zuuPlNC28lG9O
-JAb3X4sOp45r3l519DKaULFEM5rQBeJ4gv/b2opj66nd0b+gYa3jnookXWIO50yR
-f+/fNDY7L131hLIvxG2TlhpvMCjx2hKaZLRAMx293itTqOq+1rxOlvVE+zIYrtUf
-9mmvtk57HVjsO6lTo15YyJ4=
+MBQGA1UEAwwNb3VyLWNhLXNlcnZlcjAeFw0xODA4MjkxNDIzMTZaFw0yODA4MjYx
+NDIzMTZaME0xCzAJBgNVBAYTAlhZMSYwJAYDVQQKDB1QeXRob24gU29mdHdhcmUg
+Rm91bmRhdGlvbiBDQTEWMBQGA1UEAwwNb3VyLWNhLXNlcnZlcjCCAaIwDQYJKoZI
+hvcNAQEBBQADggGPADCCAYoCggGBAJftVUG6NheV23Ec0+FhrFhz48aWzysfuAj1
+nUtLxzD2uAuzUnKgu8lNO47fIo4BV4HJknPMAMbscLA6F0DB3/KMNkzEp4HntiRo
+4qB+NQcvoFv5RUb3HvBGEf7KGjxQ8SapX5winPhB4d9PEpUZL1yQARdufj59z+kJ
+ryX4+EJ3LW1fNvJ4Hn1Kh2hjbAZxG436Jf7U0/WlF7Hv6hfLVMgnmYDLPEXxLFIc
+3R9RRSBQHl6rV3MbQXiW3oSket2PMIU2WHl2oNJhyBuplJljxu74FL+0UlYxl/rr
+rFOelc5MxFpKt8oDJ1s1V84C3OzKafiKWjnLFiAVAyRhbPR6/LZI5VkQXEnQI5/7
+cV466WifNHKAJ7Y/TLHZ22N/Z2hKbhH46MD0WhY5Uwto3nf6Ref4kc14zSiUl3FU
++8/wN97JJsXcG56JbQmsyERxy23xlzHVTCAzv3VKoODcaRHtKrRkEBEwiw6wpxDY
+isWqG8gmiiXnZp+lahovfF+DxnhPHwIDAQABo1AwTjAdBgNVHQ4EFgQU3b/K2ubR
+NLo3dSHKb5oIKPI1tkgwHwYDVR0jBBgwFoAU3b/K2ubRNLo3dSHKb5oIKPI1tkgw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAYEAM2pU02vA1wFfnfQFwZNm
+kFDQtxjpsB5KoLbadpOvhNutFVQxFRPk3n5ODNUJHDQ1tuVM1m9lfTJf6/ypawf3
+SYLlgX4HgJpj+CzDQLyP1CraPtHuCLdNp4TK9D+hmEW+sQVp59/Xmasb7oswzPf8
+59QLF66Xv+R7/Q+ntIV541niFoe/HylFLCOTdr7Ahx3e7CtCauW7yPQKSggKjFzY
+fU3RuL/V9yntktGUBOg1Bld/LCOXh6U1jSbTGkfyFtfZxtQfI0PTJpk5yiD0cSNv
+DEp2dvd2H7P+v0ew/CpWgeHS3e4I2PT/WtwlYYqRArmGHPJQc3YlNfy2JSYVy+vE
+K2EMHOfuLxeb7PDUoYTn0q/e5BskFKcBh+OrKVhGoNnACuCN11nTG/hUID54paXI
+T4sDxJaf7PtHz3YtjWU0J7/6rgEFivOSCt2JbJehx+dgUees60t9LLhlyf5dakhV
+juTH+WpA4bhkRem1WSmlX899WH1keeWkCawedmU9lMRo
-----END CERTIFICATE-----
diff --git a/lib-python/2.7/test/pythoninfo.py b/lib-python/2.7/test/pythoninfo.py
new file mode 100644
index 0000000000..12de99b09d
--- /dev/null
+++ b/lib-python/2.7/test/pythoninfo.py
@@ -0,0 +1,757 @@
+"""
+Collect various information about Python to help debugging test failures.
+"""
+from __future__ import print_function
+import errno
+import re
+import sys
+import traceback
+import warnings
+
+
+def normalize_text(text):
+ if text is None:
+ return None
+ text = str(text)
+ text = re.sub(r'\s+', ' ', text)
+ return text.strip()
+
+
+class PythonInfo:
+ def __init__(self):
+ self.info = {}
+
+ def add(self, key, value):
+ if key in self.info:
+ raise ValueError("duplicate key: %r" % key)
+
+ if value is None:
+ return
+
+ if not isinstance(value, int):
+ if not isinstance(value, str):
+ # convert other objects like sys.flags to string
+ value = str(value)
+
+ value = value.strip()
+ if not value:
+ return
+
+ self.info[key] = value
+
+ def get_infos(self):
+ """
+ Get information as a key:value dictionary where values are strings.
+ """
+ return {key: str(value) for key, value in self.info.items()}
+
+
+def copy_attributes(info_add, obj, name_fmt, attributes, formatter=None):
+ for attr in attributes:
+ value = getattr(obj, attr, None)
+ if value is None:
+ continue
+ name = name_fmt % attr
+ if formatter is not None:
+ value = formatter(attr, value)
+ info_add(name, value)
+
+
+def copy_attr(info_add, name, mod, attr_name):
+ try:
+ value = getattr(mod, attr_name)
+ except AttributeError:
+ return
+ info_add(name, value)
+
+
+def call_func(info_add, name, mod, func_name, formatter=None):
+ try:
+ func = getattr(mod, func_name)
+ except AttributeError:
+ return
+ value = func()
+ if formatter is not None:
+ value = formatter(value)
+ info_add(name, value)
+
+
+def collect_sys(info_add):
+ attributes = (
+ '_framework',
+ 'abiflags',
+ 'api_version',
+ 'builtin_module_names',
+ 'byteorder',
+ 'dont_write_bytecode',
+ 'executable',
+ 'flags',
+ 'float_info',
+ 'float_repr_style',
+ 'hash_info',
+ 'hexversion',
+ 'implementation',
+ 'int_info',
+ 'maxsize',
+ 'maxunicode',
+ 'path',
+ 'platform',
+ 'prefix',
+ 'thread_info',
+ 'version',
+ 'version_info',
+ 'winver',
+ )
+ copy_attributes(info_add, sys, 'sys.%s', attributes)
+
+ call_func(info_add, 'sys.androidapilevel', sys, 'getandroidapilevel')
+ call_func(info_add, 'sys.windowsversion', sys, 'getwindowsversion')
+
+ encoding = sys.getfilesystemencoding()
+ if hasattr(sys, 'getfilesystemencodeerrors'):
+ encoding = '%s/%s' % (encoding, sys.getfilesystemencodeerrors())
+ info_add('sys.filesystem_encoding', encoding)
+
+ for name in ('stdin', 'stdout', 'stderr'):
+ stream = getattr(sys, name)
+ if stream is None:
+ continue
+ encoding = getattr(stream, 'encoding', None)
+ if not encoding:
+ continue
+ errors = getattr(stream, 'errors', None)
+ if errors:
+ encoding = '%s/%s' % (encoding, errors)
+ info_add('sys.%s.encoding' % name, encoding)
+
+ # Were we compiled --with-pydebug or with #define Py_DEBUG?
+ Py_DEBUG = hasattr(sys, 'gettotalrefcount')
+ if Py_DEBUG:
+ text = 'Yes (sys.gettotalrefcount() present)'
+ else:
+ text = 'No (sys.gettotalrefcount() missing)'
+ info_add('Py_DEBUG', text)
+
+
+def collect_platform(info_add):
+ import platform
+
+ arch = platform.architecture()
+ arch = ' '.join(filter(bool, arch))
+ info_add('platform.architecture', arch)
+
+ info_add('platform.python_implementation',
+ platform.python_implementation())
+ info_add('platform.platform',
+ platform.platform(aliased=True))
+
+ libc_ver = ('%s %s' % platform.libc_ver()).strip()
+ if libc_ver:
+ info_add('platform.libc_ver', libc_ver)
+
+
+def collect_locale(info_add):
+ import locale
+
+ info_add('locale.encoding', locale.getpreferredencoding(False))
+
+
+def collect_builtins(info_add):
+ info_add('builtins.float.float_format', float.__getformat__("float"))
+ info_add('builtins.float.double_format', float.__getformat__("double"))
+
+
+def collect_os(info_add):
+ import os
+
+ def format_attr(attr, value):
+ if attr in ('supports_follow_symlinks', 'supports_fd',
+ 'supports_effective_ids'):
+ return str(sorted(func.__name__ for func in value))
+ else:
+ return value
+
+ attributes = (
+ 'name',
+ 'supports_bytes_environ',
+ 'supports_effective_ids',
+ 'supports_fd',
+ 'supports_follow_symlinks',
+ )
+ copy_attributes(info_add, os, 'os.%s', attributes, formatter=format_attr)
+
+ call_func(info_add, 'os.cwd', os, 'getcwd')
+
+ call_func(info_add, 'os.uid', os, 'getuid')
+ call_func(info_add, 'os.gid', os, 'getgid')
+ call_func(info_add, 'os.uname', os, 'uname')
+
+ def format_groups(groups):
+ return ', '.join(map(str, groups))
+
+ call_func(info_add, 'os.groups', os, 'getgroups', formatter=format_groups)
+
+ if hasattr(os, 'getlogin'):
+ try:
+ login = os.getlogin()
+ except OSError:
+ # getlogin() fails with "OSError: [Errno 25] Inappropriate ioctl
+ # for device" on Travis CI
+ pass
+ else:
+ info_add("os.login", login)
+
+ call_func(info_add, 'os.cpu_count', os, 'cpu_count')
+ call_func(info_add, 'os.loadavg', os, 'getloadavg')
+
+ # Environment variables used by the stdlib and tests. Don't log the full
+ # environment: filter to list to not leak sensitive information.
+ #
+ # HTTP_PROXY is not logged because it can contain a password.
+ ENV_VARS = frozenset((
+ "APPDATA",
+ "AR",
+ "ARCHFLAGS",
+ "ARFLAGS",
+ "AUDIODEV",
+ "CC",
+ "CFLAGS",
+ "COLUMNS",
+ "COMPUTERNAME",
+ "COMSPEC",
+ "CPP",
+ "CPPFLAGS",
+ "DISPLAY",
+ "DISTUTILS_DEBUG",
+ "DISTUTILS_USE_SDK",
+ "DYLD_LIBRARY_PATH",
+ "ENSUREPIP_OPTIONS",
+ "HISTORY_FILE",
+ "HOME",
+ "HOMEDRIVE",
+ "HOMEPATH",
+ "IDLESTARTUP",
+ "LANG",
+ "LDFLAGS",
+ "LDSHARED",
+ "LD_LIBRARY_PATH",
+ "LINES",
+ "MACOSX_DEPLOYMENT_TARGET",
+ "MAILCAPS",
+ "MAKEFLAGS",
+ "MIXERDEV",
+ "MSSDK",
+ "PATH",
+ "PATHEXT",
+ "PIP_CONFIG_FILE",
+ "PLAT",
+ "POSIXLY_CORRECT",
+ "PY_SAX_PARSER",
+ "ProgramFiles",
+ "ProgramFiles(x86)",
+ "RUNNING_ON_VALGRIND",
+ "SDK_TOOLS_BIN",
+ "SERVER_SOFTWARE",
+ "SHELL",
+ "SOURCE_DATE_EPOCH",
+ "SYSTEMROOT",
+ "TEMP",
+ "TERM",
+ "TILE_LIBRARY",
+ "TIX_LIBRARY",
+ "TMP",
+ "TMPDIR",
+ "TRAVIS",
+ "TZ",
+ "USERPROFILE",
+ "VIRTUAL_ENV",
+ "WAYLAND_DISPLAY",
+ "WINDIR",
+ "_PYTHON_HOST_PLATFORM",
+ "_PYTHON_PROJECT_BASE",
+ "_PYTHON_SYSCONFIGDATA_NAME",
+ "__PYVENV_LAUNCHER__",
+ ))
+ for name, value in os.environ.items():
+ uname = name.upper()
+ if (uname in ENV_VARS
+ # Copy PYTHON* and LC_* variables
+ or uname.startswith(("PYTHON", "LC_"))
+ # Visual Studio: VS140COMNTOOLS
+ or (uname.startswith("VS") and uname.endswith("COMNTOOLS"))):
+ info_add('os.environ[%s]' % name, value)
+
+ if hasattr(os, 'umask'):
+ mask = os.umask(0)
+ os.umask(mask)
+ info_add("os.umask", '%03o' % mask)
+
+ if hasattr(os, 'getrandom'):
+ # PEP 524: Check if system urandom is initialized
+ try:
+ try:
+ os.getrandom(1, os.GRND_NONBLOCK)
+ state = 'ready (initialized)'
+ except BlockingIOError as exc:
+ state = 'not seeded yet (%s)' % exc
+ info_add('os.getrandom', state)
+ except OSError as exc:
+ # Python was compiled on a more recent Linux version
+ # than the current Linux kernel: ignore OSError(ENOSYS)
+ if exc.errno != errno.ENOSYS:
+ raise
+
+
+def collect_readline(info_add):
+ try:
+ import readline
+ except ImportError:
+ return
+
+ def format_attr(attr, value):
+ if isinstance(value, int):
+ return "%#x" % value
+ else:
+ return value
+
+ attributes = (
+ "_READLINE_VERSION",
+ "_READLINE_RUNTIME_VERSION",
+ "_READLINE_LIBRARY_VERSION",
+ )
+ copy_attributes(info_add, readline, 'readline.%s', attributes,
+ formatter=format_attr)
+
+ if not hasattr(readline, "_READLINE_LIBRARY_VERSION"):
+ # _READLINE_LIBRARY_VERSION has been added to CPython 3.7
+ doc = getattr(readline, '__doc__', '')
+ if 'libedit readline' in doc:
+ info_add('readline.library', 'libedit readline')
+ elif 'GNU readline' in doc:
+ info_add('readline.library', 'GNU readline')
+
+
+def collect_gdb(info_add):
+ import subprocess
+
+ try:
+ proc = subprocess.Popen(["gdb", "-nx", "--version"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True)
+ version = proc.communicate()[0]
+ except OSError:
+ return
+
+ # Only keep the first line
+ version = version.splitlines()[0]
+ info_add('gdb_version', version)
+
+
+def collect_tkinter(info_add):
+ try:
+ import _tkinter
+ except ImportError:
+ pass
+ else:
+ attributes = ('TK_VERSION', 'TCL_VERSION')
+ copy_attributes(info_add, _tkinter, 'tkinter.%s', attributes)
+
+ try:
+ import tkinter
+ except ImportError:
+ pass
+ else:
+ tcl = tkinter.Tcl()
+ patchlevel = tcl.call('info', 'patchlevel')
+ info_add('tkinter.info_patchlevel', patchlevel)
+
+
+def collect_time(info_add):
+ import time
+
+ info_add('time.time', time.time())
+
+ attributes = (
+ 'altzone',
+ 'daylight',
+ 'timezone',
+ 'tzname',
+ )
+ copy_attributes(info_add, time, 'time.%s', attributes)
+
+ if hasattr(time, 'get_clock_info'):
+ for clock in ('clock', 'monotonic', 'perf_counter',
+ 'process_time', 'thread_time', 'time'):
+ try:
+ # prevent DeprecatingWarning on get_clock_info('clock')
+ with warnings.catch_warnings(record=True):
+ clock_info = time.get_clock_info(clock)
+ except ValueError:
+ # missing clock like time.thread_time()
+ pass
+ else:
+ info_add('time.get_clock_info(%s)' % clock, clock_info)
+
+
+def collect_datetime(info_add):
+ try:
+ import datetime
+ except ImportError:
+ return
+
+ info_add('datetime.datetime.now', datetime.datetime.now())
+
+
+def collect_sysconfig(info_add):
+ import sysconfig
+
+ for name in (
+ 'ABIFLAGS',
+ 'ANDROID_API_LEVEL',
+ 'CC',
+ 'CCSHARED',
+ 'CFLAGS',
+ 'CFLAGSFORSHARED',
+ 'CONFIG_ARGS',
+ 'HOST_GNU_TYPE',
+ 'MACHDEP',
+ 'MULTIARCH',
+ 'OPT',
+ 'PY_CFLAGS',
+ 'PY_CFLAGS_NODIST',
+ 'PY_CORE_LDFLAGS',
+ 'PY_LDFLAGS',
+ 'PY_LDFLAGS_NODIST',
+ 'PY_STDMODULE_CFLAGS',
+ 'Py_DEBUG',
+ 'Py_ENABLE_SHARED',
+ 'SHELL',
+ 'SOABI',
+ 'prefix',
+ ):
+ value = sysconfig.get_config_var(name)
+ if name == 'ANDROID_API_LEVEL' and not value:
+ # skip ANDROID_API_LEVEL=0
+ continue
+ value = normalize_text(value)
+ info_add('sysconfig[%s]' % name, value)
+
+
+def collect_ssl(info_add):
+ import os
+ try:
+ import ssl
+ except ImportError:
+ return
+ try:
+ import _ssl
+ except ImportError:
+ _ssl = None
+
+ def format_attr(attr, value):
+ if attr.startswith('OP_'):
+ return '%#8x' % value
+ else:
+ return value
+
+ attributes = (
+ 'OPENSSL_VERSION',
+ 'OPENSSL_VERSION_INFO',
+ 'HAS_SNI',
+ 'OP_ALL',
+ 'OP_NO_TLSv1_1',
+ )
+ copy_attributes(info_add, ssl, 'ssl.%s', attributes, formatter=format_attr)
+
+ options_names = []
+ protocol_names = {}
+ verify_modes = {}
+ for name in dir(ssl):
+ if name.startswith('OP_'):
+ options_names.append((name, getattr(ssl, name)))
+ elif name.startswith('PROTOCOL_'):
+ protocol_names[getattr(ssl, name)] = name
+ elif name.startswith('CERT_'):
+ verify_modes[getattr(ssl, name)] = name
+ options_names.sort(key=lambda item: item[1], reverse=True)
+
+ def formatter(attr_name, value):
+ if attr_name == 'options':
+ options_text = []
+ for opt_name, opt_value in options_names:
+ if value & opt_value:
+ options_text.append(opt_name)
+ value &= ~opt_value
+ if value:
+ options_text.append(str(value))
+ return '|' .join(options_text)
+ elif attr_name == 'verify_mode':
+ return verify_modes.get(value, value)
+ elif attr_name == 'protocol':
+ return protocol_names.get(value, value)
+ else:
+ return value
+
+ for name, ctx in (
+ ('SSLContext(PROTOCOL_TLS)', ssl.SSLContext(ssl.PROTOCOL_TLS)),
+ ('default_https_context', ssl._create_default_https_context()),
+ ('stdlib_context', ssl._create_stdlib_context()),
+ ):
+ attributes = (
+ 'minimum_version',
+ 'maximum_version',
+ 'protocol',
+ 'options',
+ 'verify_mode',
+ )
+ copy_attributes(info_add, ctx, 'ssl.%s.%%s' % name, attributes, formatter=formatter)
+
+ env_names = ["OPENSSL_CONF", "SSLKEYLOGFILE"]
+ if _ssl is not None and hasattr(_ssl, 'get_default_verify_paths'):
+ parts = _ssl.get_default_verify_paths()
+ env_names.extend((parts[0], parts[2]))
+
+ for name in env_names:
+ try:
+ value = os.environ[name]
+ except KeyError:
+ continue
+ info_add('ssl.environ[%s]' % name, value)
+
+
+def collect_socket(info_add):
+ import socket
+
+ hostname = socket.gethostname()
+ info_add('socket.hostname', hostname)
+
+
+def collect_sqlite(info_add):
+ try:
+ import sqlite3
+ except ImportError:
+ return
+
+ attributes = ('version', 'sqlite_version')
+ copy_attributes(info_add, sqlite3, 'sqlite3.%s', attributes)
+
+
+def collect_zlib(info_add):
+ try:
+ import zlib
+ except ImportError:
+ return
+
+ attributes = ('ZLIB_VERSION', 'ZLIB_RUNTIME_VERSION')
+ copy_attributes(info_add, zlib, 'zlib.%s', attributes)
+
+
+def collect_expat(info_add):
+ try:
+ from xml.parsers import expat
+ except ImportError:
+ return
+
+ attributes = ('EXPAT_VERSION',)
+ copy_attributes(info_add, expat, 'expat.%s', attributes)
+
+
+def collect_decimal(info_add):
+ try:
+ import _decimal
+ except ImportError:
+ return
+
+ attributes = ('__libmpdec_version__',)
+ copy_attributes(info_add, _decimal, '_decimal.%s', attributes)
+
+
+def collect_testcapi(info_add):
+ try:
+ import _testcapi
+ except ImportError:
+ return
+
+ call_func(info_add, 'pymem.allocator', _testcapi, 'pymem_getallocatorsname')
+ copy_attr(info_add, 'pymem.with_pymalloc', _testcapi, 'WITH_PYMALLOC')
+
+
+def collect_resource(info_add):
+ try:
+ import resource
+ except ImportError:
+ return
+
+ limits = [attr for attr in dir(resource) if attr.startswith('RLIMIT_')]
+ for name in limits:
+ key = getattr(resource, name)
+ value = resource.getrlimit(key)
+ info_add('resource.%s' % name, value)
+
+ call_func(info_add, 'resource.pagesize', resource, 'getpagesize')
+
+
+def collect_test_socket(info_add):
+ try:
+ from test import test_socket
+ except ImportError:
+ return
+
+ # all check attributes like HAVE_SOCKET_CAN
+ attributes = [name for name in dir(test_socket)
+ if name.startswith('HAVE_')]
+ copy_attributes(info_add, test_socket, 'test_socket.%s', attributes)
+
+
+def collect_test_support(info_add):
+ try:
+ from test import support
+ except ImportError:
+ return
+
+ attributes = ('IPV6_ENABLED',)
+ copy_attributes(info_add, support, 'test_support.%s', attributes)
+
+ call_func(info_add, 'test_support._is_gui_available', support, '_is_gui_available')
+ call_func(info_add, 'test_support.python_is_optimized', support, 'python_is_optimized')
+
+
+def collect_cc(info_add):
+ import subprocess
+ import sysconfig
+
+ CC = sysconfig.get_config_var('CC')
+ if not CC:
+ return
+
+ try:
+ import shlex
+ args = shlex.split(CC)
+ except ImportError:
+ args = CC.split()
+ args.append('--version')
+ try:
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ except OSError:
+ # Cannot run the compiler, for example when Python has been
+ # cross-compiled and installed on the target platform where the
+ # compiler is missing.
+ return
+
+ stdout = proc.communicate()[0]
+ if proc.returncode:
+ # CC --version failed: ignore error
+ return
+
+ text = stdout.splitlines()[0]
+ text = normalize_text(text)
+ info_add('CC.version', text)
+
+
+def collect_gdbm(info_add):
+ try:
+ from _gdbm import _GDBM_VERSION
+ except ImportError:
+ return
+
+ info_add('gdbm.GDBM_VERSION', '.'.join(map(str, _GDBM_VERSION)))
+
+
+def collect_get_config(info_add):
+ # Dump global configuration variables, _PyCoreConfig
+ # and _PyMainInterpreterConfig
+ try:
+ from _testinternalcapi import get_configs
+ except ImportError:
+ return
+
+ all_configs = get_configs()
+ for config_type in sorted(all_configs):
+ config = all_configs[config_type]
+ for key in sorted(config):
+ info_add('%s[%s]' % (config_type, key), repr(config[key]))
+
+
+def collect_subprocess(info_add):
+ import subprocess
+ copy_attributes(info_add, subprocess, 'subprocess.%s', ('_USE_POSIX_SPAWN',))
+
+
+def collect_info(info):
+ error = False
+ info_add = info.add
+
+ for collect_func in (
+ # collect_os() should be the first, to check the getrandom() status
+ collect_os,
+
+ collect_builtins,
+ collect_gdb,
+ collect_locale,
+ collect_platform,
+ collect_readline,
+ collect_socket,
+ collect_sqlite,
+ collect_ssl,
+ collect_sys,
+ collect_sysconfig,
+ collect_time,
+ collect_datetime,
+ collect_tkinter,
+ collect_zlib,
+ collect_expat,
+ collect_decimal,
+ collect_testcapi,
+ collect_resource,
+ collect_cc,
+ collect_gdbm,
+ collect_get_config,
+ collect_subprocess,
+
+ # Collecting from tests should be last as they have side effects.
+ collect_test_socket,
+ collect_test_support,
+ ):
+ try:
+ collect_func(info_add)
+ except Exception as exc:
+ error = True
+ print("ERROR: %s() failed" % (collect_func.__name__),
+ file=sys.stderr)
+ traceback.print_exc(file=sys.stderr)
+ print(file=sys.stderr)
+ sys.stderr.flush()
+
+ return error
+
+
+def dump_info(info, file=None):
+ title = "Python debug information"
+ print(title)
+ print("=" * len(title))
+ print()
+
+ infos = info.get_infos()
+ infos = sorted(infos.items())
+ for key, value in infos:
+ value = value.replace("\n", " ")
+ print("%s: %s" % (key, value))
+ print()
+
+
+def main():
+ info = PythonInfo()
+ error = collect_info(info)
+ dump_info(info)
+
+ if error:
+ print("Collection failed: exit with error", file=sys.stderr)
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib-python/2.7/test/regrtest.py b/lib-python/2.7/test/regrtest.py
index f32f4ed149..5b0b3e4422 100755
--- a/lib-python/2.7/test/regrtest.py
+++ b/lib-python/2.7/test/regrtest.py
@@ -27,7 +27,7 @@ Verbosity
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- re-run failed tests in verbose mode immediately
-q/--quiet -- no output unless one or more tests fail
--S/--slow -- print the slowest 10 tests
+-S/--slowest -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
@@ -37,6 +37,9 @@ Selecting tests
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
+-m/--match PAT -- match test cases and methods with glob pattern PAT
+--matchfile FILENAME -- filters tests using a text file, one pattern per line
+-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
@@ -58,6 +61,14 @@ Special runs
-- call gc.set_threshold(THRESHOLD)
-F/--forever -- run the specified tests in a loop, until an error happens
-P/--pgo -- enable Profile Guided Optimization training
+--testdir -- execute test files in the specified directory
+ (instead of the Python stdlib test suite)
+--list-tests -- only write the name of tests that will be run,
+ don't execute them
+--list-cases -- only write the name of test cases that will be run,
+ don't execute them
+--fail-env-changed -- if a test file alters the environment, mark the test
+ as failed
Additional Option Details:
@@ -151,24 +162,33 @@ resources to test. Currently only the following are defined:
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the bsddb tests, give the
option '-uall,-bsddb'.
+
+--matchfile filters tests using a text file, one pattern per line.
+Pattern examples:
+
+- test method: test_stat_attributes
+- test class: FileTests
+- test identifier: test_os.FileTests.test_stat_attributes
"""
import StringIO
+import datetime
import getopt
+import imp
import json
+import math
import os
+import platform
import random
import re
import shutil
import sys
+import sysconfig
+import tempfile
import time
import traceback
-import warnings
import unittest
-import tempfile
-import imp
-import platform
-import sysconfig
+import warnings
# Some times __path__ and __file__ are not absolute (e.g. while running from
@@ -220,12 +240,27 @@ ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
+CHILD_ERROR = -5 # error in a child process
+TEST_DID_NOT_RUN = -6 # the test did not run any tests
+
+# Minimum duration of a test to display its duration or to mention that
+# the test is running in background
+PROGRESS_MIN_TIME = 30.0 # seconds
+
+# Display the running tests if nothing happened last N seconds
+PROGRESS_UPDATE = 30.0 # seconds
-from test import test_support
+from test import support
-RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
- 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
- 'xpickle')
+ALL_RESOURCES = ('audio', 'curses', 'largefile', 'network', 'bsddb',
+ 'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui',
+ 'xpickle')
+
+# Other resources excluded from --use=all:
+#
+# - extralargefile (ex: test_zipfile64): really too slow to be enabled
+# "by default"
+RESOURCE_NAMES = ALL_RESOURCES + ('extralargefile',)
TEMPDIR = os.path.abspath(tempfile.gettempdir())
@@ -236,12 +271,76 @@ def usage(code, msg=''):
sys.exit(code)
+def format_duration(seconds):
+ ms = int(math.ceil(seconds * 1e3))
+ seconds, ms = divmod(ms, 1000)
+ minutes, seconds = divmod(seconds, 60)
+ hours, minutes = divmod(minutes, 60)
+
+ parts = []
+ if hours:
+ parts.append('%s hour' % hours)
+ if minutes:
+ parts.append('%s min' % minutes)
+ if seconds:
+ parts.append('%s sec' % seconds)
+ if ms:
+ parts.append('%s ms' % ms)
+ if not parts:
+ return '0 ms'
+
+ parts = parts[:2]
+ return ' '.join(parts)
+
+
+_FORMAT_TEST_RESULT = {
+ PASSED: '%s passed',
+ FAILED: '%s failed',
+ ENV_CHANGED: '%s failed (env changed)',
+ SKIPPED: '%s skipped',
+ RESOURCE_DENIED: '%s skipped (resource denied)',
+ INTERRUPTED: '%s interrupted',
+ CHILD_ERROR: '%s crashed',
+ TEST_DID_NOT_RUN: '%s run no tests',
+}
+
+
+def format_test_result(test_name, result):
+ fmt = _FORMAT_TEST_RESULT.get(result, "%s")
+ return fmt % test_name
+
+
+def cpu_count():
+ # first try os.sysconf() to prevent loading the big multiprocessing module
+ try:
+ return os.sysconf('SC_NPROCESSORS_ONLN')
+ except (AttributeError, ValueError):
+ pass
+
+ # try multiprocessing.cpu_count()
+ try:
+ import multiprocessing
+ except ImportError:
+ pass
+ else:
+ return multiprocessing.cpu_count()
+
+ return None
+
+
+def unload_test_modules(save_modules):
+ # Unload the newly imported modules (best effort finalization)
+ for module in sys.modules.keys():
+ if module not in save_modules and module.startswith("test."):
+ support.unload(module)
+
+
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=False, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
- header=False, pgo=False):
+ header=False, pgo=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
@@ -264,15 +363,19 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
directly to set the values that would normally be set by flags
on the command line.
"""
+ regrtest_start_time = time.time()
- test_support.record_original_stdout(sys.stdout)
+ support.record_original_stdout(sys.stdout)
try:
- opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:P',
+ opts, args = getopt.getopt(sys.argv[1:], 'hvqxsSrf:lu:t:TD:NLR:FwWM:j:PGm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
- 'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
+ 'exclude', 'single', 'slow', 'slowest', 'randomize', 'fromfile=',
+ 'findleaks',
'use=', 'threshold=', 'trace', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
- 'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo'])
+ 'multiprocess=', 'slaveargs=', 'forever', 'header', 'pgo',
+ 'failfast', 'match=', 'testdir=', 'list-tests', 'list-cases',
+ 'coverage', 'matchfile=', 'fail-env-changed', 'cleanup'])
except getopt.error, msg:
usage(2, msg)
@@ -281,6 +384,11 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
+ slaveargs = None
+ list_tests = False
+ list_cases_opt = False
+ fail_env_changed = False
+ cleanup_tests = False
for o, a in opts:
if o in ('-h', '--help'):
usage(0)
@@ -290,6 +398,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
verbose2 = True
elif o in ('-W', '--verbose3'):
verbose3 = True
+ elif o in ('-G', '--failfast'):
+ failfast = True
elif o in ('-q', '--quiet'):
quiet = True;
verbose = 0
@@ -297,7 +407,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude = True
elif o in ('-s', '--single'):
single = True
- elif o in ('-S', '--slow'):
+ elif o in ('-S', '--slow', '--slowest'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
@@ -305,6 +415,17 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
+ elif o in ('-m', '--match'):
+ if match_tests is None:
+ match_tests = []
+ match_tests.append(a)
+ elif o == '--matchfile':
+ if match_tests is None:
+ match_tests = []
+ filename = os.path.join(support.SAVEDCWD, a)
+ with open(filename) as fp:
+ for line in fp:
+ match_tests.append(line.strip())
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
@@ -334,12 +455,12 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
elif o in ('-M', '--memlimit'):
- test_support.set_memlimit(a)
+ support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
- use_resources[:] = RESOURCE_NAMES
+ use_resources[:] = ALL_RESOURCES
continue
remove = False
if r[0] == '-':
@@ -359,16 +480,19 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
elif o == '--header':
header = True
elif o == '--slaveargs':
- args, kwargs = json.loads(a)
- try:
- result = runtest(*args, **kwargs)
- except BaseException, e:
- result = INTERRUPTED, e.__class__.__name__
- print # Force a newline (just in case)
- print json.dumps(result)
- sys.exit(0)
+ slaveargs = a
elif o in ('-P', '--pgo'):
pgo = True
+ elif o == '--testdir':
+ testdir = a
+ elif o == '--list-tests':
+ list_tests = True
+ elif o == '--list-cases':
+ list_cases_opt = True
+ elif o == '--fail-env-changed':
+ fail_env_changed = True
+ elif o == '--cleanup':
+ cleanup_tests = True
else:
print >>sys.stderr, ("No handler for option {}. Please "
"report this as a bug at http://bugs.python.org.").format(o)
@@ -379,12 +503,69 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
usage(2, "-T and -j don't go together!")
if use_mp and findleaks:
usage(2, "-l and -j don't go together!")
+ if failfast and not (verbose or verbose3):
+ usage("-G/--failfast needs either -v or -W")
+
+ if testdir:
+ testdir = os.path.abspath(testdir)
+
+ # Prepend test directory to sys.path, so runtest() will be able
+ # to locate tests
+ sys.path.insert(0, testdir)
+
+ # Make sure that '' and Lib/test/ are not in sys.path
+ regrtest_dir = os.path.abspath(os.path.dirname(__file__))
+ for path in ('', regrtest_dir):
+ try:
+ sys.path.remove(path)
+ except ValueError:
+ pass
+
+ if huntrleaks:
+ warmup, repetitions, _ = huntrleaks
+ if warmup < 1 or repetitions < 1:
+ msg = ("Invalid values for the --huntrleaks/-R parameters. The "
+ "number of warmups and repetitions must be at least 1 "
+ "each (1:1).")
+ print >>sys.stderr, msg
+ sys.exit(2)
+
+ if cleanup_tests:
+ import glob
+
+ os.chdir(support.SAVEDCWD)
+ path = os.path.join(TEMPDIR, 'test_python_*')
+ print("Cleanup %s directory" % TEMPDIR)
+ for name in glob.glob(path):
+ if os.path.isdir(name):
+ print("Remove directory: %s" % name)
+ support.rmtree(name)
+ else:
+ print("Remove file: %s" % name)
+ support.unlink(name)
+ sys.exit(0)
+
+
+ if slaveargs is not None:
+ args, kwargs = json.loads(slaveargs)
+ if testdir:
+ kwargs['testdir'] = testdir
+ try:
+ result = runtest(*args, **kwargs)
+ except BaseException, e:
+ result = INTERRUPTED, e.__class__.__name__
+ print # Force a newline (just in case)
+ print json.dumps(result)
+ sys.exit(0)
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
+ rerun = []
+ run_no_tests = []
+ first_result = None
interrupted = False
if findleaks:
@@ -412,7 +593,7 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
if fromfile:
tests = []
- fp = open(os.path.join(test_support.SAVEDCWD, fromfile))
+ fp = open(os.path.join(support.SAVEDCWD, fromfile))
for line in fp:
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
@@ -432,17 +613,13 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
nottests.add(arg)
args = []
- # For a partial run, we do not need to clutter the output.
- if verbose or header or not (quiet or single or tests or args):
- if not pgo:
- # Print basic platform information
- print "==", platform.python_implementation(), \
- " ".join(sys.version.split())
- print "== ", platform.platform(aliased=True), \
- "%s-endian" % sys.byteorder
- print "== ", os.getcwd()
- print "Testing with flags:", sys.flags
+ if huntrleaks:
+ # FIXME: bpo-31731: test_io hangs with --huntrleaks
+ print("Warning: bpo-31731: test_io hangs with --huntrleaks: "
+ "exclude the test")
+ nottests.add('test_io')
+ display_header = (verbose or header or not (quiet or single or tests or args)) and (not pgo)
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
@@ -451,25 +628,31 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
next_single_test = alltests[alltests.index(selected[0])+1]
except IndexError:
next_single_test = None
- if randomize:
- random.seed(random_seed)
- print "Using random seed", random_seed
- random.shuffle(selected)
+
+ if list_tests:
+ for name in selected:
+ print(name)
+ sys.exit(0)
+
+ if list_cases_opt:
+ list_cases(testdir, selected, match_tests)
+ sys.exit(0)
+
if trace:
import trace
- tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
- trace=False, count=True)
+ tracer = trace.Trace(trace=False, count=True)
test_times = []
- test_support.use_resources = use_resources
+ support.use_resources = use_resources
save_modules = set(sys.modules)
def accumulate_result(test, result):
ok, test_time = result
- test_times.append((test_time, test))
+ if ok not in (CHILD_ERROR, INTERRUPTED):
+ test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
- elif ok == FAILED:
+ elif ok in (FAILED, CHILD_ERROR):
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
@@ -478,6 +661,10 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
+ elif ok == TEST_DID_NOT_RUN:
+ run_no_tests.append(test)
+ elif ok != INTERRUPTED:
+ raise ValueError("invalid test result: %r" % ok)
if forever:
def test_forever(tests=list(selected)):
@@ -486,6 +673,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
yield test
if bad:
return
+ if fail_env_changed and environment_changed:
+ return
tests = test_forever()
test_count = ''
test_count_width = 3
@@ -494,13 +683,60 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
+ def display_progress(test_index, test):
+ # "[ 51/405/1] test_tcl"
+ line = "{1:{0}}{2}".format(test_count_width, test_index, test_count)
+ fails = len(bad) + len(environment_changed)
+ if fails and not pgo:
+ line = '{}/{}'.format(line, fails)
+ line = '[{}]'.format(line)
+
+ # add the system load prefix: "load avg: 1.80 "
+ if hasattr(os, 'getloadavg'):
+ load_avg_1min = os.getloadavg()[0]
+ line = "load avg: {:.2f} {}".format(load_avg_1min, line)
+
+ # add the timestamp prefix: "0:01:05 "
+ test_time = time.time() - regrtest_start_time
+ test_time = datetime.timedelta(seconds=int(test_time))
+ line = "%s %s" % (test_time, line)
+
+ # add the test name
+ line = "{} {}".format(line, test)
+
+ print(line)
+ sys.stdout.flush()
+
+ # For a partial run, we do not need to clutter the output.
+ if display_header:
+ # Print basic platform information
+ print "==", platform.python_implementation(), \
+ " ".join(sys.version.split())
+ print "== ", platform.platform(aliased=True), \
+ "%s-endian" % sys.byteorder
+ print "== ", os.getcwd()
+ ncpu = cpu_count()
+ if ncpu:
+ print "== CPU count:", ncpu
+
+ if huntrleaks:
+ warmup, repetitions, _ = huntrleaks
+ if warmup < 3:
+ print("WARNING: Running tests with --huntrleaks/-R and less than "
+ "3 warmup repetitions can give false positives!")
+
+ if randomize:
+ random.seed(random_seed)
+ print "Using random seed", random_seed
+ random.shuffle(selected)
+
if use_mp:
try:
from threading import Thread
except ImportError:
print "Multiprocess option requires thread support"
sys.exit(2)
- from Queue import Queue
+ from Queue import Queue, Empty
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
@@ -509,68 +745,130 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
+ failfast=failfast,
+ match_tests=match_tests,
pgo=pgo)
)
yield (test, args_tuple)
pending = tests_and_args()
- opt_args = test_support.args_from_interpreter_flags()
+ opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
# required to spawn a new process with PGO flag on/off
if pgo:
base_cmd = base_cmd + ['--pgo']
- def work():
- # A worker thread.
- try:
- while True:
- try:
- test, args_tuple = next(pending)
- except StopIteration:
- output.put((None, None, None, None))
- return
- # -E is needed by some tests, e.g. test_import
- popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
- stdout=PIPE, stderr=PIPE,
- universal_newlines=True,
- close_fds=(os.name != 'nt'))
+
+ class MultiprocessThread(Thread):
+ current_test = None
+ start_time = None
+
+ def runtest(self):
+ try:
+ test, args_tuple = next(pending)
+ except StopIteration:
+ output.put((None, None, None, None))
+ return True
+
+ # -E is needed by some tests, e.g. test_import
+ args = base_cmd + ['--slaveargs', json.dumps(args_tuple)]
+ if testdir:
+ args.extend(('--testdir', testdir))
+ try:
+ self.start_time = time.time()
+ self.current_test = test
+ popen = Popen(args,
+ stdout=PIPE, stderr=PIPE,
+ universal_newlines=True,
+ close_fds=(os.name != 'nt'))
stdout, stderr = popen.communicate()
- # Strip last refcount output line if it exists, since it
- # comes from the shutdown of the interpreter in the subcommand.
- stderr = debug_output_pat.sub("", stderr)
+ retcode = popen.wait()
+ finally:
+ self.current_test = None
+
+ # Strip last refcount output line if it exists, since it
+ # comes from the shutdown of the interpreter in the subcommand.
+ stderr = debug_output_pat.sub("", stderr)
+
+ if retcode == 0:
stdout, _, result = stdout.strip().rpartition("\n")
if not result:
output.put((None, None, None, None))
- return
+ return True
+
result = json.loads(result)
- output.put((test, stdout.rstrip(), stderr.rstrip(), result))
- except BaseException:
- output.put((None, None, None, None))
- raise
- workers = [Thread(target=work) for i in range(use_mp)]
+ else:
+ result = (CHILD_ERROR, "Exit code %s" % retcode)
+
+ output.put((test, stdout.rstrip(), stderr.rstrip(), result))
+ return False
+
+ def run(self):
+ try:
+ stop = False
+ while not stop:
+ stop = self.runtest()
+ except BaseException:
+ output.put((None, None, None, None))
+ raise
+
+ workers = [MultiprocessThread() for i in range(use_mp)]
+ print("Run tests in parallel using %s child processes"
+ % len(workers))
for worker in workers:
worker.start()
+
+ def get_running(workers):
+ running = []
+ for worker in workers:
+ current_test = worker.current_test
+ if not current_test:
+ continue
+ dt = time.time() - worker.start_time
+ if dt >= PROGRESS_MIN_TIME:
+ running.append('%s (%s)' % (current_test, format_duration(dt)))
+ return running
+
finished = 0
test_index = 1
+ get_timeout = max(PROGRESS_UPDATE, PROGRESS_MIN_TIME)
try:
while finished < use_mp:
- test, stdout, stderr, result = output.get()
+ try:
+ item = output.get(timeout=get_timeout)
+ except Empty:
+ running = get_running(workers)
+ if running and not pgo:
+ print('running: %s' % ', '.join(running))
+ sys.stdout.flush()
+ continue
+
+ test, stdout, stderr, result = item
if test is None:
finished += 1
continue
+ accumulate_result(test, result)
+ if not quiet:
+ ok, test_time = result
+ text = format_test_result(test, ok)
+ if (ok not in (CHILD_ERROR, INTERRUPTED)
+ and test_time >= PROGRESS_MIN_TIME
+ and not pgo):
+ text += ' (%s)' % format_duration(test_time)
+ running = get_running(workers)
+ if running and not pgo:
+ text += ' -- running: %s' % ', '.join(running)
+ display_progress(test_index, text)
+
if stdout:
- print stdout
+ print(stdout)
+ sys.stdout.flush()
if stderr and not pgo:
print >>sys.stderr, stderr
- sys.stdout.flush()
sys.stderr.flush()
+
if result[0] == INTERRUPTED:
assert result[1] == 'KeyboardInterrupt'
raise KeyboardInterrupt # What else?
- accumulate_result(test, result)
- if not quiet:
- fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
- print(fmt.format(
- test_count_width, test_index, test_count,
- len(bad), test))
+
test_index += 1
except KeyboardInterrupt:
interrupted = True
@@ -578,30 +876,55 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
for worker in workers:
worker.join()
else:
+ print("Run tests sequentially")
+
+ previous_test = None
for test_index, test in enumerate(tests, 1):
if not quiet:
- fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
- print(fmt.format(
- test_count_width, test_index, test_count, len(bad), test))
- sys.stdout.flush()
+ text = test
+ if previous_test:
+ text = '%s -- %s' % (text, previous_test)
+ display_progress(test_index, text)
+
+ def local_runtest():
+ result = runtest(test, verbose, quiet, huntrleaks, None, pgo,
+ failfast=failfast,
+ match_tests=match_tests,
+ testdir=testdir)
+ accumulate_result(test, result)
+ return result
+
+ start_time = time.time()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
- tracer.runctx('runtest(test, verbose, quiet)',
- globals=globals(), locals=vars())
+ ns = dict(locals())
+ tracer.runctx('result = local_runtest()',
+ globals=globals(), locals=ns)
+ result = ns['result']
else:
try:
- result = runtest(test, verbose, quiet, huntrleaks, None, pgo)
- accumulate_result(test, result)
+ result = local_runtest()
if verbose3 and result[0] == FAILED:
if not pgo:
print "Re-running test %r in verbose mode" % test
- runtest(test, True, quiet, huntrleaks, None, pgo)
+ runtest(test, True, quiet, huntrleaks, None, pgo,
+ testdir=testdir)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
+
+ test_time = time.time() - start_time
+ previous_test = format_test_result(test, result[0])
+ if test_time >= PROGRESS_MIN_TIME:
+ previous_test = "%s in %s" % (previous_test,
+ format_duration(test_time))
+ elif result[0] == PASSED:
+ # be quiet: say nothing if the test passed shortly
+ previous_test = None
+
if findleaks:
gc.collect()
if gc.garbage:
@@ -611,60 +934,112 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
- # Unload the newly imported modules (best effort finalization)
- for module in sys.modules.keys():
- if module not in save_modules and module.startswith("test."):
- test_support.unload(module)
- if interrupted and not pgo:
+ unload_test_modules(save_modules)
+
+
+ def get_tests_result():
+ result = []
+ if bad:
+ result.append("FAILURE")
+ elif fail_env_changed and environment_changed:
+ result.append("ENV CHANGED")
+ elif not any((good, bad, skipped, interrupted, environment_changed)):
+ result.append("NO TEST RUN")
+
+ if interrupted:
+ result.append("INTERRUPTED")
+
+ if not result:
+ result.append("SUCCESS")
+
+ result = ', '.join(result)
+ if first_result:
+ result = '%s then %s' % (first_result, result)
+ return result
+
+
+ def display_result():
# print a newline after ^C
print
- print "Test suite interrupted by signal SIGINT."
- omitted = set(selected) - set(good) - set(bad) - set(skipped)
- print count(len(omitted), "test"), "omitted:"
- printlist(omitted)
- if good and not quiet and not pgo:
- if not bad and not skipped and not interrupted and len(good) > 1:
- print "All",
- print count(len(good), "test"), "OK."
- if print_slow:
- test_times.sort(reverse=True)
- print "10 slowest tests:"
- for time, test in test_times[:10]:
- print "%s: %.1fs" % (test, time)
- if bad and not pgo:
- print count(len(bad), "test"), "failed:"
- printlist(bad)
- if environment_changed and not pgo:
- print "{} altered the execution environment:".format(
- count(len(environment_changed), "test"))
- printlist(environment_changed)
- if skipped and not quiet and not pgo:
- print count(len(skipped), "test"), "skipped:"
- printlist(skipped)
-
- e = _ExpectedSkips()
- plat = sys.platform
- if e.isvalid():
- surprise = set(skipped) - e.getexpected() - set(resource_denieds)
- if surprise:
- print count(len(surprise), "skip"), \
- "unexpected on", plat + ":"
- printlist(surprise)
+ print("== Tests result: %s ==" % get_tests_result())
+
+ if interrupted and not pgo:
+ print
+ print "Test suite interrupted by signal SIGINT."
+ omitted = set(selected) - set(good) - set(bad) - set(skipped)
+ print count(len(omitted), "test"), "omitted:"
+ printlist(omitted)
+
+ if good and not quiet and not pgo:
+ print
+ if not bad and not skipped and not interrupted and len(good) > 1:
+ print "All",
+ print count(len(good), "test"), "OK."
+
+ if print_slow:
+ test_times.sort(reverse=True)
+ print
+ print "10 slowest tests:"
+ for test_time, test in test_times[:10]:
+ print("- %s: %.1fs" % (test, test_time))
+
+ if bad and not pgo:
+ print
+ print count(len(bad), "test"), "failed:"
+ printlist(bad)
+
+ if environment_changed and not pgo:
+ print
+ print "{} altered the execution environment:".format(
+ count(len(environment_changed), "test"))
+ printlist(environment_changed)
+
+ if skipped and not quiet and not pgo:
+ print
+ print count(len(skipped), "test"), "skipped:"
+ printlist(skipped)
+
+ e = _ExpectedSkips()
+ plat = sys.platform
+ if e.isvalid():
+ surprise = set(skipped) - e.getexpected() - set(resource_denieds)
+ if surprise:
+ print count(len(surprise), "skip"), \
+ "unexpected on", plat + ":"
+ printlist(surprise)
+ else:
+ print "Those skips are all expected on", plat + "."
else:
- print "Those skips are all expected on", plat + "."
- else:
- print "Ask someone to teach regrtest.py about which tests are"
- print "expected to get skipped on", plat + "."
+ print "Ask someone to teach regrtest.py about which tests are"
+ print "expected to get skipped on", plat + "."
+
+ if rerun:
+ print("")
+ print("%s:" % count(len(rerun), "re-run test"))
+ printlist(rerun)
+
+ if run_no_tests:
+ print("")
+ print("%s run no tests:" % count(len(run_no_tests), "test"))
+ printlist(run_no_tests)
+
+
+ display_result()
if verbose2 and bad:
+ first_result = get_tests_result()
+
+ print
print "Re-running failed tests in verbose mode"
- for test in bad[:]:
+ rerun = bad[:]
+ for test in rerun:
print "Re-running test %r in verbose mode" % test
sys.stdout.flush()
try:
- test_support.verbose = True
- ok = runtest(test, True, quiet, huntrleaks, None, pgo)
+ support.verbose = True
+ ok = runtest(test, True, quiet, huntrleaks, None, pgo,
+ match_tests=match_tests, testdir=testdir)
except KeyboardInterrupt:
# print a newline separate from the ^C
print
@@ -677,6 +1052,8 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
print count(len(bad), "test"), "failed again:"
printlist(bad)
+ display_result()
+
if single:
if next_single_test:
with open(filename, 'w') as fp:
@@ -691,7 +1068,19 @@ def main(tests=None, testdir=None, verbose=0, quiet=False,
if runleaks:
os.system("leaks %d" % os.getpid())
- sys.exit(len(bad) > 0 or interrupted)
+ print
+ duration = time.time() - regrtest_start_time
+ print("Total duration: %s" % format_duration(duration))
+
+ print("Tests result: %s" % get_tests_result())
+
+ if bad:
+ sys.exit(2)
+ if interrupted:
+ sys.exit(130)
+ if fail_env_changed and environment_changed:
+ sys.exit(3)
+ sys.exit(0)
STDTESTS = [
@@ -725,7 +1114,8 @@ def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
return stdtests + sorted(tests)
def runtest(test, verbose, quiet,
- huntrleaks=False, use_resources=None, pgo=False):
+ huntrleaks=False, use_resources=None, pgo=False,
+ failfast=False, match_tests=None, testdir=None):
"""Run a single test.
test -- the name of the test
@@ -738,19 +1128,28 @@ def runtest(test, verbose, quiet,
for Profile Guided Optimization build
Returns one of the test result constants:
+ CHILD_ERROR Child process crashed
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
+ TEST_DID_NOT_RUN test ran no tests.
"""
- test_support.verbose = verbose # Tell tests to be moderately quiet
+ support.verbose = verbose # Tell tests to be moderately quiet
if use_resources is not None:
- test_support.use_resources = use_resources
+ support.use_resources = use_resources
try:
- return runtest_inner(test, verbose, quiet, huntrleaks, pgo)
+ support.set_match_tests(match_tests)
+ # reset the environment_altered flag to detect if a test altered
+ # the environment
+ support.environment_altered = False
+ if failfast:
+ support.failfast = True
+
+ return runtest_inner(test, verbose, quiet, huntrleaks, pgo, testdir)
finally:
cleanup_test_droppings(test, verbose)
@@ -849,31 +1248,31 @@ class saved_test_environment:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
- def get_test_support_TESTFN(self):
- if os.path.isfile(test_support.TESTFN):
+ def get_support_TESTFN(self):
+ if os.path.isfile(support.TESTFN):
result = 'f'
- elif os.path.isdir(test_support.TESTFN):
+ elif os.path.isdir(support.TESTFN):
result = 'd'
else:
result = None
return result
- def restore_test_support_TESTFN(self, saved_value):
+ def restore_support_TESTFN(self, saved_value):
if saved_value is None:
- if os.path.isfile(test_support.TESTFN):
- os.unlink(test_support.TESTFN)
- elif os.path.isdir(test_support.TESTFN):
- shutil.rmtree(test_support.TESTFN)
+ if os.path.isfile(support.TESTFN):
+ os.unlink(support.TESTFN)
+ elif os.path.isdir(support.TESTFN):
+ shutil.rmtree(support.TESTFN)
def get_files(self):
return sorted(fn + ('/' if os.path.isdir(fn) else '')
for fn in os.listdir(os.curdir))
def restore_files(self, saved_value):
- fn = test_support.TESTFN
+ fn = support.TESTFN
if fn not in saved_value and (fn + '/') not in saved_value:
if os.path.isfile(fn):
- test_support.unlink(fn)
+ support.unlink(fn)
elif os.path.isdir(fn):
- test_support.rmtree(fn)
+ support.rmtree(fn)
def resource_info(self):
for name in self.resources:
@@ -890,6 +1289,10 @@ class saved_test_environment:
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
+
+ # Read support.environment_altered, set by support helper functions
+ self.changed |= support.environment_altered
+
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
@@ -901,10 +1304,9 @@ class saved_test_environment:
print >>sys.stderr, (
"Warning -- {} was modified by {}".format(
name, self.testname))
- if self.verbose > 1 and not self.pgo:
- print >>sys.stderr, (
- " Before: {}\n After: {} ".format(
- original, current))
+ print >>sys.stderr, (
+ " Before: {}\n After: {} ".format(
+ original, current))
# XXX (ncoghlan): for most resources (e.g. sys.path) identity
# matters at least as much as value. For others (e.g. cwd),
# identity is irrelevant. Should we add a mechanism to check
@@ -912,8 +1314,11 @@ class saved_test_environment:
return False
-def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False):
- test_support.unload(test)
+def post_test_cleanup():
+ support.reap_children()
+
+def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False, testdir=None):
+ support.unload(test)
if verbose:
capture_stdout = None
else:
@@ -926,29 +1331,30 @@ def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False):
try:
if capture_stdout:
sys.stdout = capture_stdout
- if test.startswith('test.'):
- abstest = test
- else:
- # Always import it from the test package
- abstest = 'test.' + test
+ abstest = get_abs_module(testdir, test)
clear_caches()
with saved_test_environment(test, verbose, quiet, pgo) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
- the_module = getattr(the_package, test)
+ if abstest.startswith('test.'):
+ the_module = getattr(the_package, test)
+ else:
+ the_module = the_package
# Old tests run to completion simply as a side-effect of
# being imported. For tests based on unittest or doctest,
# explicitly invoke their test_main() function (if it exists).
indirect_test = getattr(the_module, "test_main", None)
- if indirect_test is not None:
- indirect_test()
if huntrleaks:
refleak = dash_R(the_module, test, indirect_test,
- huntrleaks)
+ huntrleaks, quiet)
+ else:
+ if indirect_test is not None:
+ indirect_test()
test_time = time.time() - start_time
+ post_test_cleanup()
finally:
sys.stdout = save_stdout
- except test_support.ResourceDenied, msg:
+ except support.ResourceDenied, msg:
if not quiet and not pgo:
print test, "skipped --", msg
sys.stdout.flush()
@@ -960,11 +1366,13 @@ def runtest_inner(test, verbose, quiet, huntrleaks=False, pgo=False):
return SKIPPED, test_time
except KeyboardInterrupt:
raise
- except test_support.TestFailed, msg:
+ except support.TestFailed, msg:
if not pgo:
print >>sys.stderr, "test", test, "failed --", msg
sys.stderr.flush()
return FAILED, test_time
+ except support.TestDidNotRun:
+ return TEST_DID_NOT_RUN, test_time
except:
type, value = sys.exc_info()[:2]
if not pgo:
@@ -1005,7 +1413,7 @@ def cleanup_test_droppings(testname, verbose):
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
- for name in (test_support.TESTFN,
+ for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
@@ -1031,7 +1439,7 @@ def cleanup_test_droppings(testname, verbose):
print >> sys.stderr, ("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg))
-def dash_R(the_module, test, indirect_test, huntrleaks):
+def dash_R(the_module, test, indirect_test, huntrleaks, quiet):
"""Run a test multiple times, looking for reference leaks.
Returns:
@@ -1044,6 +1452,10 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
+ # Avoid false positives due to various caches
+ # filling slowly with random data:
+ warm_caches()
+
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copy_reg.dispatch_table.copy()
@@ -1063,6 +1475,14 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
+ # bpo-31217: Integer pool to get a single integer object for the same
+ # value. The pool is used to prevent false alarm when checking for memory
+ # block leaks. Fill the pool with values in -1000..1000 which are the most
+ # common (reference, memory block, file descriptor) differences.
+ int_pool = {value: value for value in range(-1000, 1000)}
+ def get_pooled_int(value):
+ return int_pool.setdefault(value, value)
+
if indirect_test:
def run_the_test():
indirect_test()
@@ -1072,28 +1492,74 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
deltas = []
nwarmup, ntracked, fname = huntrleaks
- fname = os.path.join(test_support.SAVEDCWD, fname)
+ fname = os.path.join(support.SAVEDCWD, fname)
+
+ # Pre-allocate to ensure that the loop doesn't allocate anything new
repcount = nwarmup + ntracked
- print >> sys.stderr, "beginning", repcount, "repetitions"
- print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
+ rc_deltas = [0] * repcount
+ fd_deltas = [0] * repcount
+ rep_range = list(range(repcount))
+
+ if not quiet:
+ print >> sys.stderr, "beginning", repcount, "repetitions"
+ print >> sys.stderr, ("1234567890"*(repcount//10 + 1))[:repcount]
+
dash_R_cleanup(fs, ps, pic, zdc, abcs)
- for i in range(repcount):
- rc_before = sys.gettotalrefcount()
+
+ # initialize variables to make pyflakes quiet
+ rc_before = fd_before = 0
+
+ for i in rep_range:
run_the_test()
- sys.stderr.write('.')
+
+ if not quiet:
+ sys.stderr.write('.')
+
dash_R_cleanup(fs, ps, pic, zdc, abcs)
+
rc_after = sys.gettotalrefcount()
- if i >= nwarmup:
- deltas.append(rc_after - rc_before)
- print >> sys.stderr
- if any(deltas):
- msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
- print >> sys.stderr, msg
- with open(fname, "a") as refrep:
- print >> refrep, msg
- refrep.flush()
- return True
- return False
+ fd_after = support.fd_count()
+ rc_deltas[i] = get_pooled_int(rc_after - rc_before)
+ fd_deltas[i] = get_pooled_int(fd_after - fd_before)
+ rc_before = rc_after
+ fd_before = fd_after
+
+ if not quiet:
+ print >> sys.stderr
+
+ # These checkers return False on success, True on failure
+ def check_rc_deltas(deltas):
+ # Checker for reference counters and memomry blocks.
+ #
+ # bpo-30776: Try to ignore false positives:
+ #
+ # [3, 0, 0]
+ # [0, 1, 0]
+ # [8, -8, 1]
+ #
+ # Expected leaks:
+ #
+ # [5, 5, 6]
+ # [10, 1, 1]
+ return all(delta >= 1 for delta in deltas)
+
+ def check_fd_deltas(deltas):
+ return any(deltas)
+
+ failed = False
+ for deltas, item_name, checker in [
+ (rc_deltas, 'references', check_rc_deltas),
+ (fd_deltas, 'file descriptors', check_fd_deltas)
+ ]:
+ deltas = deltas[nwarmup:]
+ if checker(deltas):
+ msg = '%s leaked %s %s, sum=%s' % (test, deltas, item_name, sum(deltas))
+ print >> sys.stderr, msg
+ with open(fname, "a") as refrep:
+ print >> refrep, msg
+ refrep.flush()
+ failed = True
+ return failed
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copy_reg
@@ -1220,7 +1686,19 @@ def clear_caches():
ctypes._reset_cache()
# Collect cyclic trash.
- gc.collect()
+ support.gc_collect()
+
+def warm_caches():
+ """Create explicitly internal singletons which are created on demand
+ to prevent false positive when hunting reference leaks."""
+ # char cache
+ for i in range(256):
+ chr(i)
+ # unicode cache
+ for i in range(256):
+ unichr(i)
+ # int cache
+ list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
@@ -1239,7 +1717,7 @@ def count(n, word):
else:
return "%d %ss" % (n, word)
-def printlist(x, width=70, indent=4):
+def printlist(x, width=70, indent=4, file=None):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
@@ -1250,8 +1728,44 @@ def printlist(x, width=70, indent=4):
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
- print fill(' '.join(str(elt) for elt in sorted(x)), width,
- initial_indent=blanks, subsequent_indent=blanks)
+ print >>file, fill(' '.join(str(elt) for elt in sorted(x)), width,
+ initial_indent=blanks, subsequent_indent=blanks)
+
+def get_abs_module(testdir, test):
+ if test.startswith('test.') or testdir:
+ return test
+ else:
+ # Always import it from the test package
+ return 'test.' + test
+
+def _list_cases(suite):
+ for test in suite:
+ if isinstance(test, unittest.TestSuite):
+ _list_cases(test)
+ elif isinstance(test, unittest.TestCase):
+ if support.match_test(test):
+ print(test.id())
+
+def list_cases(testdir, selected, match_tests):
+ support.verbose = False
+ support.set_match_tests(match_tests)
+
+ save_modules = set(sys.modules)
+ skipped = []
+ for test in selected:
+ abstest = get_abs_module(testdir, test)
+ try:
+ suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
+ _list_cases(suite)
+ except unittest.SkipTest:
+ skipped.append(test)
+
+ unload_test_modules(save_modules)
+
+ if skipped:
+ print >>sys.stderr
+ print >>sys.stderr, count(len(skipped), "test"), "skipped:"
+ printlist(skipped, file=sys.stderr)
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
@@ -1531,26 +2045,7 @@ _expectations = {
test_zipimport
test_zlib
""",
- 'openbsd4':
- """
- test_ascii_formatd
- test_bsddb
- test_bsddb3
- test_ctypes
- test_dl
- test_epoll
- test_gdbm
- test_locale
- test_normalization
- test_ossaudiodev
- test_pep277
- test_tcl
- test_tk
- test_ttk_guionly
- test_ttk_textonly
- test_multiprocessing
- """,
- 'openbsd5':
+ 'openbsd3':
"""
test_ascii_formatd
test_bsddb
@@ -1664,9 +2159,9 @@ class _ExpectedSkips:
assert self.isvalid()
return self.expected
-if __name__ == '__main__':
- # Simplification for findtestdir().
- assert __file__ == os.path.abspath(sys.argv[0])
+def main_in_temp_cwd():
+ """Run main() in a temporary working directory."""
+ global TEMPDIR
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
@@ -1687,6 +2182,19 @@ if __name__ == '__main__':
# Run the tests in a context manager that temporary changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
- # available from test_support.SAVEDCWD.
- with test_support.temp_cwd(TESTCWD, quiet=True):
+ # available from support.SAVEDCWD.
+ with support.temp_cwd(TESTCWD, quiet=True):
main()
+
+if __name__ == '__main__':
+ # findtestdir() gets the dirname out of __file__, so we have to make it
+ # absolute before changing the working directory.
+ # For example __file__ may be relative when running trace or profile.
+ # See issue #9323.
+ global __file__
+ __file__ = os.path.abspath(__file__)
+
+ # sanity check
+ assert __file__ == os.path.abspath(sys.argv[0])
+
+ main_in_temp_cwd()
diff --git a/lib-python/2.7/test/revocation.crl b/lib-python/2.7/test/revocation.crl
index 6d89b08ebe..c05461ca7f 100644
--- a/lib-python/2.7/test/revocation.crl
+++ b/lib-python/2.7/test/revocation.crl
@@ -1,11 +1,14 @@
-----BEGIN X509 CRL-----
-MIIBpjCBjwIBATANBgkqhkiG9w0BAQUFADBNMQswCQYDVQQGEwJYWTEmMCQGA1UE
+MIICJjCBjwIBATANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQGEwJYWTEmMCQGA1UE
CgwdUHl0aG9uIFNvZnR3YXJlIEZvdW5kYXRpb24gQ0ExFjAUBgNVBAMMDW91ci1j
-YS1zZXJ2ZXIXDTEzMTEyMTE3MDg0N1oXDTIzMDkzMDE3MDg0N1qgDjAMMAoGA1Ud
-FAQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQCNJXC2mVKauEeN3LlQ3ZtM5gkH3ExH
-+i4bmJjtJn497WwvvoIeUdrmVXgJQR93RtV37hZwN0SXMLlNmUZPH4rHhihayw4m
-unCzVj/OhCCY7/TPjKuJ1O/0XhaLBpBVjQN7R/1ujoRKbSia/CD3vcn7Fqxzw7LK
-fSRCKRGTj1CZiuxrphtFchwALXSiFDy9mr2ZKhImcyq1PydfgEzU78APpOkMQsIC
-UNJ/cf3c9emzf+dUtcMEcejQ3mynBo4eIGg1EW42bz4q4hSjzQlKcBV0muw5qXhc
-HOxH2iTFhQ7SrvVuK/dM14rYM4B5mSX3nRC1kNmXpS9j3wJDhuwmjHed
+YS1zZXJ2ZXIXDTE4MDgyOTE0MjMxNloXDTI4MDcwNzE0MjMxNlqgDjAMMAoGA1Ud
+FAQDAgEAMA0GCSqGSIb3DQEBCwUAA4IBgQCPhrtGSbuvxPAI3YWQFDB4iOWdBnVk
+ugW1lsifmCsE86FfID0EwUut1SRHlksltMtcoULMEIdu8yMLWci++4ve22EEuMKT
+HUc3T/wBIuQUhA7U4deFG8CZPAxRpNoK470y7dkD4OVf0Gxa6WYDl9z8mXKmWCB9
+hvzqVfLWNSLTAVPsHtkD5PXdi5yRkQr6wYD7poWaIvkpsn7EKCY6Tw5V3rsbRuZq
+AGVCq5TH3mctcmwLloCJ4Xr/1q0DsRrYxeeLYxE+UpvvCbVBKgtjBK7zINS7AbcJ
+CYCYKUwGWv1fYKJ+KQQHf75mT3jQ9lWuzOj/YWK4k1EBnYmVGuKKt73lLFxC6h3y
+MUnaBZc1KZSyJj0IxfHg/o6qx8NgKOl9XRIQ5g5B30cwpPOskGhEhodbTTY3bPtm
+RQ36JvQZngzmkhyhr+MDEV5yUTOShfUiclzQOx26CmLmLHWxOZgXtFZob/oKrvbm
+Gen/+7K7YTw6hfY52U7J2FuQRGOyzBXfBYQ=
-----END X509 CRL-----
diff --git a/lib-python/2.7/test/script_helper.py b/lib-python/2.7/test/script_helper.py
index 6be47bd4ff..99ec114eef 100644
--- a/lib-python/2.7/test/script_helper.py
+++ b/lib-python/2.7/test/script_helper.py
@@ -1,170 +1 @@
-# Common utility functions used by various script execution tests
-# e.g. test_cmd_line, test_cmd_line_script and test_runpy
-
-import sys
-import os
-import re
-import os.path
-import tempfile
-import subprocess
-import py_compile
-import contextlib
-import shutil
-try:
- import zipfile
-except ImportError:
- # If Python is build without Unicode support, importing _io will
- # fail, which, in turn, means that zipfile cannot be imported
- # Most of this module can then still be used.
- pass
-
-from test.test_support import strip_python_stderr
-
-# Executing the interpreter in a subprocess
-def _assert_python(expected_success, *args, **env_vars):
- cmd_line = [sys.executable]
- if not env_vars:
- cmd_line.append('-E')
- cmd_line.extend(args)
- # Need to preserve the original environment, for in-place testing of
- # shared library builds.
- env = os.environ.copy()
- env.update(env_vars)
- p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- env=env)
- try:
- out, err = p.communicate()
- finally:
- subprocess._cleanup()
- p.stdout.close()
- p.stderr.close()
- rc = p.returncode
- err = strip_python_stderr(err)
- if (rc and expected_success) or (not rc and not expected_success):
- raise AssertionError(
- "Process return code is %d, "
- "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
- return rc, out, err
-
-def assert_python_ok(*args, **env_vars):
- """
- Assert that running the interpreter with `args` and optional environment
- variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
- """
- return _assert_python(True, *args, **env_vars)
-
-def assert_python_failure(*args, **env_vars):
- """
- Assert that running the interpreter with `args` and optional environment
- variables `env_vars` fails and return a (return code, stdout, stderr) tuple.
- """
- return _assert_python(False, *args, **env_vars)
-
-def python_exit_code(*args):
- cmd_line = [sys.executable, '-E']
- cmd_line.extend(args)
- with open(os.devnull, 'w') as devnull:
- return subprocess.call(cmd_line, stdout=devnull,
- stderr=subprocess.STDOUT)
-
-def spawn_python(*args, **kwargs):
- cmd_line = [sys.executable, '-E']
- cmd_line.extend(args)
- return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
- **kwargs)
-
-def kill_python(p):
- p.stdin.close()
- data = p.stdout.read()
- p.stdout.close()
- # try to cleanup the child so we don't appear to leak when running
- # with regrtest -R.
- p.wait()
- subprocess._cleanup()
- return data
-
-def run_python(*args, **kwargs):
- if __debug__:
- p = spawn_python(*args, **kwargs)
- else:
- p = spawn_python('-O', *args, **kwargs)
- stdout_data = kill_python(p)
- return p.wait(), stdout_data
-
-# Script creation utilities
-@contextlib.contextmanager
-def temp_dir():
- dirname = tempfile.mkdtemp()
- dirname = os.path.realpath(dirname)
- try:
- yield dirname
- finally:
- shutil.rmtree(dirname)
-
-def make_script(script_dir, script_basename, source):
- script_filename = script_basename+os.extsep+'py'
- script_name = os.path.join(script_dir, script_filename)
- script_file = open(script_name, 'w')
- script_file.write(source)
- script_file.close()
- return script_name
-
-def compile_script(script_name):
- py_compile.compile(script_name, doraise=True)
- if __debug__:
- compiled_name = script_name + 'c'
- else:
- compiled_name = script_name + 'o'
- return compiled_name
-
-def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
- zip_filename = zip_basename+os.extsep+'zip'
- zip_name = os.path.join(zip_dir, zip_filename)
- zip_file = zipfile.ZipFile(zip_name, 'w')
- if name_in_zip is None:
- name_in_zip = os.path.basename(script_name)
- zip_file.write(script_name, name_in_zip)
- zip_file.close()
- #if test.test_support.verbose:
- # zip_file = zipfile.ZipFile(zip_name, 'r')
- # print 'Contents of %r:' % zip_name
- # zip_file.printdir()
- # zip_file.close()
- return zip_name, os.path.join(zip_name, name_in_zip)
-
-def make_pkg(pkg_dir, init_source=''):
- os.mkdir(pkg_dir)
- make_script(pkg_dir, '__init__', init_source)
-
-def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
- source, depth=1, compiled=False):
- unlink = []
- init_name = make_script(zip_dir, '__init__', '')
- unlink.append(init_name)
- init_basename = os.path.basename(init_name)
- script_name = make_script(zip_dir, script_basename, source)
- unlink.append(script_name)
- if compiled:
- init_name = compile_script(init_name)
- script_name = compile_script(script_name)
- unlink.extend((init_name, script_name))
- pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
- script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
- zip_filename = zip_basename+os.extsep+'zip'
- zip_name = os.path.join(zip_dir, zip_filename)
- zip_file = zipfile.ZipFile(zip_name, 'w')
- for name in pkg_names:
- init_name_in_zip = os.path.join(name, init_basename)
- zip_file.write(init_name, init_name_in_zip)
- zip_file.write(script_name, script_name_in_zip)
- zip_file.close()
- for name in unlink:
- os.unlink(name)
- #if test.test_support.verbose:
- # zip_file = zipfile.ZipFile(zip_name, 'r')
- # print 'Contents of %r:' % zip_name
- # zip_file.printdir()
- # zip_file.close()
- return zip_name, os.path.join(zip_name, script_name_in_zip)
+from test.support.script_helper import *
diff --git a/lib-python/2.7/test/sha256.pem b/lib-python/2.7/test/sha256.pem
deleted file mode 100644
index d3db4b85c0..0000000000
--- a/lib-python/2.7/test/sha256.pem
+++ /dev/null
@@ -1,128 +0,0 @@
-# Certificate chain for https://sha256.tbs-internet.com
- 0 s:/C=FR/postalCode=14000/ST=Calvados/L=CAEN/street=22 rue de Bretagne/O=TBS INTERNET/OU=0002 440443810/OU=sha-256 production/CN=sha256.tbs-internet.com
- i:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC
------BEGIN CERTIFICATE-----
-MIIGXDCCBUSgAwIBAgIRAKpVmHgg9nfCodAVwcP4siwwDQYJKoZIhvcNAQELBQAw
-gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
-bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u
-ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg
-Q0EgU0dDMB4XDTEyMDEwNDAwMDAwMFoXDTE0MDIxNzIzNTk1OVowgcsxCzAJBgNV
-BAYTAkZSMQ4wDAYDVQQREwUxNDAwMDERMA8GA1UECBMIQ2FsdmFkb3MxDTALBgNV
-BAcTBENBRU4xGzAZBgNVBAkTEjIyIHJ1ZSBkZSBCcmV0YWduZTEVMBMGA1UEChMM
-VEJTIElOVEVSTkVUMRcwFQYDVQQLEw4wMDAyIDQ0MDQ0MzgxMDEbMBkGA1UECxMS
-c2hhLTI1NiBwcm9kdWN0aW9uMSAwHgYDVQQDExdzaGEyNTYudGJzLWludGVybmV0
-LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKQIX/zdJcyxty0m
-PM1XQSoSSifueS3AVcgqMsaIKS/u+rYzsv4hQ/qA6vLn5m5/ewUcZDj7zdi6rBVf
-PaVNXJ6YinLX0tkaW8TEjeVuZG5yksGZlhCt1CJ1Ho9XLiLaP4uJ7MCoNUntpJ+E
-LfrOdgsIj91kPmwjDJeztVcQCvKzhjVJA/KxdInc0JvOATn7rpaSmQI5bvIjufgo
-qVsTPwVFzuUYULXBk7KxRT7MiEqnd5HvviNh0285QC478zl3v0I0Fb5El4yD3p49
-IthcRnxzMKc0UhU5ogi0SbONyBfm/mzONVfSxpM+MlyvZmJqrbuuLoEDzJD+t8PU
-xSuzgbcCAwEAAaOCAj4wggI6MB8GA1UdIwQYMBaAFAdEdoWTKLx/bXjSCuv6TEvf
-2YIfMB0GA1UdDgQWBBT/qTGYdaj+f61c2IRFL/B1eEsM8DAOBgNVHQ8BAf8EBAMC
-BaAwDAYDVR0TAQH/BAIwADA0BgNVHSUELTArBggrBgEFBQcDAQYIKwYBBQUHAwIG
-CisGAQQBgjcKAwMGCWCGSAGG+EIEATBLBgNVHSAERDBCMEAGCisGAQQB5TcCBAEw
-MjAwBggrBgEFBQcCARYkaHR0cHM6Ly93d3cudGJzLWludGVybmV0LmNvbS9DQS9D
-UFM0MG0GA1UdHwRmMGQwMqAwoC6GLGh0dHA6Ly9jcmwudGJzLWludGVybmV0LmNv
-bS9UQlNYNTA5Q0FTR0MuY3JsMC6gLKAqhihodHRwOi8vY3JsLnRicy14NTA5LmNv
-bS9UQlNYNTA5Q0FTR0MuY3JsMIGmBggrBgEFBQcBAQSBmTCBljA4BggrBgEFBQcw
-AoYsaHR0cDovL2NydC50YnMtaW50ZXJuZXQuY29tL1RCU1g1MDlDQVNHQy5jcnQw
-NAYIKwYBBQUHMAKGKGh0dHA6Ly9jcnQudGJzLXg1MDkuY29tL1RCU1g1MDlDQVNH
-Qy5jcnQwJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLnRicy14NTA5LmNvbTA/BgNV
-HREEODA2ghdzaGEyNTYudGJzLWludGVybmV0LmNvbYIbd3d3LnNoYTI1Ni50YnMt
-aW50ZXJuZXQuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQA0pOuL8QvAa5yksTbGShzX
-ABApagunUGoEydv4YJT1MXy9tTp7DrWaozZSlsqBxrYAXP1d9r2fuKbEniYHxaQ0
-UYaf1VSIlDo1yuC8wE7wxbHDIpQ/E5KAyxiaJ8obtDhFstWAPAH+UoGXq0kj2teN
-21sFQ5dXgA95nldvVFsFhrRUNB6xXAcaj0VZFhttI0ZfQZmQwEI/P+N9Jr40OGun
-aa+Dn0TMeUH4U20YntfLbu2nDcJcYfyurm+8/0Tr4HznLnedXu9pCPYj0TaddrgT
-XO0oFiyy7qGaY6+qKh71yD64Y3ycCJ/HR9Wm39mjZYc9ezYwT4noP6r7Lk8YO7/q
------END CERTIFICATE-----
- 1 s:/C=FR/ST=Calvados/L=Caen/O=TBS INTERNET/OU=Terms and Conditions: http://www.tbs-internet.com/CA/repository/OU=TBS INTERNET CA/CN=TBS X509 CA SGC
- i:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
------BEGIN CERTIFICATE-----
-MIIFVjCCBD6gAwIBAgIQXpDZ0ETJMV02WTx3GTnhhTANBgkqhkiG9w0BAQUFADBv
-MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFk
-ZFRydXN0IEV4dGVybmFsIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBF
-eHRlcm5hbCBDQSBSb290MB4XDTA1MTIwMTAwMDAwMFoXDTE5MDYyNDE5MDYzMFow
-gcQxCzAJBgNVBAYTAkZSMREwDwYDVQQIEwhDYWx2YWRvczENMAsGA1UEBxMEQ2Fl
-bjEVMBMGA1UEChMMVEJTIElOVEVSTkVUMUgwRgYDVQQLEz9UZXJtcyBhbmQgQ29u
-ZGl0aW9uczogaHR0cDovL3d3dy50YnMtaW50ZXJuZXQuY29tL0NBL3JlcG9zaXRv
-cnkxGDAWBgNVBAsTD1RCUyBJTlRFUk5FVCBDQTEYMBYGA1UEAxMPVEJTIFg1MDkg
-Q0EgU0dDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgOkO3f7wzN6
-rOjg45tR5vjBfzK7qmV9IBxb/QW9EEXxG+E7FNhZqQLtwGBKoSsHTnQqV75wWMk0
-9tinWvftBkSpj5sTi/8cbzJfUvTSVYh3Qxv6AVVjMMH/ruLjE6y+4PoaPs8WoYAQ
-ts5R4Z1g8c/WnTepLst2x0/Wv7GmuoQi+gXvHU6YrBiu7XkeYhzc95QdviWSJRDk
-owhb5K43qhcvjRmBfO/paGlCliDGZp8mHwrI21mwobWpVjTxZRwYO3bd4+TGcI4G
-Ie5wmHwE8F7SK1tgSqbBacKjDa93j7txKkfz/Yd2n7TGqOXiHPsJpG655vrKtnXk
-9vs1zoDeJQIDAQABo4IBljCCAZIwHQYDVR0OBBYEFAdEdoWTKLx/bXjSCuv6TEvf
-2YIfMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMCAGA1UdJQQZ
-MBcGCisGAQQBgjcKAwMGCWCGSAGG+EIEATAYBgNVHSAEETAPMA0GCysGAQQBgOU3
-AgQBMHsGA1UdHwR0MHIwOKA2oDSGMmh0dHA6Ly9jcmwuY29tb2RvY2EuY29tL0Fk
-ZFRydXN0RXh0ZXJuYWxDQVJvb3QuY3JsMDagNKAyhjBodHRwOi8vY3JsLmNvbW9k
-by5uZXQvQWRkVHJ1c3RFeHRlcm5hbENBUm9vdC5jcmwwgYAGCCsGAQUFBwEBBHQw
-cjA4BggrBgEFBQcwAoYsaHR0cDovL2NydC5jb21vZG9jYS5jb20vQWRkVHJ1c3RV
-VE5TR0NDQS5jcnQwNgYIKwYBBQUHMAKGKmh0dHA6Ly9jcnQuY29tb2RvLm5ldC9B
-ZGRUcnVzdFVUTlNHQ0NBLmNydDARBglghkgBhvhCAQEEBAMCAgQwDQYJKoZIhvcN
-AQEFBQADggEBAK2zEzs+jcIrVK9oDkdDZNvhuBYTdCfpxfFs+OAujW0bIfJAy232
-euVsnJm6u/+OrqKudD2tad2BbejLLXhMZViaCmK7D9nrXHx4te5EP8rL19SUVqLY
-1pTnv5dhNgEgvA7n5lIzDSYs7yRLsr7HJsYPr6SeYSuZizyX1SNz7ooJ32/F3X98
-RB0Mlc/E0OyOrkQ9/y5IrnpnaSora8CnUrV5XNOg+kyCz9edCyx4D5wXYcwZPVWz
-8aDqquESrezPyjtfi4WRO4s/VD3HLZvOxzMrWAVYCDG9FxaOhF0QGuuG1F7F3GKV
-v6prNyCl016kRl2j1UT+a7gLd8fA25A4C9E=
------END CERTIFICATE-----
- 2 s:/C=SE/O=AddTrust AB/OU=AddTrust External TTP Network/CN=AddTrust External CA Root
- i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
------BEGIN CERTIFICATE-----
-MIIEZjCCA06gAwIBAgIQUSYKkxzif5zDpV954HKugjANBgkqhkiG9w0BAQUFADCB
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
-Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
-dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
-IFNHQzAeFw0wNTA2MDcwODA5MTBaFw0xOTA2MjQxOTA2MzBaMG8xCzAJBgNVBAYT
-AlNFMRQwEgYDVQQKEwtBZGRUcnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0
-ZXJuYWwgVFRQIE5ldHdvcmsxIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENB
-IFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC39xoz5vIABC05
-4E5b7R+8bA/Ntfojts7emxEzl6QpTH2Tn71KvJPtAxrjj8/lbVBa1pcplFqAsEl6
-2y6V/bjKvzc4LR4+kUGtcFbH8E8/6DKedMrIkFTpxl8PeJ2aQDwOrGGqXhSPnoeh
-alDc15pOrwWzpnGUnHGzUGAKxxOdOAeGAqjpqGkmGJCrTLBPI6s6T4TY386f4Wlv
-u9dC12tE5Met7m1BX3JacQg3s3llpFmglDf3AC8NwpJy2tA4ctsUqEXEXSp9t7TW
-xO6szRNEt8kr3UMAJfphuWlqWCMRt6czj1Z1WfXNKddGtworZbbTQm8Vsrh7++/p
-XVPVNFonAgMBAAGjgdgwgdUwHwYDVR0jBBgwFoAUUzLRs89/+uDxoF2FTpLSnkUd
-tE8wHQYDVR0OBBYEFK29mHo0tCb3+sQmVO8DveAky1QaMA4GA1UdDwEB/wQEAwIB
-BjAPBgNVHRMBAf8EBTADAQH/MBEGCWCGSAGG+EIBAQQEAwIBAjAgBgNVHSUEGTAX
-BgorBgEEAYI3CgMDBglghkgBhvhCBAEwPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDov
-L2NybC51c2VydHJ1c3QuY29tL1VUTi1EQVRBQ29ycFNHQy5jcmwwDQYJKoZIhvcN
-AQEFBQADggEBAMbuUxdoFLJRIh6QWA2U/b3xcOWGLcM2MY9USEbnLQg3vGwKYOEO
-rVE04BKT6b64q7gmtOmWPSiPrmQH/uAB7MXjkesYoPF1ftsK5p+R26+udd8jkWjd
-FwBaS/9kbHDrARrQkNnHptZt9hPk/7XJ0h4qy7ElQyZ42TCbTg0evmnv3+r+LbPM
-+bDdtRTKkdSytaX7ARmjR3mfnYyVhzT4HziS2jamEfpr62vp3EV4FTkG101B5CHI
-3C+H0be/SGB1pWLLJN47YaApIKa+xWycxOkKaSLvkTr6Jq/RW0GnOuL4OAdCq8Fb
-+M5tug8EPzI0rNwEKNdwMBQmBsTkm5jVz3g=
------END CERTIFICATE-----
- 3 s:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
- i:/C=US/ST=UT/L=Salt Lake City/O=The USERTRUST Network/OU=http://www.usertrust.com/CN=UTN - DATACorp SGC
------BEGIN CERTIFICATE-----
-MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB
-kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug
-Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho
-dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw
-IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG
-EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD
-VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu
-dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN
-BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6
-E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ
-D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK
-4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq
-lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW
-bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB
-o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT
-MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js
-LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr
-BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB
-AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft
-Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj
-j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH
-KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv
-2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3
-mfnGV/TJVTl4uix5yaaIK/QI
------END CERTIFICATE-----
diff --git a/lib-python/2.7/test/ssl_cert.pem b/lib-python/2.7/test/ssl_cert.pem
index 47a7d7e37e..de596717bd 100644
--- a/lib-python/2.7/test/ssl_cert.pem
+++ b/lib-python/2.7/test/ssl_cert.pem
@@ -1,15 +1,26 @@
-----BEGIN CERTIFICATE-----
-MIICVDCCAb2gAwIBAgIJANfHOBkZr8JOMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNV
-BAYTAlhZMRcwFQYDVQQHEw5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9u
-IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xMDEw
-MDgyMzAxNTZaFw0yMDEwMDUyMzAxNTZaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH
-Ew5DYXN0bGUgQW50aHJheDEjMCEGA1UEChMaUHl0aG9uIFNvZnR3YXJlIEZvdW5k
-YXRpb24xEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
-gYkCgYEA21vT5isq7F68amYuuNpSFlKDPrMUCa4YWYqZRt2OZ+/3NKaZ2xAiSwr7
-6MrQF70t5nLbSPpqE5+5VrS58SY+g/sXLiFd6AplH1wJZwh78DofbFYXUggktFMt
-pTyiX8jtP66bkcPkDADA089RI1TQR6Ca+n7HFa7c1fabVV6i3zkCAwEAAaMYMBYw
-FAYDVR0RBA0wC4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBBQUAA4GBAHPctQBEQ4wd
-BJ6+JcpIraopLn8BGhbjNWj40mmRqWB/NAWF6M5ne7KpGAu7tLeG4hb1zLaldK8G
-lxy2GPSRF6LFS48dpEj2HbMv2nvv6xxalDMJ9+DicWgAKTQ6bcX2j3GUkCR0g/T1
-CRlNBAAlvhKzO7Clpf9l0YKBEfraJByX
+MIIEWTCCAsGgAwIBAgIJAJinz4jHSjLtMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV
+BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
+IFNvZnR3YXJlIEZvdW5kYXRpb24xEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xODA4
+MjkxNDIzMTVaFw0yODA4MjYxNDIzMTVaMF8xCzAJBgNVBAYTAlhZMRcwFQYDVQQH
+DA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9uIFNvZnR3YXJlIEZvdW5k
+YXRpb24xEjAQBgNVBAMMCWxvY2FsaG9zdDCCAaIwDQYJKoZIhvcNAQEBBQADggGP
+ADCCAYoCggGBALKUqUtopT6E68kN+uJNEt34i2EbmG/bwjcD8IaMsgJPSsMO2Bpd
+3S6qWgkCeOyCfmAwBxK2kNbxGb63ouysEv7l8GCTJTWv3hG/HQcejJpnAEGi6K1U
+fDbyE/db6yZ12SoHVTGkadN4vYGCPd1Wj9ZO1F877SHQ8rDWX3xgTWkxN2ojBw44
+T8RHSDiG8D/CvG4uEy+VUszL+Uvny5y2poNSqvI3J56sptWSrh8nIIbkPZPBdUne
+LYMOHTFK3ZjXSmhlXgziTxK71nnzM3Y9K9gxPnRqoXbvu/wFo55hQCkETiRkYgmm
+jXcBMZ0TClQVnQWuLjMthRnWFZs4Lfmwqjs7FZD/61581R2BYehvpWbLvvuOJhwv
+DFzexL2sXcAl7SsxbzeQKRHqGbIDfbnQTXfs3/VC6Ye5P82P2ucj+XC32N9piRmO
+gCBP8L3ub+YzzdxikZN2gZXXE2jsb3QyE/R2LkWdWyshpKe+RsZP1SBRbHShUyOh
+yJ90baoiEwj2mwIDAQABoxgwFjAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZI
+hvcNAQELBQADggGBAHRUO/UIHl3jXQENewYayHxkIx8t7nu40iO2DXbicSijz5bo
+5//xAB6RxhBAlsDBehgQP1uoZg+WJW+nHu3CIVOU3qZNZRaozxiCl2UFKcNqLOmx
+R3NKpo1jYf4REQIeG8Yw9+hSWLRbshNteP6bKUUf+vanhg9+axyOEOH/iOQvgk/m
+b8wA8wNa4ujWljPbTQnj7ry8RqhTM0GcAN5LSdSvcKcpzLcs3aYwh+Z8e30sQWna
+F40sa5u7izgBTOrwpcDm/w5kC46vpRQ5fnbshVw6pne2by0mdMECASid/p25N103
+jMqTFlmO7kpf/jpCSmamp3/JSEE1BJKHwQ6Ql4nzRA2N1mnvWH7Zxcv043gkHeAu
+0x8evpvwuhdIyproejNFlBpKmW8OX7yKTCPPMC/VkX8Q1rVkxU0DQ6hmvwZlhoKa
+9Wc2uXpw9xF8itV4Uvcdr3dwqByvIqn7iI/gB+4l41e0u8OmH2MKOx4Nxlly5TNW
+HcVKQHyOeyvnINuBAQ==
-----END CERTIFICATE-----
diff --git a/lib-python/2.7/test/ssl_key.passwd.pem b/lib-python/2.7/test/ssl_key.passwd.pem
index 2524672e70..e4f1370ab2 100644
--- a/lib-python/2.7/test/ssl_key.passwd.pem
+++ b/lib-python/2.7/test/ssl_key.passwd.pem
@@ -1,18 +1,42 @@
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
-DEK-Info: DES-EDE3-CBC,1A8D9D2A02EC698A
+DEK-Info: DES-EDE3-CBC,8064BE1494B24B13
-kJYbfZ8L0sfe9Oty3gw0aloNnY5E8fegRfQLZlNoxTl6jNt0nIwI8kDJ36CZgR9c
-u3FDJm/KqrfUoz8vW+qEnWhSG7QPX2wWGPHd4K94Yz/FgrRzZ0DoK7XxXq9gOtVA
-AVGQhnz32p+6WhfGsCr9ArXEwRZrTk/FvzEPaU5fHcoSkrNVAGX8IpSVkSDwEDQr
-Gv17+cfk99UV1OCza6yKHoFkTtrC+PZU71LomBabivS2Oc4B9hYuSR2hF01wTHP+
-YlWNagZOOVtNz4oKK9x9eNQpmfQXQvPPTfusexKIbKfZrMvJoxcm1gfcZ0H/wK6P
-6wmXSG35qMOOztCZNtperjs1wzEBXznyK8QmLcAJBjkfarABJX9vBEzZV0OUKhy+
-noORFwHTllphbmydLhu6ehLUZMHPhzAS5UN7srtpSN81eerDMy0RMUAwA7/PofX1
-94Me85Q8jP0PC9ETdsJcPqLzAPETEYu0ELewKRcrdyWi+tlLFrpE5KT/s5ecbl9l
-7B61U4Kfd1PIXc/siINhU3A3bYK+845YyUArUOnKf1kEox7p1RpD7yFqVT04lRTo
-cibNKATBusXSuBrp2G6GNuhWEOSafWCKJQAzgCYIp6ZTV2khhMUGppc/2H3CF6cO
-zX0KtlPVZC7hLkB6HT8SxYUwF1zqWY7+/XPPdc37MeEZ87Q3UuZwqORLY+Z0hpgt
-L5JXBCoklZhCAaN2GqwFLXtGiRSRFGY7xXIhbDTlE65Wv1WGGgDLMKGE1gOz3yAo
-2jjG1+yAHJUdE69XTFHSqSkvaloA1W03LdMXZ9VuQJ/ySXCie6ABAQ==
+KJrffOMbo8M0I3PzcYxRZGMpKD1yB3Ii4+bT5XoanxjIJ+4fdx6LfZ0Rsx+riyzs
+tymsQu/iYY9j+4rCvN9+eetsL1X6iZpiimKsLexcid9M3fb0vxED5Sgw0dvunCUA
+xhqjLIKR92MKbODHf6KrDKCpsiPbjq4gZ7P+uCGXAMHL3MXIJSC0hW9rK7Ce6oyO
+CjpIcgB8x+GUWZZZhAFdlzIHMZrteNP2P5HK6QcaT71P034Dz1hhqoj4Q0t+Fta2
+4tfsM/bnTR/l6hwlhPa1e3Uj322tDTDWBScgWANn5+sEWldLmozMaWhZsn22pfk2
+KjRMGXG024JVheV882nbdOBvG7oq+lxkZ/ZP+vvqJqnvYtf7WtM8UivzYpe5Hz5b
+kVvWzPjBLUSZ9whM9rDLqSSqMPyPvDTuEmLkuq+xm7pYJmsLqIMP2klZLqRxLX6K
+uqwplb8UG440qauxgnQ905PId1l2fJEnRtV+7vXprA0L0QotgXLVHBhLmTFM+3PH
+9H3onf31dionUAPrn3nfVE36HhvVgRyvDBnBzJSIMighgq21Qx/d1dk0DRYi1hUI
+nCHl0YJPXheVcXR7JiSF2XQCAaFuS1Mr7NCXfWZOZQC/0dkvmHnl9DUAhuqq9BNZ
+1cKhZXcKHadg2/r0Zup/oDzmHPUEfTAXT0xbqoWlhkdwbF2veWQ96A/ncx3ISTb4
+PkXBlX9rdia8nmtyQDQRn4NuvchbaGkj4WKFC8pF8Hn7naHqwjpHaDUimBc0CoQW
+edNJqruKWwtSVLuwKHCC2gZFX9AXSKJXJz/QRSUlhFGOhuF/J6yKaXj6n5lxWNiQ
+54J+OP/hz2aS95CD2+Zf1SKpxdWiLZSIQqESpmmUrXROixNJZ/Z7gI74Dd9dSJOH
+W+3AU03vrrFZVrJVZhjcINHoH1Skh6JKscH18L6x4U868nSr4SrRLX8BhHllOQyD
+bmU+PZAjF8ZBIaCtTGulDXD29F73MeAZeTSsgQjFu0iKLj1wPiphbx8i/SUtR4YP
+X6PVA04g66r1NBw+3RQASVorZ3g1MSFvITHXcbKkBDeJH2z1+c6t/VVyTONnQhM5
+lLgRSk6HCbetvT9PKxWrWutA12pdBYEHdZhMHVf2+xclky7l09w8hg2/qqcdGRGe
+oAOZ72t0l5ObNyaruDKUS6f4AjOyWq/Xj5xuFtf1n3tQHyslSyCTPcAbQhDfTHUx
+vixb/V9qvYPt7OCn8py7v1M69NH42QVFAvwveDIFjZdqfIKBoJK2V4qPoevJI6uj
+Q5ByMt8OXOjSXNpHXpYQWUiWeCwOEBXJX8rzCHdMtg37jJ0zCmeErR1NTdg+EujM
+TWYgd06jlT67tURST0aB2kg4ijKgUJefD313LW1zC6gVsTbjSZxYyRbPfSP6flQB
+yCi1C19E2OsgleqbkBVC5GlYUzaJT7SGjCRmGx1eqtbrALu+LVH24Wceexlpjydl
++s2nf/DZlKun/tlPh6YioifPCJjByZMQOCEfIox6BkemZETz8uYA4TTWimG13Z03
+gyDGC2jdpEW414J2qcQDvrdUgJ+HlhrAAHaWpMQDbXYxBGoZ+3+ORvQV4kAsCwL8
+k3EIrVpePdik+1xgOWsyLj6QxFXlTMvL6Wc5pnArFPORsgHEolJvxSPTf9aAHNPn
+V2WBvxiLBtYpGrujAUM40Syx/aN2RPtcXYPAusHUBw+S8/p+/8Kg8GZmnIXG3F89
+45Eepl2quZYIrou7a1fwIpIIZ0hFiBQ1mlHVMFtxwVHS1bQb3SU2GeO+JcGjdVXc
+04qeGuQ5M164eQ5C0T7ZQ1ULiUlFWKD30m+cjqmZzt3d7Q0mKpMKuESIuZJo/wpD
+Nas432aLKUhcNx/pOYLkKJRpGZKOupQoD5iUj/j44o8JoFkDK33v2S57XB5QGz28
+9Zuhx49b3W8mbM6EBanlQKLWJGCxXqc/jhYhFWn+b0MhidynFgA0oeWvf6ZDyt6H
+Yi5Etxsar09xp0Do3NxtQXLuSUu0ji2pQzSIKuoqQWKqldm6VrpwojiqJhy4WQBQ
+aVVyFeWBC7G3Zj76dO+yp2sfJ0itJUQ8AIB9Cg0f34rEZu+r9luPmqBoUeL95Tk7
+YvCOU3Jl8Iqysv8aNpVXT8sa8rrSbruWCByEePZ37RIdHLMVBwVY0eVaFQjrjU7E
+mXmM9eaoYLfXOllsQ+M2+qPFUITr/GU3Qig13DhK/+yC1R6V2a0l0WRhMltIPYKW
+Ztvvr4hK5LcYCeS113BLiMbDIMMZZYGDZGMdC8DnnVbT2loF0Rfmp80Af31KmMQ4
+6XvMatW9UDjBoY5a/YMpdm7SRwm+MgV2KNPpc2kST87/yi9oprGAb8qiarHiHTM0
-----END RSA PRIVATE KEY-----
diff --git a/lib-python/2.7/test/ssl_key.pem b/lib-python/2.7/test/ssl_key.pem
index 3fd3bbd54a..1ea4578d81 100644
--- a/lib-python/2.7/test/ssl_key.pem
+++ b/lib-python/2.7/test/ssl_key.pem
@@ -1,16 +1,40 @@
-----BEGIN PRIVATE KEY-----
-MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANtb0+YrKuxevGpm
-LrjaUhZSgz6zFAmuGFmKmUbdjmfv9zSmmdsQIksK++jK0Be9LeZy20j6ahOfuVa0
-ufEmPoP7Fy4hXegKZR9cCWcIe/A6H2xWF1IIJLRTLaU8ol/I7T+um5HD5AwAwNPP
-USNU0Eegmvp+xxWu3NX2m1Veot85AgMBAAECgYA3ZdZ673X0oexFlq7AAmrutkHt
-CL7LvwrpOiaBjhyTxTeSNWzvtQBkIU8DOI0bIazA4UreAFffwtvEuPmonDb3F+Iq
-SMAu42XcGyVZEl+gHlTPU9XRX7nTOXVt+MlRRRxL6t9GkGfUAXI3XxJDXW3c0vBK
-UL9xqD8cORXOfE06rQJBAP8mEX1ERkR64Ptsoe4281vjTlNfIbs7NMPkUnrn9N/Y
-BLhjNIfQ3HFZG8BTMLfX7kCS9D593DW5tV4Z9BP/c6cCQQDcFzCcVArNh2JSywOQ
-ZfTfRbJg/Z5Lt9Fkngv1meeGNPgIMLN8Sg679pAOOWmzdMO3V706rNPzSVMME7E5
-oPIfAkEA8pDddarP5tCvTTgUpmTFbakm0KoTZm2+FzHcnA4jRh+XNTjTOv98Y6Ik
-eO5d1ZnKXseWvkZncQgxfdnMqqpj5wJAcNq/RVne1DbYlwWchT2Si65MYmmJ8t+F
-0mcsULqjOnEMwf5e+ptq5LzwbyrHZYq5FNk7ocufPv/ZQrcSSC+cFwJBAKvOJByS
-x56qyGeZLOQlWS2JS3KJo59XuLFGqcbgN9Om9xFa41Yb4N9NvplFivsvZdw3m1Q/
-SPIXQuT8RMPDVNQ=
+MIIG/wIBADANBgkqhkiG9w0BAQEFAASCBukwggblAgEAAoIBgQCylKlLaKU+hOvJ
+DfriTRLd+IthG5hv28I3A/CGjLICT0rDDtgaXd0uqloJAnjsgn5gMAcStpDW8Rm+
+t6LsrBL+5fBgkyU1r94Rvx0HHoyaZwBBouitVHw28hP3W+smddkqB1UxpGnTeL2B
+gj3dVo/WTtRfO+0h0PKw1l98YE1pMTdqIwcOOE/ER0g4hvA/wrxuLhMvlVLMy/lL
+58uctqaDUqryNyeerKbVkq4fJyCG5D2TwXVJ3i2DDh0xSt2Y10poZV4M4k8Su9Z5
+8zN2PSvYMT50aqF277v8BaOeYUApBE4kZGIJpo13ATGdEwpUFZ0Fri4zLYUZ1hWb
+OC35sKo7OxWQ/+tefNUdgWHob6Vmy777jiYcLwxc3sS9rF3AJe0rMW83kCkR6hmy
+A3250E137N/1QumHuT/Nj9rnI/lwt9jfaYkZjoAgT/C97m/mM83cYpGTdoGV1xNo
+7G90MhP0di5FnVsrIaSnvkbGT9UgUWx0oVMjocifdG2qIhMI9psCAwEAAQKCAYBT
+sHmaPmNaZj59jZCqp0YVQlpHWwBYQ5vD3pPE6oCttm0p9nXt/VkfenQRTthOtmT1
+POzDp00/feP7zeGLmqSYUjgRekPw4gdnN7Ip2PY5kdW77NWwDSzdLxuOS8Rq1MW9
+/Yu+ZPe3RBlDbT8C0IM+Atlh/BqIQ3zIxN4g0pzUlF0M33d6AYfYSzOcUhibOO7H
+j84r+YXBNkIRgYKZYbutRXuZYaGuqejRpBj3voVu0d3Ntdb6lCWuClpB9HzfGN0c
+RTv8g6UYO4sK3qyFn90ibIR/1GB9watvtoWVZqggiWeBzSWVWRsGEf9O+Cx4oJw1
+IphglhmhbgNksbj7bD24on/icldSOiVkoUemUOFmHWhCm4PnB1GmbD8YMfEdSbks
+qDr1Ps1zg4mGOinVD/4cY7vuPFO/HCH07wfeaUGzRt4g0/yLr+XjVofOA3oowyxv
+JAzr+niHA3lg5ecj4r7M68efwzN1OCyjMrVJw2RAzwvGxE+rm5NiT08SWlKQZnkC
+gcEA4wvyLpIur/UB84nV3XVJ89UMNBLm++aTFzld047BLJtMaOhvNqx6Cl5c8VuW
+l261KHjiVzpfNM3/A2LBQJcYkhX7avkqEXlj57cl+dCWAVwUzKmLJTPjfaTTZnYJ
+xeN3dMYjJz2z2WtgvfvDoJLukVwIMmhTY8wtqqYyQBJ/l06pBsfw5TNvmVIOQHds
+8ASOiFt+WRLk2bl9xrGGayqt3VV93KVRzF27cpjOgEcG74F3c0ZW9snERN7vIYwB
+JfrlAoHBAMlahPwMP2TYylG8OzHe7EiehTekSO26LGh0Cq3wTGXYsK/q8hQCzL14
+kWW638vpwXL6L9ntvrd7hjzWRO3vX/VxnYEA6f0bpqHq1tZi6lzix5CTUN5McpDg
+QnjenSJNrNjS1zEF8WeY9iLEuDI/M/iUW4y9R6s3WpgQhPDXpSvd2g3gMGRUYhxQ
+Xna8auiJeYFq0oNaOxvJj+VeOfJ3ZMJttd+Y7gTOYZcbg3SdRb/kdxYki0RMD2hF
+4ZvjJ6CTfwKBwQDiMqiZFTJGQwYqp4vWEmAW+I4r4xkUpWatoI2Fk5eI5T9+1PLX
+uYXsho56NxEU1UrOg4Cb/p+TcBc8PErkGqR0BkpxDMOInTOXSrQe6lxIBoECVXc3
+HTbrmiay0a5y5GfCgxPKqIJhfcToAceoVjovv0y7S4yoxGZKuUEe7E8JY2iqRNAO
+yOvKCCICv/hcN235E44RF+2/rDlOltagNej5tY6rIFkaDdgOF4bD7f9O5eEni1Bg
+litfoesDtQP/3rECgcEAkQfvQ7D6tIPmbqsbJBfCr6fmoqZllT4FIJN84b50+OL0
+mTGsfjdqC4tdhx3sdu7/VPbaIqm5NmX10bowWgWSY7MbVME4yQPyqSwC5NbIonEC
+d6N0mzoLR0kQ+Ai4u+2g82gicgAq2oj1uSNi3WZi48jQjHYFulCbo246o1NgeFFK
+77WshYe2R1ioQfQDOU1URKCR0uTaMHClgfu112yiGd12JAD+aF3TM0kxDXz+sXI5
+SKy311DFxECZeXRLpcC3AoHBAJkNMJWTyPYbeVu+CTQkec8Uun233EkXa2kUNZc/
+5DuXDaK+A3DMgYRufTKSPpDHGaCZ1SYPInX1Uoe2dgVjWssRL2uitR4ENabDoAOA
+ICVYXYYNagqQu5wwirF0QeaMXo1fjhuuHQh8GsMdXZvYEaAITZ9/NG5x/oY08+8H
+kr78SMBOPy3XQn964uKG+e3JwpOG14GKABdAlrHKFXNWchu/6dgcYXB87mrC/GhO
+zNwzC+QhFTZoOomFoqMgFWujng==
-----END PRIVATE KEY-----
diff --git a/lib-python/2.7/test/ssl_servers.py b/lib-python/2.7/test/ssl_servers.py
index a312e28573..a5023052d3 100644
--- a/lib-python/2.7/test/ssl_servers.py
+++ b/lib-python/2.7/test/ssl_servers.py
@@ -42,6 +42,11 @@ class HTTPSServer(_HTTPServer):
raise
return sslconn, addr
+ def handle_error(self, request, client_address):
+ "Suppress noisy error output by default."
+ if support.verbose:
+ _HTTPServer.handle_error(self, request, client_address)
+
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
# need to override translate_path to get a known root,
# instead of using os.curdir, since the test could be
diff --git a/lib-python/2.7/test/support/__init__.py b/lib-python/2.7/test/support/__init__.py
new file mode 100644
index 0000000000..3bb7bb88e1
--- /dev/null
+++ b/lib-python/2.7/test/support/__init__.py
@@ -0,0 +1,2222 @@
+"""Supporting definitions for the Python regression tests."""
+
+if __name__ != 'test.support':
+ raise ImportError('test.support must be imported from the test package')
+
+import contextlib
+import errno
+import fnmatch
+import functools
+import gc
+import socket
+import stat
+import sys
+import os
+import platform
+import shutil
+import warnings
+import unittest
+import importlib
+import UserDict
+import re
+import time
+import struct
+import sysconfig
+import types
+
+try:
+ import thread
+except ImportError:
+ thread = None
+
+__all__ = ["Error", "TestFailed", "TestDidNotRun", "ResourceDenied", "import_module",
+ "verbose", "use_resources", "max_memuse", "record_original_stdout",
+ "get_original_stdout", "unload", "unlink", "rmtree", "forget",
+ "is_resource_enabled", "requires", "requires_mac_ver",
+ "find_unused_port", "bind_port",
+ "fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
+ "SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
+ "open_urlresource", "check_warnings", "check_py3k_warnings",
+ "CleanImport", "EnvironmentVarGuard", "captured_output",
+ "captured_stdout", "TransientResource", "transient_internet",
+ "run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
+ "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
+ "threading_cleanup", "reap_threads", "start_threads", "cpython_only",
+ "check_impl_detail", "get_attribute", "py3k_bytes",
+ "import_fresh_module", "threading_cleanup", "reap_children",
+ "strip_python_stderr", "IPV6_ENABLED", "run_with_tz",
+ "SuppressCrashReport"]
+
+class Error(Exception):
+ """Base class for regression test exceptions."""
+
+class TestFailed(Error):
+ """Test failed."""
+
+class TestDidNotRun(Error):
+ """Test did not run any subtests."""
+
+class ResourceDenied(unittest.SkipTest):
+ """Test skipped because it requested a disallowed resource.
+
+ This is raised when a test calls requires() for a resource that
+ has not been enabled. It is used to distinguish between expected
+ and unexpected skips.
+ """
+
+@contextlib.contextmanager
+def _ignore_deprecated_imports(ignore=True):
+ """Context manager to suppress package and module deprecation
+ warnings when importing them.
+
+ If ignore is False, this context manager has no effect."""
+ if ignore:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", ".+ (module|package)",
+ DeprecationWarning)
+ yield
+ else:
+ yield
+
+
+def import_module(name, deprecated=False):
+ """Import and return the module to be tested, raising SkipTest if
+ it is not available.
+
+ If deprecated is True, any module or package deprecation messages
+ will be suppressed."""
+ with _ignore_deprecated_imports(deprecated):
+ try:
+ return importlib.import_module(name)
+ except ImportError, msg:
+ raise unittest.SkipTest(str(msg))
+
+
+def _save_and_remove_module(name, orig_modules):
+ """Helper function to save and remove a module from sys.modules
+
+ Raise ImportError if the module can't be imported."""
+ # try to import the module and raise an error if it can't be imported
+ if name not in sys.modules:
+ __import__(name)
+ del sys.modules[name]
+ for modname in list(sys.modules):
+ if modname == name or modname.startswith(name + '.'):
+ orig_modules[modname] = sys.modules[modname]
+ del sys.modules[modname]
+
+def _save_and_block_module(name, orig_modules):
+ """Helper function to save and block a module in sys.modules
+
+ Return True if the module was in sys.modules, False otherwise."""
+ saved = True
+ try:
+ orig_modules[name] = sys.modules[name]
+ except KeyError:
+ saved = False
+ sys.modules[name] = None
+ return saved
+
+
+def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
+ """Imports and returns a module, deliberately bypassing the sys.modules cache
+ and importing a fresh copy of the module. Once the import is complete,
+ the sys.modules cache is restored to its original state.
+
+ Modules named in fresh are also imported anew if needed by the import.
+ If one of these modules can't be imported, None is returned.
+
+ Importing of modules named in blocked is prevented while the fresh import
+ takes place.
+
+ If deprecated is True, any module or package deprecation messages
+ will be suppressed."""
+ # NOTE: test_heapq, test_json, and test_warnings include extra sanity
+ # checks to make sure that this utility function is working as expected
+ with _ignore_deprecated_imports(deprecated):
+ # Keep track of modules saved for later restoration as well
+ # as those which just need a blocking entry removed
+ orig_modules = {}
+ names_to_remove = []
+ _save_and_remove_module(name, orig_modules)
+ try:
+ for fresh_name in fresh:
+ _save_and_remove_module(fresh_name, orig_modules)
+ for blocked_name in blocked:
+ if not _save_and_block_module(blocked_name, orig_modules):
+ names_to_remove.append(blocked_name)
+ fresh_module = importlib.import_module(name)
+ except ImportError:
+ fresh_module = None
+ finally:
+ for orig_name, module in orig_modules.items():
+ sys.modules[orig_name] = module
+ for name_to_remove in names_to_remove:
+ del sys.modules[name_to_remove]
+ return fresh_module
+
+
+def get_attribute(obj, name):
+ """Get an attribute, raising SkipTest if AttributeError is raised."""
+ try:
+ attribute = getattr(obj, name)
+ except AttributeError:
+ if isinstance(obj, types.ModuleType):
+ msg = "module %r has no attribute %r" % (obj.__name__, name)
+ elif isinstance(obj, types.ClassType):
+ msg = "class %s has no attribute %r" % (obj.__name__, name)
+ elif isinstance(obj, types.InstanceType):
+ msg = "%s instance has no attribute %r" % (obj.__class__.__name__, name)
+ elif isinstance(obj, type):
+ msg = "type object %r has no attribute %r" % (obj.__name__, name)
+ else:
+ msg = "%r object has no attribute %r" % (type(obj).__name__, name)
+ raise unittest.SkipTest(msg)
+ else:
+ return attribute
+
+
+verbose = 1 # Flag set to 0 by regrtest.py
+use_resources = None # Flag set to [] by regrtest.py
+max_memuse = 0 # Disable bigmem tests (they will still be run with
+ # small sizes, to make sure they work.)
+real_max_memuse = 0
+failfast = False
+
+# _original_stdout is meant to hold stdout at the time regrtest began.
+# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
+# The point is to have some flavor of stdout the user can actually see.
+_original_stdout = None
+def record_original_stdout(stdout):
+ global _original_stdout
+ _original_stdout = stdout
+
+def get_original_stdout():
+ return _original_stdout or sys.stdout
+
+def unload(name):
+ try:
+ del sys.modules[name]
+ except KeyError:
+ pass
+
+def _force_run(path, func, *args):
+ try:
+ return func(*args)
+ except EnvironmentError as err:
+ if verbose >= 2:
+ print('%s: %s' % (err.__class__.__name__, err))
+ print('re-run %s%r' % (func.__name__, args))
+ os.chmod(path, stat.S_IRWXU)
+ return func(*args)
+
+if sys.platform.startswith("win"):
+ def _waitfor(func, pathname, waitall=False):
+ # Perform the operation
+ func(pathname)
+ # Now setup the wait loop
+ if waitall:
+ dirname = pathname
+ else:
+ dirname, name = os.path.split(pathname)
+ dirname = dirname or '.'
+ # Check for `pathname` to be removed from the filesystem.
+ # The exponential backoff of the timeout amounts to a total
+ # of ~1 second after which the deletion is probably an error
+ # anyway.
+ # Testing on an i7@4.3GHz shows that usually only 1 iteration is
+ # required when contention occurs.
+ timeout = 0.001
+ while timeout < 1.0:
+ # Note we are only testing for the existence of the file(s) in
+ # the contents of the directory regardless of any security or
+ # access rights. If we have made it this far, we have sufficient
+ # permissions to do that much using Python's equivalent of the
+ # Windows API FindFirstFile.
+ # Other Windows APIs can fail or give incorrect results when
+ # dealing with files that are pending deletion.
+ L = os.listdir(dirname)
+ if not (L if waitall else name in L):
+ return
+ # Increase the timeout and try again
+ time.sleep(timeout)
+ timeout *= 2
+ warnings.warn('tests may fail, delete still pending for ' + pathname,
+ RuntimeWarning, stacklevel=4)
+
+ def _unlink(filename):
+ _waitfor(os.unlink, filename)
+
+ def _rmdir(dirname):
+ _waitfor(os.rmdir, dirname)
+
+ def _rmtree(path):
+ def _rmtree_inner(path):
+ for name in _force_run(path, os.listdir, path):
+ fullname = os.path.join(path, name)
+ if os.path.isdir(fullname):
+ _waitfor(_rmtree_inner, fullname, waitall=True)
+ _force_run(fullname, os.rmdir, fullname)
+ else:
+ _force_run(fullname, os.unlink, fullname)
+ _waitfor(_rmtree_inner, path, waitall=True)
+ _waitfor(lambda p: _force_run(p, os.rmdir, p), path)
+else:
+ _unlink = os.unlink
+ _rmdir = os.rmdir
+
+ def _rmtree(path):
+ try:
+ shutil.rmtree(path)
+ return
+ except EnvironmentError:
+ pass
+
+ def _rmtree_inner(path):
+ for name in _force_run(path, os.listdir, path):
+ fullname = os.path.join(path, name)
+ try:
+ mode = os.lstat(fullname).st_mode
+ except EnvironmentError:
+ mode = 0
+ if stat.S_ISDIR(mode):
+ _rmtree_inner(fullname)
+ _force_run(path, os.rmdir, fullname)
+ else:
+ _force_run(path, os.unlink, fullname)
+ _rmtree_inner(path)
+ os.rmdir(path)
+
+def unlink(filename):
+ try:
+ _unlink(filename)
+ except OSError as exc:
+ if exc.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+
+def rmdir(dirname):
+ try:
+ _rmdir(dirname)
+ except OSError as error:
+ # The directory need not exist.
+ if error.errno != errno.ENOENT:
+ raise
+
+def rmtree(path):
+ try:
+ _rmtree(path)
+ except OSError, e:
+ # Unix returns ENOENT, Windows returns ESRCH.
+ if e.errno not in (errno.ENOENT, errno.ESRCH):
+ raise
+
+def forget(modname):
+ '''"Forget" a module was ever imported by removing it from sys.modules and
+ deleting any .pyc and .pyo files.'''
+ unload(modname)
+ for dirname in sys.path:
+ unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
+ # Deleting the .pyo file cannot be within the 'try' for the .pyc since
+ # the chance exists that there is no .pyc (and thus the 'try' statement
+ # is exited) but there is a .pyo file.
+ unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
+
+# Check whether a gui is actually available
+def _is_gui_available():
+ if hasattr(_is_gui_available, 'result'):
+ return _is_gui_available.result
+ reason = None
+ if sys.platform.startswith('win'):
+ # if Python is running as a service (such as the buildbot service),
+ # gui interaction may be disallowed
+ import ctypes
+ import ctypes.wintypes
+ UOI_FLAGS = 1
+ WSF_VISIBLE = 0x0001
+ class USEROBJECTFLAGS(ctypes.Structure):
+ _fields_ = [("fInherit", ctypes.wintypes.BOOL),
+ ("fReserved", ctypes.wintypes.BOOL),
+ ("dwFlags", ctypes.wintypes.DWORD)]
+ dll = ctypes.windll.user32
+ h = dll.GetProcessWindowStation()
+ if not h:
+ raise ctypes.WinError()
+ uof = USEROBJECTFLAGS()
+ needed = ctypes.wintypes.DWORD()
+ res = dll.GetUserObjectInformationW(h,
+ UOI_FLAGS,
+ ctypes.byref(uof),
+ ctypes.sizeof(uof),
+ ctypes.byref(needed))
+ if not res:
+ raise ctypes.WinError()
+ if not bool(uof.dwFlags & WSF_VISIBLE):
+ reason = "gui not available (WSF_VISIBLE flag not set)"
+ elif sys.platform == 'darwin':
+ # The Aqua Tk implementations on OS X can abort the process if
+ # being called in an environment where a window server connection
+ # cannot be made, for instance when invoked by a buildbot or ssh
+ # process not running under the same user id as the current console
+ # user. To avoid that, raise an exception if the window manager
+ # connection is not available.
+ from ctypes import cdll, c_int, pointer, Structure
+ from ctypes.util import find_library
+
+ app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
+
+ if app_services.CGMainDisplayID() == 0:
+ reason = "gui tests cannot run without OS X window manager"
+ else:
+ class ProcessSerialNumber(Structure):
+ _fields_ = [("highLongOfPSN", c_int),
+ ("lowLongOfPSN", c_int)]
+ psn = ProcessSerialNumber()
+ psn_p = pointer(psn)
+ if ( (app_services.GetCurrentProcess(psn_p) < 0) or
+ (app_services.SetFrontProcess(psn_p) < 0) ):
+ reason = "cannot run without OS X gui process"
+
+ # check on every platform whether tkinter can actually do anything
+ if not reason:
+ try:
+ from Tkinter import Tk
+ root = Tk()
+ root.withdraw()
+ root.update()
+ root.destroy()
+ except Exception as e:
+ err_string = str(e)
+ if len(err_string) > 50:
+ err_string = err_string[:50] + ' [...]'
+ reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
+ err_string)
+
+ _is_gui_available.reason = reason
+ _is_gui_available.result = not reason
+
+ return _is_gui_available.result
+
+def is_resource_enabled(resource):
+ """Test whether a resource is enabled.
+
+ Known resources are set by regrtest.py. If not running under regrtest.py,
+ all resources are assumed enabled unless use_resources has been set.
+ """
+ return use_resources is None or resource in use_resources
+
+def requires(resource, msg=None):
+ """Raise ResourceDenied if the specified resource is not available."""
+ if not is_resource_enabled(resource):
+ if msg is None:
+ msg = "Use of the `%s' resource not enabled" % resource
+ raise ResourceDenied(msg)
+ if resource == 'gui' and not _is_gui_available():
+ raise ResourceDenied(_is_gui_available.reason)
+
+def requires_mac_ver(*min_version):
+ """Decorator raising SkipTest if the OS is Mac OS X and the OS X
+ version is less than min_version.
+
+ For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
+ is lesser than 10.5.
+ """
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kw):
+ if sys.platform == 'darwin':
+ version_txt = platform.mac_ver()[0]
+ try:
+ version = tuple(map(int, version_txt.split('.')))
+ except ValueError:
+ pass
+ else:
+ if version < min_version:
+ min_version_txt = '.'.join(map(str, min_version))
+ raise unittest.SkipTest(
+ "Mac OS X %s or higher required, not %s"
+ % (min_version_txt, version_txt))
+ return func(*args, **kw)
+ wrapper.min_version = min_version
+ return wrapper
+ return decorator
+
+
+# Don't use "localhost", since resolving it uses the DNS under recent
+# Windows versions (see issue #18792).
+HOST = "127.0.0.1"
+HOSTv6 = "::1"
+
+
+def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
+ """Returns an unused port that should be suitable for binding. This is
+ achieved by creating a temporary socket with the same family and type as
+ the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
+ the specified host address (defaults to 0.0.0.0) with the port set to 0,
+ eliciting an unused ephemeral port from the OS. The temporary socket is
+ then closed and deleted, and the ephemeral port is returned.
+
+ Either this method or bind_port() should be used for any tests where a
+ server socket needs to be bound to a particular port for the duration of
+ the test. Which one to use depends on whether the calling code is creating
+ a python socket, or if an unused port needs to be provided in a constructor
+ or passed to an external program (i.e. the -accept argument to openssl's
+ s_server mode). Always prefer bind_port() over find_unused_port() where
+ possible. Hard coded ports should *NEVER* be used. As soon as a server
+ socket is bound to a hard coded port, the ability to run multiple instances
+ of the test simultaneously on the same host is compromised, which makes the
+ test a ticking time bomb in a buildbot environment. On Unix buildbots, this
+ may simply manifest as a failed test, which can be recovered from without
+ intervention in most cases, but on Windows, the entire python process can
+ completely and utterly wedge, requiring someone to log in to the buildbot
+ and manually kill the affected process.
+
+ (This is easy to reproduce on Windows, unfortunately, and can be traced to
+ the SO_REUSEADDR socket option having different semantics on Windows versus
+ Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
+ listen and then accept connections on identical host/ports. An EADDRINUSE
+ socket.error will be raised at some point (depending on the platform and
+ the order bind and listen were called on each socket).
+
+ However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
+ will ever be raised when attempting to bind two identical host/ports. When
+ accept() is called on each socket, the second caller's process will steal
+ the port from the first caller, leaving them both in an awkwardly wedged
+ state where they'll no longer respond to any signals or graceful kills, and
+ must be forcibly killed via OpenProcess()/TerminateProcess().
+
+ The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
+ instead of SO_REUSEADDR, which effectively affords the same semantics as
+ SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
+ Source world compared to Windows ones, this is a common mistake. A quick
+ look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
+ openssl.exe is called with the 's_server' option, for example. See
+ http://bugs.python.org/issue2550 for more info. The following site also
+ has a very thorough description about the implications of both REUSEADDR
+ and EXCLUSIVEADDRUSE on Windows:
+ http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
+
+ XXX: although this approach is a vast improvement on previous attempts to
+ elicit unused ports, it rests heavily on the assumption that the ephemeral
+ port returned to us by the OS won't immediately be dished back out to some
+ other process when we close and delete our temporary socket but before our
+ calling code has a chance to bind the returned port. We can deal with this
+ issue if/when we come across it."""
+ tempsock = socket.socket(family, socktype)
+ port = bind_port(tempsock)
+ tempsock.close()
+ del tempsock
+ return port
+
+def bind_port(sock, host=HOST):
+ """Bind the socket to a free port and return the port number. Relies on
+ ephemeral ports in order to ensure we are using an unbound port. This is
+ important as many tests may be running simultaneously, especially in a
+ buildbot environment. This method raises an exception if the sock.family
+ is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
+ or SO_REUSEPORT set on it. Tests should *never* set these socket options
+ for TCP/IP sockets. The only case for setting these options is testing
+ multicasting via multiple UDP sockets.
+
+ Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
+ on Windows), it will be set on the socket. This will prevent anyone else
+ from bind()'ing to our host/port for the duration of the test.
+ """
+ if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
+ if hasattr(socket, 'SO_REUSEADDR'):
+ if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
+ raise TestFailed("tests should never set the SO_REUSEADDR " \
+ "socket option on TCP/IP sockets!")
+ if hasattr(socket, 'SO_REUSEPORT'):
+ try:
+ if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
+ raise TestFailed("tests should never set the SO_REUSEPORT " \
+ "socket option on TCP/IP sockets!")
+ except EnvironmentError:
+ # Python's socket module was compiled using modern headers
+ # thus defining SO_REUSEPORT but this process is running
+ # under an older kernel that does not support SO_REUSEPORT.
+ pass
+ if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
+
+ sock.bind((host, 0))
+ port = sock.getsockname()[1]
+ return port
+
+def _is_ipv6_enabled():
+ """Check whether IPv6 is enabled on this host."""
+ if socket.has_ipv6:
+ sock = None
+ try:
+ sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ sock.bind((HOSTv6, 0))
+ return True
+ except socket.error:
+ pass
+ finally:
+ if sock:
+ sock.close()
+ return False
+
+IPV6_ENABLED = _is_ipv6_enabled()
+
+def system_must_validate_cert(f):
+ """Skip the test on TLS certificate validation failures."""
+ @functools.wraps(f)
+ def dec(*args, **kwargs):
+ try:
+ f(*args, **kwargs)
+ except IOError as e:
+ if "CERTIFICATE_VERIFY_FAILED" in str(e):
+ raise unittest.SkipTest("system does not contain "
+ "necessary certificates")
+ raise
+ return dec
+
+FUZZ = 1e-6
+
+def fcmp(x, y): # fuzzy comparison function
+ if isinstance(x, float) or isinstance(y, float):
+ try:
+ fuzz = (abs(x) + abs(y)) * FUZZ
+ if abs(x-y) <= fuzz:
+ return 0
+ except:
+ pass
+ elif type(x) == type(y) and isinstance(x, (tuple, list)):
+ for i in range(min(len(x), len(y))):
+ outcome = fcmp(x[i], y[i])
+ if outcome != 0:
+ return outcome
+ return (len(x) > len(y)) - (len(x) < len(y))
+ return (x > y) - (x < y)
+
+
+# A constant likely larger than the underlying OS pipe buffer size, to
+# make writes blocking.
+# Windows limit seems to be around 512 B, and many Unix kernels have a
+# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
+# (see issue #17835 for a discussion of this number).
+PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
+
+# A constant likely larger than the underlying OS socket buffer size, to make
+# writes blocking.
+# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
+# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
+# for a discussion of this number).
+SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
+
+is_jython = sys.platform.startswith('java')
+
+try:
+ unicode
+ have_unicode = True
+except NameError:
+ have_unicode = False
+
+requires_unicode = unittest.skipUnless(have_unicode, 'no unicode support')
+
+def u(s):
+ return unicode(s, 'unicode-escape')
+
+# FS_NONASCII: non-ASCII Unicode character encodable by
+# sys.getfilesystemencoding(), or None if there is no such character.
+FS_NONASCII = None
+if have_unicode:
+ for character in (
+ # First try printable and common characters to have a readable filename.
+ # For each character, the encoding list are just example of encodings able
+ # to encode the character (the list is not exhaustive).
+
+ # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
+ unichr(0x00E6),
+ # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
+ unichr(0x0130),
+ # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
+ unichr(0x0141),
+ # U+03C6 (Greek Small Letter Phi): cp1253
+ unichr(0x03C6),
+ # U+041A (Cyrillic Capital Letter Ka): cp1251
+ unichr(0x041A),
+ # U+05D0 (Hebrew Letter Alef): Encodable to cp424
+ unichr(0x05D0),
+ # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
+ unichr(0x060C),
+ # U+062A (Arabic Letter Teh): cp720
+ unichr(0x062A),
+ # U+0E01 (Thai Character Ko Kai): cp874
+ unichr(0x0E01),
+
+ # Then try more "special" characters. "special" because they may be
+ # interpreted or displayed differently depending on the exact locale
+ # encoding and the font.
+
+ # U+00A0 (No-Break Space)
+ unichr(0x00A0),
+ # U+20AC (Euro Sign)
+ unichr(0x20AC),
+ ):
+ try:
+ # In Windows, 'mbcs' is used, and encode() returns '?'
+ # for characters missing in the ANSI codepage
+ if character.encode(sys.getfilesystemencoding())\
+ .decode(sys.getfilesystemencoding())\
+ != character:
+ raise UnicodeError
+ except UnicodeError:
+ pass
+ else:
+ FS_NONASCII = character
+ break
+
+# Filename used for testing
+if os.name == 'java':
+ # Jython disallows @ in module names
+ TESTFN = '$test'
+elif os.name == 'riscos':
+ TESTFN = 'testfile'
+else:
+ TESTFN = '@test'
+ # Unicode name only used if TEST_FN_ENCODING exists for the platform.
+ if have_unicode:
+ # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
+ # TESTFN_UNICODE is a filename that can be encoded using the
+ # file system encoding, but *not* with the default (ascii) encoding
+ if isinstance('', unicode):
+ # python -U
+ # XXX perhaps unicode() should accept Unicode strings?
+ TESTFN_UNICODE = "@test-\xe0\xf2"
+ else:
+ # 2 latin characters.
+ TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
+ TESTFN_ENCODING = sys.getfilesystemencoding()
+ # TESTFN_UNENCODABLE is a filename that should *not* be
+ # able to be encoded by *either* the default or filesystem encoding.
+ # This test really only makes sense on Windows NT platforms
+ # which have special Unicode support in posixmodule.
+ if (not hasattr(sys, "getwindowsversion") or
+ sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
+ TESTFN_UNENCODABLE = None
+ else:
+ # Japanese characters (I think - from bug 846133)
+ TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
+ try:
+ # XXX - Note - should be using TESTFN_ENCODING here - but for
+ # Windows, "mbcs" currently always operates as if in
+ # errors=ignore' mode - hence we get '?' characters rather than
+ # the exception. 'Latin1' operates as we expect - ie, fails.
+ # See [ 850997 ] mbcs encoding ignores errors
+ TESTFN_UNENCODABLE.encode("Latin1")
+ except UnicodeEncodeError:
+ pass
+ else:
+ print \
+ 'WARNING: The filename %r CAN be encoded by the filesystem. ' \
+ 'Unicode filename tests may not be effective' \
+ % TESTFN_UNENCODABLE
+
+
+# Disambiguate TESTFN for parallel testing, while letting it remain a valid
+# module name.
+TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
+
+# Define the URL of a dedicated HTTP server for the network tests.
+# The URL must use clear-text HTTP: no redirection to encrypted HTTPS.
+TEST_HTTP_URL = "http://www.pythontest.net"
+
+# Save the initial cwd
+SAVEDCWD = os.getcwd()
+
+@contextlib.contextmanager
+def temp_dir(path=None, quiet=False):
+ """Return a context manager that creates a temporary directory.
+
+ Arguments:
+
+ path: the directory to create temporarily. If omitted or None,
+ defaults to creating a temporary directory using tempfile.mkdtemp.
+
+ quiet: if False (the default), the context manager raises an exception
+ on error. Otherwise, if the path is specified and cannot be
+ created, only a warning is issued.
+
+ """
+ dir_created = False
+ if path is None:
+ import tempfile
+ path = tempfile.mkdtemp()
+ dir_created = True
+ path = os.path.realpath(path)
+ else:
+ if (have_unicode and isinstance(path, unicode) and
+ not os.path.supports_unicode_filenames):
+ try:
+ path = path.encode(sys.getfilesystemencoding() or 'ascii')
+ except UnicodeEncodeError:
+ if not quiet:
+ raise unittest.SkipTest('unable to encode the cwd name with '
+ 'the filesystem encoding.')
+ try:
+ os.mkdir(path)
+ dir_created = True
+ except OSError:
+ if not quiet:
+ raise
+ warnings.warn('tests may fail, unable to create temp dir: ' + path,
+ RuntimeWarning, stacklevel=3)
+ if dir_created:
+ pid = os.getpid()
+ try:
+ yield path
+ finally:
+ # In case the process forks, let only the parent remove the
+ # directory. The child has a different process id. (bpo-30028)
+ if dir_created and pid == os.getpid():
+ rmtree(path)
+
+@contextlib.contextmanager
+def change_cwd(path, quiet=False):
+ """Return a context manager that changes the current working directory.
+
+ Arguments:
+
+ path: the directory to use as the temporary current working directory.
+
+ quiet: if False (the default), the context manager raises an exception
+ on error. Otherwise, it issues only a warning and keeps the current
+ working directory the same.
+
+ """
+ saved_dir = os.getcwd()
+ try:
+ os.chdir(path)
+ except OSError:
+ if not quiet:
+ raise
+ warnings.warn('tests may fail, unable to change CWD to: ' + path,
+ RuntimeWarning, stacklevel=3)
+ try:
+ yield os.getcwd()
+ finally:
+ os.chdir(saved_dir)
+
+
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False):
    """Create a temporary directory and make it the CWD for the block.

    name: directory name created under the current CWD; when None, a
        fresh tempfile.mkdtemp() directory is used instead.
    quiet: if False (the default), failure to create or enter the
        directory raises; if True it only warns and the original CWD
        is kept.
    """
    with temp_dir(path=name, quiet=quiet) as tmp:
        with change_cwd(tmp, quiet=quiet) as where:
            yield where
+
# TEST_HOME_DIR refers to the top level directory of the "test" package
# that contains Python's regression test suite
TEST_SUPPORT_DIR = os.path.dirname(os.path.abspath(__file__))  # directory of this module
TEST_HOME_DIR = os.path.dirname(TEST_SUPPORT_DIR)

# TEST_DATA_DIR is used as a target download location for remote resources
TEST_DATA_DIR = os.path.join(TEST_HOME_DIR, "data")
+
def findfile(file, subdir=None):
    """Try to find a file on sys.path and the working directory.  If it is not
    found the argument passed to the function is returned (this does not
    necessarily signal failure; could still be the legitimate path).

    Absolute paths are returned unchanged; *subdir*, when given, is joined
    in front of the file name before searching.
    """
    if os.path.isabs(file):
        return file
    if subdir is not None:
        file = os.path.join(subdir, file)
    for directory in [TEST_HOME_DIR] + sys.path:
        candidate = os.path.join(directory, file)
        if os.path.exists(candidate):
            return candidate
    return file
+
def sortdict(dict):
    "Like repr(dict), but in sorted order."
    pairs = sorted(dict.items())
    return "{%s}" % ", ".join("%r: %r" % pair for pair in pairs)
+
def make_bad_fd():
    """Return an invalid file descriptor.

    Opens TESTFN, takes its fd, then closes the file (and removes TESTFN),
    so the returned fd no longer refers to an open file.
    """
    f = open(TESTFN, "wb")
    try:
        return f.fileno()
    finally:
        f.close()
        unlink(TESTFN)
+
def check_syntax_error(testcase, statement, errtext='', lineno=None, offset=None):
    """Assert that compiling *statement* raises SyntaxError matching *errtext*.

    When *lineno* / *offset* are given, they are compared against the
    exception's position attributes using the supplied TestCase.
    """
    with testcase.assertRaisesRegexp(SyntaxError, errtext) as caught:
        compile(statement, '<test string>', 'exec')
    exc = caught.exception
    if lineno is not None:
        testcase.assertEqual(exc.lineno, lineno)
    if offset is not None:
        testcase.assertEqual(exc.offset, offset)
+
def open_urlresource(url, check=None):
    """Open the test-data file backing *url*, downloading it if necessary.

    The file is cached in TEST_DATA_DIR under the URL's basename.  *check*,
    when given, is called with the open file and must return true for a
    cached copy to be considered valid; an invalid copy is deleted and
    re-fetched.  The 'urlfetch' resource must be enabled before any
    download happens.  Raises TestFailed if the fetched file still does
    not validate.
    """
    import urlparse, urllib2

    filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!

    fn = os.path.join(TEST_DATA_DIR, filename)

    def check_valid_file(fn):
        # Return an open file object when fn passes *check*; otherwise
        # close it and return None.
        f = open(fn)
        if check is None:
            return f
        elif check(f):
            f.seek(0)
            return f
        f.close()

    if os.path.exists(fn):
        f = check_valid_file(fn)
        if f is not None:
            return f
        # Stale/invalid cache: remove and re-download below.
        unlink(fn)

    # Verify the requirement before downloading the file
    requires('urlfetch')

    print >> get_original_stdout(), '\tfetching %s ...' % url
    f = urllib2.urlopen(url, timeout=15)
    try:
        with open(fn, "wb") as out:
            s = f.read()
            while s:
                out.write(s)
                s = f.read()
    finally:
        f.close()

    f = check_valid_file(fn)
    if f is not None:
        return f
    raise TestFailed('invalid resource "%s"' % fn)
+
+
class WarningsRecorder(object):
    """Convenience wrapper for the warnings list returned on
    entry to the warnings.catch_warnings() context manager.
    """
    def __init__(self, warnings_list):
        self._warnings = warnings_list
        self._last = 0

    def __getattr__(self, attr):
        # Delegate to the most recently recorded warning; for the standard
        # warning-detail attributes, fall back to None when nothing new
        # has been recorded since the last reset().
        if len(self._warnings) > self._last:
            return getattr(self._warnings[-1], attr)
        if attr in warnings.WarningMessage._WARNING_DETAILS:
            return None
        raise AttributeError("%r has no attribute %r" % (self, attr))

    @property
    def warnings(self):
        """The warnings recorded since the last reset()."""
        return self._warnings[self._last:]

    def reset(self):
        """Forget all warnings recorded so far."""
        self._last = len(self._warnings)
+
+
def _filterwarnings(filters, quiet=False):
    """Catch the warnings, then check if all the expected
    warnings have been raised and re-raise unexpected warnings.
    If 'quiet' is True, only re-raise the unexpected warnings.

    NOTE: this is a generator function; it is driven through
    contextlib.contextmanager by check_warnings()/check_py3k_warnings(),
    which return the generator it produces.  *filters* is a sequence of
    (message-regexp, WarningCategory) pairs.
    """
    # Clear the warning registry of the calling module
    # in order to re-raise the warnings.
    frame = sys._getframe(2)
    registry = frame.f_globals.get('__warningregistry__')
    if registry:
        registry.clear()
    with warnings.catch_warnings(record=True) as w:
        # Set filter "always" to record all warnings.  Because
        # test_warnings swap the module, we need to look up in
        # the sys.modules dictionary.
        sys.modules['warnings'].simplefilter("always")
        yield WarningsRecorder(w)
    # Filter the recorded warnings
    reraise = [warning.message for warning in w]
    missing = []
    for msg, cat in filters:
        seen = False
        for exc in reraise[:]:
            message = str(exc)
            # Filter out the matching messages
            if (re.match(msg, message, re.I) and
                issubclass(exc.__class__, cat)):
                seen = True
                reraise.remove(exc)
        if not seen and not quiet:
            # This filter caught nothing
            missing.append((msg, cat.__name__))
    if reraise:
        raise AssertionError("unhandled warning %r" % reraise[0])
    if missing:
        raise AssertionError("filter (%r, %s) did not catch any warning" %
                             missing[0])
+
+
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
    """Context manager to silence warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default True without argument,
         default False if some filters are defined)

    Without argument, it defaults to:
        check_warnings(("", Warning), quiet=True)
    """
    quiet = kwargs.get('quiet')
    if filters:
        return _filterwarnings(filters, quiet)
    # No filters given: match every warning and, for backward
    # compatibility, let quiet default to True.
    return _filterwarnings((("", Warning),),
                           True if quiet is None else quiet)
+
+
@contextlib.contextmanager
def check_py3k_warnings(*filters, **kwargs):
    """Context manager to silence py3k warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default False)

    Without argument, it defaults to:
        check_py3k_warnings(("", DeprecationWarning), quiet=False)
    """
    if sys.py3kwarning:
        applied = filters or (("", DeprecationWarning),)
    else:
        # Without the -3 flag no py3k warning should be emitted at all.
        applied = ()
    return _filterwarnings(applied, kwargs.get('quiet'))
+
+
class CleanImport(object):
    """Context manager to force import to return a new module reference.

    This is useful for testing module-level behaviours, such as
    the emission of a DeprecationWarning on import.

    Use like this:

        with CleanImport("foo"):
            importlib.import_module("foo") # new reference
    """

    def __init__(self, *module_names):
        self.original_modules = sys.modules.copy()
        for name in module_names:
            if name not in sys.modules:
                continue
            module = sys.modules[name]
            # The name may just be an alias for another module (e.g. a
            # stub for a module renamed in 3.x); drop the real module too
            # so the import cache is fully cleared.
            if module.__name__ != name:
                del sys.modules[module.__name__]
            del sys.modules[name]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        sys.modules.update(self.original_modules)
+
+
class EnvironmentVarGuard(UserDict.DictMixin):

    """Class to help protect the environment variable properly.  Can be used as
    a context manager: every variable set or unset through it is restored
    to its original value (or removed again) on exit."""

    def __init__(self):
        self._environ = os.environ
        self._changed = {}

    def _remember(self, envvar):
        # Record the pre-modification value the first time a variable is
        # touched, so __exit__ can restore it.
        if envvar not in self._changed:
            self._changed[envvar] = self._environ.get(envvar)

    def __getitem__(self, envvar):
        return self._environ[envvar]

    def __setitem__(self, envvar, value):
        self._remember(envvar)
        self._environ[envvar] = value

    def __delitem__(self, envvar):
        self._remember(envvar)
        if envvar in self._environ:
            del self._environ[envvar]

    def keys(self):
        return self._environ.keys()

    def set(self, envvar, value):
        self[envvar] = value

    def unset(self, envvar):
        del self[envvar]

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        # Restore every touched variable; None means "was not set".
        for envvar, value in self._changed.items():
            if value is None:
                if envvar in self._environ:
                    del self._environ[envvar]
            else:
                self._environ[envvar] = value
        os.environ = self._environ
+
+
class DirsOnSysPath(object):
    """Context manager to temporarily add directories to sys.path.

    A copy of sys.path is taken on entry and the given directories are
    appended.  On exit both the original list object and its contents are
    restored, so *all* sys.path modifications made inside the block --
    including replacing the object itself -- are reverted.
    """

    def __init__(self, *paths):
        self.original_value = sys.path[:]
        self.original_object = sys.path
        sys.path.extend(paths)

    def __enter__(self):
        return self

    def __exit__(self, *ignore_exc):
        # Put back the very same list object, then its saved contents.
        sys.path = self.original_object
        sys.path[:] = self.original_value
+
+
class TransientResource(object):

    """Raise ResourceDenied if an exception is raised while the context manager
    is in effect that matches the specified exception and attributes."""

    def __init__(self, exc, **kwargs):
        self.exc = exc
        self.attrs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, type_=None, value=None, traceback=None):
        """If type_ is a subclass of self.exc and value has attributes matching
        self.attrs, raise ResourceDenied.  Otherwise let the exception
        propagate (if any)."""
        if type_ is None or not issubclass(self.exc, type_):
            return
        # With no attrs the match is unconditional.
        matched = all(hasattr(value, name) and getattr(value, name) == want
                      for name, want in self.attrs.iteritems())
        if matched:
            raise ResourceDenied("an optional resource is not available")
+
+
@contextlib.contextmanager
def transient_internet(resource_name, timeout=30.0, errnos=()):
    """Return a context manager that raises ResourceDenied when various issues
    with the Internet connection manifest themselves as exceptions.

    *errnos* may override the default set of captured errno values; a
    socket default timeout of *timeout* seconds is installed for the
    duration of the block (pass None to leave it alone).
    """
    default_errnos = [
        ('ECONNREFUSED', 111),
        ('ECONNRESET', 104),
        ('EHOSTUNREACH', 113),
        ('ENETUNREACH', 101),
        ('ETIMEDOUT', 110),
        # socket.create_connection() fails randomly with
        # EADDRNOTAVAIL on Travis CI.
        ('EADDRNOTAVAIL', 99),
    ]
    default_gai_errnos = [
        ('EAI_AGAIN', -3),
        ('EAI_FAIL', -4),
        ('EAI_NONAME', -2),
        ('EAI_NODATA', -5),
        # Windows defines EAI_NODATA as 11001 but idiotic getaddrinfo()
        # implementation actually returns WSANO_DATA i.e. 11004.
        ('WSANO_DATA', 11004),
    ]

    denied = ResourceDenied("Resource '%s' is not available" % resource_name)
    captured_errnos = errnos
    gai_errnos = []
    if not captured_errnos:
        # Fall back to the platform's values for the symbolic names above.
        captured_errnos = [getattr(errno, name, num)
                           for (name, num) in default_errnos]
        gai_errnos = [getattr(socket, name, num)
                      for (name, num) in default_gai_errnos]

    def filter_error(err):
        # Convert a recognized connectivity error into ResourceDenied.
        n = getattr(err, 'errno', None)
        if (isinstance(err, socket.timeout) or
            (isinstance(err, socket.gaierror) and n in gai_errnos) or
            n in captured_errnos):
            if not verbose:
                sys.stderr.write(denied.args[0] + "\n")
            raise denied

    old_timeout = socket.getdefaulttimeout()
    try:
        if timeout is not None:
            socket.setdefaulttimeout(timeout)
        yield
    except IOError as err:
        # urllib can wrap original socket errors multiple times (!), we must
        # unwrap to get at the original error.
        while True:
            a = err.args
            if len(a) >= 1 and isinstance(a[0], IOError):
                err = a[0]
            # The error can also be wrapped as args[1]:
            #    except socket.error as msg:
            #        raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
            elif len(a) >= 2 and isinstance(a[1], IOError):
                err = a[1]
            else:
                break
        filter_error(err)
        raise
    # XXX should we catch generic exceptions and look for their
    # __cause__ or __context__?
    finally:
        socket.setdefaulttimeout(old_timeout)
+
+
@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout and captured_stdin
    that temporarily replaces the sys stream *stream_name* with a StringIO.

    The fresh StringIO object is what the 'as' target receives.
    """
    import StringIO
    saved = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, saved)
+
def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as s:
           print "hello"
       self.assertEqual(s.getvalue(), "hello\n")
    """
    return captured_output("stdout")

def captured_stderr():
    # Like captured_stdout(), but replaces sys.stderr.
    return captured_output("stderr")

def captured_stdin():
    # Like captured_stdout(), but replaces sys.stdin.
    return captured_output("stdin")
+
def gc_collect():
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector.  (Even in CPython
    this can be the case in case of reference cycles.)  This means that __del__
    methods may be called later than expected and weakrefs may remain alive for
    longer than expected.  This function tries its best to force all garbage
    objects to disappear.
    """
    gc.collect()
    if is_jython:
        # Java's GC runs asynchronously; give it a moment to catch up.
        time.sleep(0.1)
    # Extra passes pick up objects freed by earlier finalizers.
    gc.collect()
    gc.collect()
+
+
# Object-header struct layouts used by the size-check helpers below.
# '2P' covers ob_refcnt + ob_type; debug builds (detected via
# sys.gettotalrefcount) prepend two more pointers for the refcount-
# tracking linked list.
_header = '2P'
if hasattr(sys, "gettotalrefcount"):
    _header = '2P' + _header
_vheader = _header + 'P'  # variable-size objects add ob_size

def calcobjsize(fmt):
    """Return the expected byte size of a fixed-size object with layout *fmt*."""
    return struct.calcsize(_header + fmt + '0P')

def calcvobjsize(fmt):
    """Return the expected byte size of a variable-size object with layout *fmt*."""
    return struct.calcsize(_vheader + fmt + '0P')


# Type-flag bits consulted by check_sizeof() to decide whether an object
# carries a GC head.
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
+
def check_sizeof(test, o, size):
    """Assert sys.getsizeof(o) == size, accounting for any GC header."""
    import _testcapi
    result = sys.getsizeof(o)
    # Heap types, and instances of GC-tracked types, carry a GC head.
    if type(o) == type:
        gc_tracked = o.__flags__ & _TPFLAGS_HEAPTYPE
    else:
        gc_tracked = type(o).__flags__ & _TPFLAGS_HAVE_GC
    if gc_tracked:
        size += _testcapi.SIZEOF_PYGC_HEAD
    test.assertEqual(result, size,
                     'wrong size for %s: got %d, expected %d'
                     % (type(o), result, size))
+
+
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.

def run_with_locale(catstr, *locales):
    """Decorator: run the test under the first settable locale in *locales*.

    *catstr* names a locale category constant (e.g. 'LC_ALL'); an invalid
    name raises AttributeError.  If the current locale cannot be queried,
    locale switching is skipped entirely.  The original locale is restored
    after the test.
    """
    def decorator(func):
        def inner(*args, **kwds):
            try:
                import locale
                category = getattr(locale, catstr)
                orig_locale = locale.setlocale(category)
            except AttributeError:
                # if the test author gives us an invalid category string
                raise
            except:
                # cannot retrieve original locale, so do nothing
                locale = orig_locale = None
            else:
                # Try each candidate until one can actually be set.
                for loc in locales:
                    try:
                        locale.setlocale(category, loc)
                        break
                    except:
                        pass

            # now run the function, resetting the locale on exceptions
            try:
                return func(*args, **kwds)
            finally:
                if locale and orig_locale:
                    locale.setlocale(category, orig_locale)
        inner.func_name = func.func_name
        inner.__doc__ = func.__doc__
        return inner
    return decorator
+
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.

def run_with_tz(tz):
    """Decorator: run the test with the TZ environment variable set to *tz*.

    Requires time.tzset() (POSIX only); without it the test is skipped.
    The previous TZ value -- or its absence -- is restored afterwards.
    """
    def decorator(func):
        def inner(*args, **kwds):
            if not hasattr(time, 'tzset'):
                raise unittest.SkipTest("tzset required")
            tzset = time.tzset
            orig_tz = os.environ.get('TZ')
            os.environ['TZ'] = tz
            tzset()

            # Run the function, restoring the timezone on the way out.
            try:
                return func(*args, **kwds)
            finally:
                if orig_tz is None:
                    del os.environ['TZ']
                else:
                    os.environ['TZ'] = orig_tz
                time.tzset()

        inner.__name__ = func.__name__
        inner.__doc__ = func.__doc__
        return inner
    return decorator
+
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.

# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G

MAX_Py_ssize_t = sys.maxsize

def set_memlimit(limit):
    """Parse a human-readable memory limit ('2g', '512M', ...) and install it.

    Sets the module globals real_max_memuse (the requested amount) and
    max_memuse (clamped to MAX_Py_ssize_t).  Raises ValueError for an
    unparsable string, or for a limit below ~2GB, which is too small to
    exercise the bigmem tests.
    """
    global max_memuse
    global real_max_memuse
    units = {
        'k': 1024,
        'm': _1M,
        'g': _1G,
        't': 1024*_1G,
    }
    parsed = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                      re.IGNORECASE | re.VERBOSE)
    if parsed is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    amount = int(float(parsed.group(1)) * units[parsed.group(3).lower()])
    real_max_memuse = amount
    if amount > MAX_Py_ssize_t:
        amount = MAX_Py_ssize_t
    if amount < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = amount
+
def bigmemtest(minsize, memuse, overhead=5*_1M):
    """Decorator for bigmem tests.

    'minsize' is the minimum useful size for the test (in arbitrary,
    test-interpreted units.)  'memuse' is the number of 'bytes per size' for
    the test, or a good estimate of it.  'overhead' specifies fixed overhead,
    independent of the testsize, and defaults to 5Mb.

    A suitable 'size' is computed from max_memuse and passed to the
    decorated test; the test is silently skipped (noisily if verbose)
    when even minsize would exceed the allowed memory use.
    """
    def decorator(f):
        def wrapper(self):
            if max_memuse:
                size = int((max_memuse - overhead) / memuse)
                if size < minsize:
                    # Really ought to print 'test skipped' or something
                    if verbose:
                        sys.stderr.write("Skipping %s because of memory "
                                         "constraint\n" % (f.__name__,))
                    return
                # Try to keep some breathing room in memory use
                size = max(size - 50 * _1M, minsize)
            else:
                # max_memuse is 0 (the default): still run the test with a
                # token size of a few kb to make sure it works at all,
                # while loudly guarding against accidental huge memory use.
                size = 5147
                self.assertFalse(size * memuse + overhead > 20 * _1M)
            return f(self, size)
        # Expose the sizing parameters for introspection by the test runner.
        wrapper.minsize = minsize
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator
+
def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
    """Like bigmemtest, but with an exact *size* instead of a computed one.

    With no real_max_memuse configured a token size is used; otherwise the
    test is skipped when size * memuse exceeds the configured limit
    (unless dry_run permits running anyway).
    """
    def decorator(f):
        def wrapper(self):
            testsize = size if real_max_memuse else 5147

            skip = ((real_max_memuse or not dry_run)
                    and real_max_memuse < testsize * memuse)
            if skip:
                if verbose:
                    sys.stderr.write("Skipping %s because of memory "
                                     "constraint\n" % (f.__name__,))
                return

            return f(self, testsize)
        # Expose the sizing parameters for introspection by the test runner.
        wrapper.size = size
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator
+
def bigaddrspacetest(f):
    """Decorator for tests that fill the address space.

    The wrapped test only runs when max_memuse allows addressing the full
    Py_ssize_t range; otherwise it is skipped (noisily when verbose).
    """
    # functools.wraps preserves the test's __name__/__doc__ so unittest
    # reports the real test name instead of 'wrapper' -- consistent with
    # run_with_tz() and reap_threads() in this module.
    @functools.wraps(f)
    def wrapper(self):
        if max_memuse < MAX_Py_ssize_t:
            if verbose:
                sys.stderr.write("Skipping %s because of memory "
                                 "constraint\n" % (f.__name__,))
        else:
            return f(self)
    return wrapper
+
+#=======================================================================
+# unittest integration.
+
+class BasicTestRunner:
+ def run(self, test):
+ result = unittest.TestResult()
+ test(result)
+ return result
+
def _id(obj):
    # Identity decorator: returned by the guards below when the decorated
    # test should run unmodified.
    return obj
+
def requires_resource(resource):
    """Return a decorator that skips the test unless *resource* is enabled.

    The 'gui' resource additionally requires a working GUI, checked via
    _is_gui_available().
    """
    if resource == 'gui' and not _is_gui_available():
        return unittest.skip(_is_gui_available.reason)
    if is_resource_enabled(resource):
        return _id
    else:
        return unittest.skip("resource {0!r} is not enabled".format(resource))
+
def cpython_only(test):
    """
    Decorator for tests only applicable on CPython.

    Shorthand for impl_detail(cpython=True).
    """
    return impl_detail(cpython=True)(test)
+
def impl_detail(msg=None, **guards):
    """Skip the test unless the running implementation matches *guards*.

    See check_impl_detail() for the guard semantics.  *msg* overrides the
    generated skip message; a '{0}' placeholder in it receives the guarded
    implementation names.
    """
    if check_impl_detail(**guards):
        return _id
    if msg is None:
        guardnames, default = _parse_guards(guards)
        template = ("implementation detail not available on {0}"
                    if default else
                    "implementation detail specific to {0}")
        msg = template.format(' or '.join(sorted(guardnames.keys())))
    return unittest.skip(msg)
+
def _parse_guards(guards):
    """Normalize impl_detail() guards.

    Returns a tuple ({platform_name: run_me}, default_value); with no
    guards the default is CPython-only.  All guard values must agree
    (all True or all False).
    """
    if not guards:
        return ({'cpython': True}, False)
    values = guards.values()
    is_true = values[0]
    assert values == [is_true] * len(guards)  # all True or all False
    return (guards, not is_true)
+
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
    """This function returns True or False depending on the host platform.
       Examples:
          if check_impl_detail():               # only on CPython (default)
          if check_impl_detail(jython=True):    # only on Jython
          if check_impl_detail(cpython=False):  # everywhere except on CPython
    """
    parsed, default = _parse_guards(guards)
    implementation = platform.python_implementation().lower()
    return parsed.get(implementation, default)
+
# ----------------------------------
# PyPy extension: you can run::
#     python ..../test_foo.py --pdb
# to get a pdb prompt in case of exceptions

ResultClass = unittest.TextTestRunner.resultclass

class TestResultWithPdb(ResultClass):
    """Test result class that drops into a pdb post-mortem session on
    error when '--pdb' was given on the command line."""

    def addError(self, testcase, exc_info):
        ResultClass.addError(self, testcase, exc_info)
        if '--pdb' in sys.argv:
            import pdb, traceback
            traceback.print_tb(exc_info[2])
            pdb.post_mortem(exc_info[2])
+
+# ----------------------------------
+
+def _filter_suite(suite, pred):
+ """Recursively filter test cases in a suite based on a predicate."""
+ newtests = []
+ for test in suite._tests:
+ if isinstance(test, unittest.TestSuite):
+ _filter_suite(test, pred)
+ newtests.append(test)
+ else:
+ if pred(test):
+ newtests.append(test)
+ suite._tests = newtests
+
def _run_suite(suite):
    """Run tests from a unittest.TestSuite-derived class.

    Raises TestDidNotRun when no test ran at all (and none was skipped),
    and TestFailed with a summarized error message on failure.
    """
    if verbose:
        runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
                                         failfast=failfast)
    else:
        runner = BasicTestRunner()

    result = runner.run(suite)
    if not result.testsRun and not result.skipped:
        # Filtering removed every test: flag it rather than pass silently.
        raise TestDidNotRun
    if not result.wasSuccessful():
        # With exactly one problem, report its traceback text directly.
        if len(result.errors) == 1 and not result.failures:
            err = result.errors[0][1]
        elif len(result.failures) == 1 and not result.errors:
            err = result.failures[0][1]
        else:
            err = "multiple errors occurred"
            if not verbose:
                err += "; run in verbose mode for details"
        raise TestFailed(err)
+
+
# By default, don't filter tests
_match_test_func = None      # predicate installed by set_match_tests()
_match_test_patterns = None  # the patterns that predicate was built from


def match_test(test):
    # Function used by support.run_unittest() and regrtest --list-cases.
    # With no filter installed every test matches.
    if _match_test_func is None:
        return True
    else:
        return _match_test_func(test.id())
+
+
+def _is_full_match_test(pattern):
+ # If a pattern contains at least one dot, it's considered
+ # as a full test identifier.
+ # Example: 'test.test_os.FileTests.test_access'.
+ #
+ # Reject patterns which contain fnmatch patterns: '*', '?', '[...]'
+ # or '[!...]'. For example, reject 'test_access*'.
+ return ('.' in pattern) and (not re.search(r'[?*\[\]]', pattern))
+
+
def set_match_tests(patterns):
    """Install the test-name filter consulted by match_test().

    Full dotted identifiers are matched exactly via a set; fnmatch-style
    patterns are compiled into a single alternation regex that is also
    tried against each dotted component of the test id.  Passing None or
    an empty sequence removes the filter.
    """
    global _match_test_func, _match_test_patterns

    if patterns == _match_test_patterns:
        # No change: no need to recompile patterns.
        return

    if not patterns:
        func = None
        # set_match_tests(None) behaves as set_match_tests(())
        patterns = ()
    elif all(map(_is_full_match_test, patterns)):
        # Simple case: all patterns are full test identifier.
        # The test.bisect utility only uses such full test identifiers.
        func = set(patterns).__contains__
    else:
        regex = '|'.join(map(fnmatch.translate, patterns))
        # The search *is* case sensitive on purpose:
        # don't use flags=re.IGNORECASE
        regex_match = re.compile(regex).match

        def match_test_regex(test_id):
            if regex_match(test_id):
                # The regex matchs the whole identifier like
                # 'test.test_os.FileTests.test_access'
                return True
            else:
                # Try to match parts of the test identifier.
                # For example, split 'test.test_os.FileTests.test_access'
                # into: 'test', 'test_os', 'FileTests' and 'test_access'.
                return any(map(regex_match, test_id.split(".")))

        func = match_test_regex

    # Create a copy since patterns can be mutable and so modified later
    _match_test_patterns = tuple(patterns)
    _match_test_func = func
+
+
# ----------------------------------
# PyPy extension: you can run::
#     python ..../test_foo.py --filter bar
# to run only the test cases whose name contains bar

def filter_maybe(suite):
    """Honour a '--filter SUBSTRING' command-line option, if present.

    Returns *suite* unchanged when no filter is given; otherwise a new
    TestSuite holding only the tests whose method name contains SUBSTRING.
    """
    try:
        substring = sys.argv[sys.argv.index('--filter') + 1]
    except (ValueError, IndexError):
        return suite
    selected = [t for t in linearize_suite(suite)
                if substring in t._testMethodName]
    return unittest.TestSuite(selected)
+
+def linearize_suite(suite_or_test):
+ try:
+ it = iter(suite_or_test)
+ except TypeError:
+ yield suite_or_test
+ return
+ for subsuite in it:
+ for item in linearize_suite(subsuite):
+ yield item
+
# ----------------------------------

def run_unittest(*classes):
    """Run tests from unittest.TestCase-derived classes.

    Each argument may be a TestSuite/TestCase instance, a TestCase
    subclass, or the name (str) of a module already present in
    sys.modules, whose test cases are then collected.  The assembled
    suite is filtered through match_test() before running.
    """
    valid_types = (unittest.TestSuite, unittest.TestCase)
    suite = unittest.TestSuite()
    for cls in classes:
        if isinstance(cls, str):
            if cls in sys.modules:
                suite.addTest(unittest.findTestCases(sys.modules[cls]))
            else:
                raise ValueError("str arguments must be keys in sys.modules")
        elif isinstance(cls, valid_types):
            suite.addTest(cls)
        else:
            suite.addTest(unittest.makeSuite(cls))
    _filter_suite(suite, match_test)
    _run_suite(suite)
+
#=======================================================================
# Check for the presence of docstrings.

# Docstrings are assumed present on non-CPython implementations and on
# Windows builds; elsewhere it depends on the WITH_DOC_STRINGS configure
# option.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
                   sys.platform == 'win32' or
                   sysconfig.get_config_var('WITH_DOC_STRINGS'))

requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
                                          "test requires docstrings")
+
+
#=======================================================================
# doctest driver.

def run_doctest(module, verbosity=None):
    """Run doctest on the given module.  Return (#failures, #tests).

    If optional argument verbosity is not specified (or is None), pass
    test.support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).

    Raises TestFailed when any doctest fails.
    """

    import doctest

    if verbosity is None:
        verbosity = verbose
    else:
        # Any explicit value means "let doctest decide from sys.argv".
        verbosity = None

    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        f, t = doctest.testmod(module, verbose=verbosity)
        if f:
            raise TestFailed("%d of %d doctests failed" % (f, t))
    finally:
        sys.stdout = save_stdout
    if verbose:
        print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
    return f, t
+
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R

# Flag used by saved_test_environment of test.libregrtest.save_env,
# to check if a test modified the environment. The flag should be set to False
# before running a new test.
#
# For example, threading_cleanup() sets the flag if the function fails
# to cleanup threads.
environment_altered = False

# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.

def threading_setup():
    """Return a 1-tuple snapshot of the live thread count.

    Pass the result to threading_cleanup() after the test.  When the
    thread module is unavailable, a fixed placeholder count is returned.
    """
    if thread:
        return thread._count(),
    else:
        return 1,
+
def threading_cleanup(nb_threads):
    """Wait (up to ~1 second) for the thread count to drop to *nb_threads*.

    *nb_threads* is the snapshot value returned by threading_setup().
    If threads are still leaked when the grace period expires, a warning
    is written to stderr and the module-level environment_altered flag is
    set so the test run can be reported as having modified the execution
    environment (this resolves the old "XXX print a warning?" TODO).
    """
    global environment_altered

    if not thread:
        return

    _MAX_COUNT = 10
    for count in range(_MAX_COUNT):
        n = thread._count()
        if n == nb_threads:
            break
        time.sleep(0.1)
    else:
        # Grace period expired with threads still running.
        environment_altered = True
        sys.stderr.write("Warning -- threading_cleanup() detected %s "
                         "leaked thread(s)\n" % (thread._count() - nb_threads))
+
def reap_threads(func):
    """Use this function when threads are being used.  This will
    ensure that the threads are cleaned up even when the test fails.
    If threading is unavailable this function does nothing.
    """
    if not thread:
        # No thread module: return the function undecorated.
        return func

    @functools.wraps(func)
    def decorator(*args):
        # Snapshot the thread count before, and wait for stragglers after,
        # even when the wrapped test raises.
        key = threading_setup()
        try:
            return func(*args)
        finally:
            threading_cleanup(*key)
    return decorator
+
+
@contextlib.contextmanager
def wait_threads_exit(timeout=60.0):
    """
    bpo-31234: Context manager to wait until all threads created in the with
    statement exit.

    Use thread.count() to check if threads exited.  Indirectly, wait until
    threads exit the internal t_bootstrap() C function of the thread module.

    threading_setup() and threading_cleanup() are designed to emit a warning
    if a test leaves running threads in the background.  This context manager
    is designed to cleanup threads started by the thread.start_new_thread()
    which doesn't allow to wait for thread exit, whereas thread.Thread has a
    join() method.

    Raises AssertionError when threads are still alive after *timeout*
    seconds.
    """
    old_count = thread._count()
    try:
        yield
    finally:
        start_time = time.time()
        deadline = start_time + timeout
        # Poll until the count drops back to (at most) its starting value.
        while True:
            count = thread._count()
            if count <= old_count:
                break
            if time.time() > deadline:
                dt = time.time() - start_time
                msg = ("wait_threads() failed to cleanup %s "
                       "threads after %.1f seconds "
                       "(count: %s, old count: %s)"
                       % (count - old_count, dt, count, old_count))
                raise AssertionError(msg)
            time.sleep(0.010)
            # Encourage prompt deallocation of thread state on
            # non-refcounting implementations.
            gc_collect()
+
+
def reap_children():
    """Use this function at the end of test_main() whenever sub-processes
    are started.  This will help ensure that no extra children (zombies)
    stick around to hog resources and create problems when looking
    for refleaks.
    """
    # Reap all our dead child processes so we don't leave zombies around.
    # These hog resources and might be causing some of the buildbots to die.
    if hasattr(os, 'waitpid'):
        any_process = -1
        while True:
            try:
                # Reap one ready child without blocking; pid == 0 means
                # children exist but none has exited yet.
                pid, status = os.waitpid(any_process, os.WNOHANG)
                if pid == 0:
                    break
            except OSError:
                # No children left (ECHILD), or waitpid rejects these
                # arguments on this platform (e.g. Windows): we're done.
                # (The previous bare 'except:' also swallowed
                # KeyboardInterrupt/SystemExit, which we must not hide.)
                break
+
@contextlib.contextmanager
def start_threads(threads, unlock=None):
    """Context manager: start *threads*, and join them all on exit.

    *unlock*, when given, is called before joining (typically to release
    a lock the workers are blocked on).  If any thread fails to start,
    the exception propagates after reporting how many did start.  Threads
    that refuse to die within ~15 minutes cause an AssertionError.
    """
    threads = list(threads)
    started = []
    try:
        try:
            for t in threads:
                t.start()
                started.append(t)
        except:
            if verbose:
                print("Can't start %d threads, only %d threads started" %
                      (len(threads), len(started)))
            raise
        yield
    finally:
        if unlock:
            unlock()
        # Join in rounds of one minute each, keeping only the threads
        # still alive after every round.
        endtime = starttime = time.time()
        for timeout in range(1, 16):
            endtime += 60
            for t in started:
                t.join(max(endtime - time.time(), 0.01))
            started = [t for t in started if t.isAlive()]
            if not started:
                break
            if verbose:
                print('Unable to join %d threads during a period of '
                      '%d minutes' % (len(started), timeout))
        started = [t for t in started if t.isAlive()]
        if started:
            raise AssertionError('Unable to join %d threads' % len(started))
+
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
    """Temporarily replace ``obj.attr`` with *new_val*.

    Usage:

        with swap_attr(obj, "attr", 5):
            ...

    Inside the block obj.attr is new_val; on exit the previous value is
    restored, or the attribute is deleted again if it did not exist
    before.  The previous value (or None if there was none) is what the
    'as' target receives.
    """
    was_missing = not hasattr(obj, attr)
    previous = None if was_missing else getattr(obj, attr)
    setattr(obj, attr, new_val)
    try:
        yield previous
    finally:
        if was_missing:
            # Delete only if still present (the block may have removed it).
            if hasattr(obj, attr):
                delattr(obj, attr)
        else:
            setattr(obj, attr, previous)
+
@contextlib.contextmanager
def swap_item(obj, item, new_val):
    """Temporarily replace ``obj[item]`` with *new_val*.

    Usage:

        with swap_item(obj, "item", 5):
            ...

    Inside the block obj["item"] is new_val; on exit the previous value
    is restored, or the key is deleted again if it did not exist before.
    The previous value (or None if there was none) is what the 'as'
    target receives.
    """
    was_missing = item not in obj
    previous = None if was_missing else obj[item]
    obj[item] = new_val
    try:
        yield previous
    finally:
        if was_missing:
            # Delete only if still present (the block may have removed it).
            if item in obj:
                del obj[item]
        else:
            obj[item] = previous
+
+def py3k_bytes(b):
+ """Emulate the py3k bytes() constructor.
+
+ NOTE: This is only a best effort function.
+ """
+ try:
+ # memoryview?
+ return b.tobytes()
+ except AttributeError:
+ try:
+ # iterable of ints?
+ return b"".join(chr(x) for x in b)
+ except TypeError:
+ return bytes(b)
+
+requires_type_collecting = unittest.skipIf(hasattr(sys, 'getcounts'),
+ 'types are immortal if COUNT_ALLOCS is defined')
+
+def args_from_interpreter_flags():
+ """Return a list of command-line arguments reproducing the current
+ settings in sys.flags."""
+ import subprocess
+ return subprocess._args_from_interpreter_flags()
+
+def strip_python_stderr(stderr):
+ """Strip the stderr of a Python process from potential debug output
+ emitted by the interpreter.
+
+ This will typically be run on the result of the communicate() method
+ of a subprocess.Popen object.
+ """
+ stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
+ return stderr
+
+
+def check_free_after_iterating(test, iter, cls, args=()):
+ class A(cls):
+ def __del__(self):
+ done[0] = True
+ try:
+ next(it)
+ except StopIteration:
+ pass
+
+ done = [False]
+ it = iter(A(*args))
+ # Issue 26494: Shouldn't crash
+ test.assertRaises(StopIteration, next, it)
+ # The sequence should be deallocated just after the end of iterating
+ gc_collect()
+ test.assertTrue(done[0])
+
+@contextlib.contextmanager
+def disable_gc():
+ have_gc = gc.isenabled()
+ gc.disable()
+ try:
+ yield
+ finally:
+ if have_gc:
+ gc.enable()
+
+
+def python_is_optimized():
+ """Find if Python was built with optimizations."""
+ cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
+ final_opt = ""
+ for opt in cflags.split():
+ if opt.startswith('-O'):
+ final_opt = opt
+ return final_opt not in ('', '-O0', '-Og')
+
+
+class SuppressCrashReport:
+ """Try to prevent a crash report from popping up.
+
+ On Windows, don't display the Windows Error Reporting dialog. On UNIX,
+ disable the creation of coredump file.
+ """
+ old_value = None
+ old_modes = None
+
+ def __enter__(self):
+ """On Windows, disable Windows Error Reporting dialogs using
+ SetErrorMode.
+
+ On UNIX, try to save the previous core file size limit, then set
+ soft limit to 0.
+ """
+ if sys.platform.startswith('win'):
+ # see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx
+ # GetErrorMode is not available on Windows XP and Windows Server 2003,
+ # but SetErrorMode returns the previous value, so we can use that
+ import ctypes
+ self._k32 = ctypes.windll.kernel32
+ SEM_NOGPFAULTERRORBOX = 0x02
+ self.old_value = self._k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
+ self._k32.SetErrorMode(self.old_value | SEM_NOGPFAULTERRORBOX)
+
+ # Suppress assert dialogs in debug builds
+ # (see http://bugs.python.org/issue23314)
+ try:
+ import _testcapi
+ _testcapi.CrtSetReportMode
+ except (AttributeError, ImportError):
+ # no _testcapi or a release build
+ pass
+ else:
+ self.old_modes = {}
+ for report_type in [_testcapi.CRT_WARN,
+ _testcapi.CRT_ERROR,
+ _testcapi.CRT_ASSERT]:
+ old_mode = _testcapi.CrtSetReportMode(report_type,
+ _testcapi.CRTDBG_MODE_FILE)
+ old_file = _testcapi.CrtSetReportFile(report_type,
+ _testcapi.CRTDBG_FILE_STDERR)
+ self.old_modes[report_type] = old_mode, old_file
+
+ else:
+ try:
+ import resource
+ except ImportError:
+ resource = None
+
+ if resource is not None:
+ try:
+ self.old_value = resource.getrlimit(resource.RLIMIT_CORE)
+ resource.setrlimit(resource.RLIMIT_CORE,
+ (0, self.old_value[1]))
+ except (ValueError, OSError):
+ pass
+
+ if sys.platform == 'darwin':
+ # Check if the 'Crash Reporter' on OSX was configured
+ # in 'Developer' mode and warn that it will get triggered
+ # when it is.
+ #
+ # This assumes that this context manager is used in tests
+ # that might trigger the next manager.
+ import subprocess
+ cmd = ['/usr/bin/defaults', 'read',
+ 'com.apple.CrashReporter', 'DialogType']
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout = proc.communicate()[0]
+ if stdout.strip() == b'developer':
+ sys.stdout.write("this test triggers the Crash Reporter, "
+ "that is intentional")
+ sys.stdout.flush()
+
+ return self
+
+ def __exit__(self, *ignore_exc):
+ """Restore Windows ErrorMode or core file behavior to initial value."""
+ if self.old_value is None:
+ return
+
+ if sys.platform.startswith('win'):
+ self._k32.SetErrorMode(self.old_value)
+
+ if self.old_modes:
+ import _testcapi
+ for report_type, (old_mode, old_file) in self.old_modes.items():
+ _testcapi.CrtSetReportMode(report_type, old_mode)
+ _testcapi.CrtSetReportFile(report_type, old_file)
+ else:
+ import resource
+ try:
+ resource.setrlimit(resource.RLIMIT_CORE, self.old_value)
+ except (ValueError, OSError):
+ pass
+
+
+def _crash_python():
+ """Deliberate crash of Python.
+
+ Python can be killed by a segmentation fault (SIGSEGV), a bus error
+ (SIGBUS), or a different error depending on the platform.
+
+ Use SuppressCrashReport() to prevent a crash report from popping up.
+ """
+
+ import _testcapi
+ with SuppressCrashReport():
+ _testcapi._read_null()
+
+
+def fd_count():
+ """Count the number of open file descriptors.
+ """
+ if sys.platform.startswith(('linux', 'freebsd')):
+ try:
+ names = os.listdir("/proc/self/fd")
+ # Substract one because listdir() opens internally a file
+ # descriptor to list the content of the /proc/self/fd/ directory.
+ return len(names) - 1
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ MAXFD = 256
+ if hasattr(os, 'sysconf'):
+ try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+ except OSError:
+ pass
+
+ old_modes = None
+ if sys.platform == 'win32':
+ # bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
+ # on invalid file descriptor if Python is compiled in debug mode
+ try:
+ import msvcrt
+ msvcrt.CrtSetReportMode
+ except (AttributeError, ImportError):
+ # no msvcrt or a release build
+ pass
+ else:
+ old_modes = {}
+ for report_type in (msvcrt.CRT_WARN,
+ msvcrt.CRT_ERROR,
+ msvcrt.CRT_ASSERT):
+ old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
+
+ try:
+ count = 0
+ for fd in range(MAXFD):
+ try:
+ # Prefer dup() over fstat(). fstat() can require input/output
+ # whereas dup() doesn't.
+ fd2 = os.dup(fd)
+ except OSError as e:
+ if e.errno != errno.EBADF:
+ raise
+ else:
+ os.close(fd2)
+ count += 1
+ finally:
+ if old_modes is not None:
+ for report_type in (msvcrt.CRT_WARN,
+ msvcrt.CRT_ERROR,
+ msvcrt.CRT_ASSERT):
+ msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
+
+ return count
+
+
+class SaveSignals:
+ """
+ Save an restore signal handlers.
+
+ This class is only able to save/restore signal handlers registered
+ by the Python signal module: see bpo-13285 for "external" signal
+ handlers.
+ """
+
+ def __init__(self):
+ import signal
+ self.signal = signal
+ self.signals = list(range(1, signal.NSIG))
+ # SIGKILL and SIGSTOP signals cannot be ignored nor catched
+ for signame in ('SIGKILL', 'SIGSTOP'):
+ try:
+ signum = getattr(signal, signame)
+ except AttributeError:
+ continue
+ self.signals.remove(signum)
+ self.handlers = {}
+
+ def save(self):
+ for signum in self.signals:
+ handler = self.signal.getsignal(signum)
+ if handler is None:
+ # getsignal() returns None if a signal handler was not
+ # registered by the Python signal module,
+ # and the handler is not SIG_DFL nor SIG_IGN.
+ #
+ # Ignore the signal: we cannot restore the handler.
+ continue
+ self.handlers[signum] = handler
+
+ def restore(self):
+ for signum, handler in self.handlers.items():
+ self.signal.signal(signum, handler)
diff --git a/lib-python/2.7/test/support/script_helper.py b/lib-python/2.7/test/support/script_helper.py
new file mode 100644
index 0000000000..e06cdc32d1
--- /dev/null
+++ b/lib-python/2.7/test/support/script_helper.py
@@ -0,0 +1,170 @@
+# Common utility functions used by various script execution tests
+# e.g. test_cmd_line, test_cmd_line_script and test_runpy
+
+import sys
+import os
+import re
+import os.path
+import tempfile
+import subprocess
+import py_compile
+import contextlib
+import shutil
+try:
+ import zipfile
+except ImportError:
+ # If Python is build without Unicode support, importing _io will
+ # fail, which, in turn, means that zipfile cannot be imported
+ # Most of this module can then still be used.
+ pass
+
+from test.support import strip_python_stderr
+
+# Executing the interpreter in a subprocess
+def _assert_python(expected_success, *args, **env_vars):
+ cmd_line = [sys.executable]
+ if not env_vars:
+ cmd_line.append('-E')
+ cmd_line.extend(args)
+ # Need to preserve the original environment, for in-place testing of
+ # shared library builds.
+ env = os.environ.copy()
+ env.update(env_vars)
+ p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ env=env)
+ try:
+ out, err = p.communicate()
+ finally:
+ subprocess._cleanup()
+ p.stdout.close()
+ p.stderr.close()
+ rc = p.returncode
+ err = strip_python_stderr(err)
+ if (rc and expected_success) or (not rc and not expected_success):
+ raise AssertionError(
+ "Process return code is %d, "
+ "stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
+ return rc, out, err
+
+def assert_python_ok(*args, **env_vars):
+ """
+ Assert that running the interpreter with `args` and optional environment
+ variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
+ """
+ return _assert_python(True, *args, **env_vars)
+
+def assert_python_failure(*args, **env_vars):
+ """
+ Assert that running the interpreter with `args` and optional environment
+ variables `env_vars` fails and return a (return code, stdout, stderr) tuple.
+ """
+ return _assert_python(False, *args, **env_vars)
+
+def python_exit_code(*args):
+ cmd_line = [sys.executable, '-E']
+ cmd_line.extend(args)
+ with open(os.devnull, 'w') as devnull:
+ return subprocess.call(cmd_line, stdout=devnull,
+ stderr=subprocess.STDOUT)
+
+def spawn_python(*args, **kwargs):
+ cmd_line = [sys.executable, '-E']
+ cmd_line.extend(args)
+ return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ **kwargs)
+
+def kill_python(p):
+ p.stdin.close()
+ data = p.stdout.read()
+ p.stdout.close()
+ # try to cleanup the child so we don't appear to leak when running
+ # with regrtest -R.
+ p.wait()
+ subprocess._cleanup()
+ return data
+
+def run_python(*args, **kwargs):
+ if __debug__:
+ p = spawn_python(*args, **kwargs)
+ else:
+ p = spawn_python('-O', *args, **kwargs)
+ stdout_data = kill_python(p)
+ return p.wait(), stdout_data
+
+# Script creation utilities
+@contextlib.contextmanager
+def temp_dir():
+ dirname = tempfile.mkdtemp()
+ dirname = os.path.realpath(dirname)
+ try:
+ yield dirname
+ finally:
+ shutil.rmtree(dirname)
+
+def make_script(script_dir, script_basename, source):
+ script_filename = script_basename+os.extsep+'py'
+ script_name = os.path.join(script_dir, script_filename)
+ script_file = open(script_name, 'w')
+ script_file.write(source)
+ script_file.close()
+ return script_name
+
+def compile_script(script_name):
+ py_compile.compile(script_name, doraise=True)
+ if __debug__:
+ compiled_name = script_name + 'c'
+ else:
+ compiled_name = script_name + 'o'
+ return compiled_name
+
+def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
+ zip_filename = zip_basename+os.extsep+'zip'
+ zip_name = os.path.join(zip_dir, zip_filename)
+ zip_file = zipfile.ZipFile(zip_name, 'w')
+ if name_in_zip is None:
+ name_in_zip = os.path.basename(script_name)
+ zip_file.write(script_name, name_in_zip)
+ zip_file.close()
+ #if test.test_support.verbose:
+ # zip_file = zipfile.ZipFile(zip_name, 'r')
+ # print 'Contents of %r:' % zip_name
+ # zip_file.printdir()
+ # zip_file.close()
+ return zip_name, os.path.join(zip_name, name_in_zip)
+
+def make_pkg(pkg_dir, init_source=''):
+ os.mkdir(pkg_dir)
+ make_script(pkg_dir, '__init__', init_source)
+
+def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
+ source, depth=1, compiled=False):
+ unlink = []
+ init_name = make_script(zip_dir, '__init__', '')
+ unlink.append(init_name)
+ init_basename = os.path.basename(init_name)
+ script_name = make_script(zip_dir, script_basename, source)
+ unlink.append(script_name)
+ if compiled:
+ init_name = compile_script(init_name)
+ script_name = compile_script(script_name)
+ unlink.extend((init_name, script_name))
+ pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
+ script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
+ zip_filename = zip_basename+os.extsep+'zip'
+ zip_name = os.path.join(zip_dir, zip_filename)
+ zip_file = zipfile.ZipFile(zip_name, 'w')
+ for name in pkg_names:
+ init_name_in_zip = os.path.join(name, init_basename)
+ zip_file.write(init_name, init_name_in_zip)
+ zip_file.write(script_name, script_name_in_zip)
+ zip_file.close()
+ for name in unlink:
+ os.unlink(name)
+ #if test.test_support.verbose:
+ # zip_file = zipfile.ZipFile(zip_name, 'r')
+ # print 'Contents of %r:' % zip_name
+ # zip_file.printdir()
+ # zip_file.close()
+ return zip_name, os.path.join(zip_name, script_name_in_zip)
diff --git a/lib-python/2.7/test/talos-2019-0758.pem b/lib-python/2.7/test/talos-2019-0758.pem
new file mode 100644
index 0000000000..13b95a77fd
--- /dev/null
+++ b/lib-python/2.7/test/talos-2019-0758.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApKgAwIBAgIBAjALBgkqhkiG9w0BAQswHzELMAkGA1UEBhMCVUsxEDAO
+BgNVBAMTB2NvZHktY2EwHhcNMTgwNjE4MTgwMDU4WhcNMjgwNjE0MTgwMDU4WjA7
+MQswCQYDVQQGEwJVSzEsMCoGA1UEAxMjY29kZW5vbWljb24tdm0tMi50ZXN0Lmxh
+bC5jaXNjby5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC63fGB
+J80A9Av1GB0bptslKRIUtJm8EeEu34HkDWbL6AJY0P8WfDtlXjlPaLqFa6sqH6ES
+V48prSm1ZUbDSVL8R6BYVYpOlK8/48xk4pGTgRzv69gf5SGtQLwHy8UPBKgjSZoD
+5a5k5wJXGswhKFFNqyyxqCvWmMnJWxXTt2XDCiWc4g4YAWi4O4+6SeeHVAV9rV7C
+1wxqjzKovVe2uZOHjKEzJbbIU6JBPb6TRfMdRdYOw98n1VXDcKVgdX2DuuqjCzHP
+WhU4Tw050M9NaK3eXp4Mh69VuiKoBGOLSOcS8reqHIU46Reg0hqeL8LIL6OhFHIF
+j7HR6V1X6F+BfRS/AgMBAAGjgdYwgdMwCQYDVR0TBAIwADAdBgNVHQ4EFgQUOktp
+HQjxDXXUg8prleY9jeLKeQ4wTwYDVR0jBEgwRoAUx6zgPygZ0ZErF9sPC4+5e2Io
+UU+hI6QhMB8xCzAJBgNVBAYTAlVLMRAwDgYDVQQDEwdjb2R5LWNhggkA1QEAuwb7
+2s0wCQYDVR0SBAIwADAuBgNVHREEJzAlgiNjb2Rlbm9taWNvbi12bS0yLnRlc3Qu
+bGFsLmNpc2NvLmNvbTAOBgNVHQ8BAf8EBAMCBaAwCwYDVR0fBAQwAjAAMAsGCSqG
+SIb3DQEBCwOCAQEAvqantx2yBlM11RoFiCfi+AfSblXPdrIrHvccepV4pYc/yO6p
+t1f2dxHQb8rWH3i6cWag/EgIZx+HJQvo0rgPY1BFJsX1WnYf1/znZpkUBGbVmlJr
+t/dW1gSkNS6sPsM0Q+7HPgEv8CPDNK5eo7vU2seE0iWOkxSyVUuiCEY9ZVGaLVit
+p0C78nZ35Pdv4I+1cosmHl28+es1WI22rrnmdBpH8J1eY6WvUw2xuZHLeNVN0TzV
+Q3qq53AaCWuLOD1AjESWuUCxMZTK9DPS4JKXTK8RLyDeqOvJGjsSWp3kL0y3GaQ+
+10T1rfkKJub2+m9A9duin1fn6tHc2wSvB7m3DA==
+-----END CERTIFICATE-----
diff --git a/lib-python/2.7/test/test_abc.py b/lib-python/2.7/test/test_abc.py
index 6a8c3a1327..dbba37cdb6 100644
--- a/lib-python/2.7/test/test_abc.py
+++ b/lib-python/2.7/test/test_abc.py
@@ -208,6 +208,7 @@ class TestABC(unittest.TestCase):
C()
self.assertEqual(B.counter, 1)
+ @test_support.requires_type_collecting
def test_cache_leak(self):
# See issue #2521.
class A(object):
diff --git a/lib-python/2.7/test/test_aifc.py b/lib-python/2.7/test/test_aifc.py
index 8c4b30f5c1..7845d0121d 100644
--- a/lib-python/2.7/test/test_aifc.py
+++ b/lib-python/2.7/test/test_aifc.py
@@ -129,6 +129,18 @@ class AifcMiscTest(audiotests.AudioTests, unittest.TestCase):
#This file contains chunk types aifc doesn't recognize.
self.f = aifc.open(findfile('Sine-1000Hz-300ms.aif'))
+ def test_close_opened_files_on_error(self):
+ non_aifc_file = findfile('pluck-pcm8.wav', subdir='audiodata')
+
+ class Aifc(aifc.Aifc_read):
+ def __init__(self):
+ pass
+
+ a = Aifc()
+ with self.assertRaises(aifc.Error):
+ aifc.Aifc_read.__init__(a, non_aifc_file)
+ self.assertTrue(a._file.closed)
+
def test_write_markers_values(self):
fout = aifc.open(io.BytesIO(), 'wb')
self.assertEqual(fout.getmarkers(), None)
@@ -202,6 +214,14 @@ class AIFCLowLevelTest(unittest.TestCase):
b = io.BytesIO('FORM' + struct.pack('>L', 4) + 'AIFF')
self.assertRaises(aifc.Error, aifc.open, b)
+ def test_read_no_ssnd_chunk(self):
+ b = b'FORM' + struct.pack('>L', 4) + b'AIFC'
+ b += b'COMM' + struct.pack('>LhlhhLL', 38, 0, 0, 0, 0, 0, 0)
+ b += b'NONE' + struct.pack('B', 14) + b'not compressed' + b'\x00'
+ with self.assertRaisesRegexp(aifc.Error, 'COMM chunk and/or SSND chunk'
+ ' missing'):
+ aifc.open(io.BytesIO(b))
+
def test_read_wrong_compression_type(self):
b = 'FORM' + struct.pack('>L', 4) + 'AIFC'
b += 'COMM' + struct.pack('>LhlhhLL', 23, 0, 0, 0, 0, 0, 0)
diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py
index 176ce40d07..048b62207c 100644
--- a/lib-python/2.7/test/test_argparse.py
+++ b/lib-python/2.7/test/test_argparse.py
@@ -4656,7 +4656,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs=None, metavar=tuple())
def test_nargs_None_metavar_length1(self):
- self.do_test_no_exception(nargs=None, metavar=("1"))
+ self.do_test_no_exception(nargs=None, metavar=("1",))
def test_nargs_None_metavar_length2(self):
self.do_test_exception(nargs=None, metavar=("1", "2"))
@@ -4673,7 +4673,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs="?", metavar=tuple())
def test_nargs_optional_metavar_length1(self):
- self.do_test_no_exception(nargs="?", metavar=("1"))
+ self.do_test_no_exception(nargs="?", metavar=("1",))
def test_nargs_optional_metavar_length2(self):
self.do_test_exception(nargs="?", metavar=("1", "2"))
@@ -4690,7 +4690,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs="*", metavar=tuple())
def test_nargs_zeroormore_metavar_length1(self):
- self.do_test_no_exception(nargs="*", metavar=("1"))
+ self.do_test_exception(nargs="*", metavar=("1",))
def test_nargs_zeroormore_metavar_length2(self):
self.do_test_no_exception(nargs="*", metavar=("1", "2"))
@@ -4707,7 +4707,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs="+", metavar=tuple())
def test_nargs_oneormore_metavar_length1(self):
- self.do_test_no_exception(nargs="+", metavar=("1"))
+ self.do_test_exception(nargs="+", metavar=("1",))
def test_nargs_oneormore_metavar_length2(self):
self.do_test_no_exception(nargs="+", metavar=("1", "2"))
@@ -4724,7 +4724,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_no_exception(nargs="...", metavar=tuple())
def test_nargs_remainder_metavar_length1(self):
- self.do_test_no_exception(nargs="...", metavar=("1"))
+ self.do_test_no_exception(nargs="...", metavar=("1",))
def test_nargs_remainder_metavar_length2(self):
self.do_test_no_exception(nargs="...", metavar=("1", "2"))
@@ -4741,7 +4741,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs="A...", metavar=tuple())
def test_nargs_parser_metavar_length1(self):
- self.do_test_no_exception(nargs="A...", metavar=("1"))
+ self.do_test_no_exception(nargs="A...", metavar=("1",))
def test_nargs_parser_metavar_length2(self):
self.do_test_exception(nargs="A...", metavar=("1", "2"))
@@ -4758,7 +4758,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs=1, metavar=tuple())
def test_nargs_1_metavar_length1(self):
- self.do_test_no_exception(nargs=1, metavar=("1"))
+ self.do_test_no_exception(nargs=1, metavar=("1",))
def test_nargs_1_metavar_length2(self):
self.do_test_exception(nargs=1, metavar=("1", "2"))
@@ -4775,7 +4775,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs=2, metavar=tuple())
def test_nargs_2_metavar_length1(self):
- self.do_test_no_exception(nargs=2, metavar=("1"))
+ self.do_test_exception(nargs=2, metavar=("1",))
def test_nargs_2_metavar_length2(self):
self.do_test_no_exception(nargs=2, metavar=("1", "2"))
@@ -4792,7 +4792,7 @@ class TestAddArgumentMetavar(TestCase):
self.do_test_exception(nargs=3, metavar=tuple())
def test_nargs_3_metavar_length1(self):
- self.do_test_no_exception(nargs=3, metavar=("1"))
+ self.do_test_exception(nargs=3, metavar=("1",))
def test_nargs_3_metavar_length2(self):
self.do_test_exception(nargs=3, metavar=("1", "2"))
@@ -4819,6 +4819,30 @@ class TestImportStar(TestCase):
]
self.assertEqual(sorted(items), sorted(argparse.__all__))
+
+class TestWrappingMetavar(TestCase):
+
+ def setUp(self):
+ self.parser = ErrorRaisingArgumentParser(
+ 'this_is_spammy_prog_with_a_long_name_sorry_about_the_name'
+ )
+ # this metavar was triggering library assertion errors due to usage
+ # message formatting incorrectly splitting on the ] chars within
+ metavar = '<http[s]://example:1234>'
+ self.parser.add_argument('--proxy', metavar=metavar)
+
+ def test_help_with_metavar(self):
+ help_text = self.parser.format_help()
+ self.assertEqual(help_text, textwrap.dedent('''\
+ usage: this_is_spammy_prog_with_a_long_name_sorry_about_the_name
+ [-h] [--proxy <http[s]://example:1234>]
+
+ optional arguments:
+ -h, --help show this help message and exit
+ --proxy <http[s]://example:1234>
+ '''))
+
+
def test_main():
# silence warnings about version argument - these are expected
with test_support.check_warnings(
diff --git a/lib-python/2.7/test/test_ast.py b/lib-python/2.7/test/test_ast.py
index fb1223e2bf..3f4e2c573d 100644
--- a/lib-python/2.7/test/test_ast.py
+++ b/lib-python/2.7/test/test_ast.py
@@ -103,6 +103,12 @@ exec_tests = [
"{r for l in x if g}",
# setcomp with naked tuple
"{r for l,m in x}",
+ # Decorated FunctionDef
+ "@deco1\n@deco2()\n@deco3(1)\ndef f(): pass",
+ # Decorated ClassDef
+ "@deco1\n@deco2()\n@deco3(1)\nclass C: pass",
+ # Decorator with generator argument
+ "@deco(a for a in b)\ndef f(): pass",
]
# These are compiled through "single"
@@ -552,6 +558,9 @@ exec_results = [
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]),
+('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (4, 9))], [('Name', (1, 1), 'deco1', ('Load',)), ('Call', (2, 1), ('Name', (2, 1), 'deco2', ('Load',)), [], [], None, None), ('Call', (3, 1), ('Name', (3, 1), 'deco3', ('Load',)), [('Num', (3, 7), 1)], [], None, None)])]),
+('Module', [('ClassDef', (1, 0), 'C', [], [('Pass', (4, 9))], [('Name', (1, 1), 'deco1', ('Load',)), ('Call', (2, 1), ('Name', (2, 1), 'deco2', ('Load',)), [], [], None, None), ('Call', (3, 1), ('Name', (3, 1), 'deco3', ('Load',)), [('Num', (3, 7), 1)], [], None, None)])]),
+('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (2, 9))], [('Call', (1, 1), ('Name', (1, 1), 'deco', ('Load',)), [('GeneratorExp', (1, 6), ('Name', (1, 6), 'a', ('Load',)), [('comprehension', ('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 17), 'b', ('Load',)), [])])], [], None, None)])]),
]
single_results = [
('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]),
diff --git a/lib-python/2.7/test/test_asyncore.py b/lib-python/2.7/test/test_asyncore.py
index 20eceb6350..4b347a3a6d 100644
--- a/lib-python/2.7/test/test_asyncore.py
+++ b/lib-python/2.7/test/test_asyncore.py
@@ -442,6 +442,19 @@ class FileWrapperTest(unittest.TestCase):
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
+ def test_close_twice(self):
+ fd = os.open(TESTFN, os.O_RDONLY)
+ f = asyncore.file_wrapper(fd)
+ os.close(fd)
+
+ os.close(f.fd) # file_wrapper dupped fd
+ with self.assertRaises(OSError):
+ f.close()
+
+ self.assertEqual(f.fd, -1)
+ # calling close twice should not fail
+ f.close()
+
class BaseTestHandler(asyncore.dispatcher):
@@ -606,6 +619,9 @@ class BaseTestAPI(unittest.TestCase):
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
+ if sys.platform == "darwin" and self.use_poll:
+ self.skipTest("poll may fail on macOS; see issue #28087")
+
class TestClient(BaseClient):
def handle_expt(self):
self.flag = True
@@ -711,19 +727,20 @@ class BaseTestAPI(unittest.TestCase):
server = TCPServer()
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=500))
t.start()
- self.addCleanup(t.join)
-
- for x in xrange(20):
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.settimeout(.2)
- s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
- struct.pack('ii', 1, 0))
- try:
- s.connect(server.address)
- except socket.error:
- pass
- finally:
- s.close()
+ try:
+ for x in xrange(20):
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(.2)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
+ struct.pack('ii', 1, 0))
+ try:
+ s.connect(server.address)
+ except socket.error:
+ pass
+ finally:
+ s.close()
+ finally:
+ t.join()
class TestAPI_UseSelect(BaseTestAPI):
diff --git a/lib-python/2.7/test/test_atexit.py b/lib-python/2.7/test/test_atexit.py
index bf85b75253..bde898744b 100644
--- a/lib-python/2.7/test/test_atexit.py
+++ b/lib-python/2.7/test/test_atexit.py
@@ -5,12 +5,17 @@ import atexit
from imp import reload
from test import test_support
+
+def exit():
+ raise SystemExit
+
+
class TestCase(unittest.TestCase):
def setUp(self):
- s = StringIO.StringIO()
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
- sys.stdout = sys.stderr = self.subst_io = s
+ self.stream = StringIO.StringIO()
+ sys.stdout = sys.stderr = self.subst_io = self.stream
self.save_handlers = atexit._exithandlers
atexit._exithandlers = []
@@ -55,6 +60,13 @@ class TestCase(unittest.TestCase):
atexit.register(self.raise2)
self.assertRaises(TypeError, atexit._run_exitfuncs)
+ def test_exit(self):
+ # be sure a SystemExit is handled properly
+ atexit.register(exit)
+
+ self.assertRaises(SystemExit, atexit._run_exitfuncs)
+ self.assertEqual(self.stream.getvalue(), '')
+
### helpers
def h1(self):
print "h1"
diff --git a/lib-python/2.7/test/test_audioop.py b/lib-python/2.7/test/test_audioop.py
index 4af73500ea..5f1deb5776 100644
--- a/lib-python/2.7/test/test_audioop.py
+++ b/lib-python/2.7/test/test_audioop.py
@@ -325,6 +325,10 @@ class TestAudioop(unittest.TestCase):
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None, 30, 10)[0],
expected[w])
+ self.assertRaises(TypeError, audioop.ratecv, b'', 1, 1, 8000, 8000, 42)
+ self.assertRaises(TypeError, audioop.ratecv,
+ b'', 1, 1, 8000, 8000, (1, (42,)))
+
def test_reverse(self):
for w in 1, 2, 4:
self.assertEqual(audioop.reverse(b'', w), b'')
diff --git a/lib-python/2.7/test/test_bdb.py b/lib-python/2.7/test/test_bdb.py
new file mode 100644
index 0000000000..3ad70e21c8
--- /dev/null
+++ b/lib-python/2.7/test/test_bdb.py
@@ -0,0 +1,1034 @@
+""" Test the bdb module.
+
+ A test defines a list of tuples that may be seen as paired tuples, each
+ pair being defined by 'expect_tuple, set_tuple' as follows:
+
+ ([event, [lineno[, co_name[, eargs]]]]), (set_type, [sargs])
+
+ * 'expect_tuple' describes the expected current state of the Bdb instance.
+ It may be the empty tuple and no check is done in that case.
+ * 'set_tuple' defines the set_*() method to be invoked when the Bdb
+ instance reaches this state.
+
+ Example of an 'expect_tuple, set_tuple' pair:
+
+ ('line', 2, 'tfunc_main'), ('step', )
+
+ Definitions of the members of the 'expect_tuple':
+ event:
+ Name of the trace event. The set methods that do not give back
+ control to the tracer [1] do not trigger a tracer event and in
+ that case the next 'event' may be 'None' by convention, its value
+ is not checked.
+ [1] Methods that trigger a trace event are set_step(), set_next(),
+ set_return(), set_until() and set_continue().
+ lineno:
+ Line number. Line numbers are relative to the start of the
+ function when tracing a function in the test_bdb module (i.e. this
+ module).
+ co_name:
+ Name of the function being currently traced.
+ eargs:
+ A tuple:
+ * On an 'exception' event the tuple holds a class object, the
+ current exception must be an instance of this class.
+ * On a 'line' event, the tuple holds a dictionary and a list. The
+ dictionary maps each breakpoint number that has been hit on this
+ line to its hits count. The list holds the list of breakpoint
+ number temporaries that are being deleted.
+
+ Definitions of the members of the 'set_tuple':
+ set_type:
+ The type of the set method to be invoked. This may
+ be the type of one of the Bdb set methods: 'step', 'next',
+ 'until', 'return', 'continue', 'break', 'quit', or the type of one
+ of the set methods added by test_bdb.Bdb: 'ignore', 'enable',
+ 'disable', 'clear', 'up', 'down'.
+ sargs:
+ The arguments of the set method if any, packed in a tuple.
+"""
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import bdb as _bdb
+import sys
+import os
+import unittest
+import textwrap
+import importlib
+import linecache
+from contextlib import contextmanager
+from itertools import islice, repeat
+import test.support
+
+class BdbException(Exception): pass
+class BdbError(BdbException): """Error raised by the Bdb instance."""
+class BdbSyntaxError(BdbException): """Syntax error in the test case."""
+class BdbNotExpectedError(BdbException): """Unexpected result."""
+
+# When 'dry_run' is set to true, expect tuples are ignored and the actual
+# state of the tracer is printed after running each set_*() method of the test
+# case. The full list of breakpoints and their attributes is also printed
+# after each 'line' event where a breakpoint has been hit.
+dry_run = 0
+
+__file__ = os.path.splitext(__file__)[0] + '.py'
+
+def reset_Breakpoint():
+ _bdb.Breakpoint.next = 1
+ _bdb.Breakpoint.bplist = {}
+ _bdb.Breakpoint.bpbynumber = [None]
+
+def info_breakpoints():
+ bp_list = [bp for bp in _bdb.Breakpoint.bpbynumber if bp]
+ if not bp_list:
+ return ''
+
+ header_added = False
+ for bp in bp_list:
+ if not header_added:
+ info = 'BpNum Temp Enb Hits Ignore Where\n'
+ header_added = True
+
+ disp = 'yes ' if bp.temporary else 'no '
+ enab = 'yes' if bp.enabled else 'no '
+ info += ('%-5d %s %s %-4d %-6d at %s:%d' %
+ (bp.number, disp, enab, bp.hits, bp.ignore,
+ os.path.basename(bp.file), bp.line))
+ if bp.cond:
+ info += '\n\tstop only if %s' % (bp.cond,)
+ info += '\n'
+ return info
+
+class Bdb(_bdb.Bdb, object):
+ """Extend Bdb to enhance test coverage."""
+
+ def trace_dispatch(self, frame, event, arg):
+ self.currentbp = None
+ return super(Bdb, self).trace_dispatch(frame, event, arg)
+
+ def set_break(self, filename, lineno, temporary=False, cond=None,
+ funcname=None):
+ if isinstance(funcname, str):
+ if filename == __file__:
+ globals_ = globals()
+ else:
+ module = importlib.import_module(filename[:-3])
+ globals_ = module.__dict__
+ func = eval(funcname, globals_)
+ code = func.__code__
+ filename = code.co_filename
+ lineno = code.co_firstlineno
+ funcname = code.co_name
+
+ res = super(Bdb, self).set_break(filename, lineno,
+ temporary=temporary, cond=cond, funcname=funcname)
+ if isinstance(res, str):
+ raise BdbError(res)
+ return res
+
+ # Back port of get_bpbynumber() from bdb.Bdb in Python 3.
+ def get_bpbynumber(self, arg):
+ """Return a breakpoint by its index in Breakpoint.bybpnumber.
+
+ For invalid arg values or if the breakpoint doesn't exist,
+ raise a ValueError.
+ """
+ if not arg:
+ raise ValueError('Breakpoint number expected')
+ try:
+ number = int(arg)
+ except ValueError:
+ raise ValueError('Non-numeric breakpoint number %s' % arg)
+ try:
+ bp = _bdb.Breakpoint.bpbynumber[number]
+ except IndexError:
+ raise ValueError('Breakpoint number %d out of range' % number)
+ if bp is None:
+ raise ValueError('Breakpoint %d already deleted' % number)
+ return bp
+
+ def get_stack(self, f, t):
+ self.stack, self.index = super(Bdb, self).get_stack(f, t)
+ self.frame = self.stack[self.index][0]
+ return self.stack, self.index
+
+ def set_ignore(self, bpnum):
+ """Increment the ignore count of Breakpoint number 'bpnum'."""
+ bp = self.get_bpbynumber(bpnum)
+ bp.ignore += 1
+
+ def set_enable(self, bpnum):
+ bp = self.get_bpbynumber(bpnum)
+ bp.enabled = True
+
+ def set_disable(self, bpnum):
+ bp = self.get_bpbynumber(bpnum)
+ bp.enabled = False
+
+ def set_clear(self, fname, lineno):
+ err = self.clear_break(fname, lineno)
+ if err:
+ raise BdbError(err)
+
+ def set_up(self):
+ """Move up in the frame stack."""
+ if not self.index:
+ raise BdbError('Oldest frame')
+ self.index -= 1
+ self.frame = self.stack[self.index][0]
+
+ def set_down(self):
+ """Move down in the frame stack."""
+ if self.index + 1 == len(self.stack):
+ raise BdbError('Newest frame')
+ self.index += 1
+ self.frame = self.stack[self.index][0]
+
+class Tracer(Bdb):
+ """A tracer for testing the bdb module."""
+
+ def __init__(self, expect_set, skip=None, dry_run=False, test_case=None):
+ super(Tracer, self).__init__(skip=skip)
+ self.expect_set = expect_set
+ self.dry_run = dry_run
+ self.header = ('Dry-run results for %s:' % test_case if
+ test_case is not None else None)
+ self.init_test()
+
+ def init_test(self):
+ self.cur_except = None
+ self.expect_set_no = 0
+ self.breakpoint_hits = None
+ self.expected_list = list(islice(self.expect_set, 0, None, 2))
+ self.set_list = list(islice(self.expect_set, 1, None, 2))
+
+ def trace_dispatch(self, frame, event, arg):
+ # On an 'exception' event, call_exc_trace() in Python/ceval.c discards
+ # a BdbException raised by the Tracer instance, so we raise it on the
+ # next trace_dispatch() call that occurs unless the set_quit() or
+ # set_continue() method has been invoked on the 'exception' event.
+ if self.cur_except is not None:
+ raise self.cur_except
+
+ if event == 'exception':
+ try:
+ res = super(Tracer, self).trace_dispatch(frame, event, arg)
+ return res
+ except BdbException as e:
+ self.cur_except = e
+ return self.trace_dispatch
+ else:
+ return super(Tracer, self).trace_dispatch(frame, event, arg)
+
+ def user_call(self, frame, argument_list):
+ # Adopt the same behavior as pdb and, as a side effect, also skip the
+ # first 'call' event when the Tracer is started with Tracer.runcall(),
+ # which may arguably be considered a bug.
+ if not self.stop_here(frame):
+ return
+ self.process_event('call', frame, argument_list)
+ self.next_set_method()
+
+ def user_line(self, frame):
+ self.process_event('line', frame)
+
+ if self.dry_run and self.breakpoint_hits:
+ info = info_breakpoints().strip('\n')
+ # Indent each line.
+ for line in info.split('\n'):
+ print(' ' + line)
+ self.delete_temporaries()
+ self.breakpoint_hits = None
+
+ self.next_set_method()
+
+ def user_return(self, frame, return_value):
+ self.process_event('return', frame, return_value)
+ self.next_set_method()
+
+ def user_exception(self, frame, exc_info):
+ self.exc_info = exc_info
+ self.process_event('exception', frame)
+ self.next_set_method()
+
+ def do_clear(self, arg):
+ # The temporary breakpoints are deleted in user_line().
+ bp_list = [self.currentbp]
+ self.breakpoint_hits = (bp_list, bp_list)
+
+ def delete_temporaries(self):
+ if self.breakpoint_hits:
+ for n in self.breakpoint_hits[1]:
+ self.clear_bpbynumber(n)
+
+ def pop_next(self):
+ self.expect_set_no += 1
+ try:
+ self.expect = self.expected_list.pop(0)
+ except IndexError:
+ raise BdbNotExpectedError(
+ 'expect_set list exhausted, cannot pop item %d' %
+ self.expect_set_no)
+ self.set_tuple = self.set_list.pop(0)
+
+ def process_event(self, event, frame, *args):
+ # Call get_stack() to enable walking the stack with set_up() and
+ # set_down().
+ tb = None
+ if event == 'exception':
+ tb = self.exc_info[2]
+ self.get_stack(frame, tb)
+
+ # A breakpoint has been hit and it is not a temporary.
+ if self.currentbp is not None and not self.breakpoint_hits:
+ bp_list = [self.currentbp]
+ self.breakpoint_hits = (bp_list, [])
+
+ # Pop next event.
+ self.event= event
+ self.pop_next()
+ if self.dry_run:
+ self.print_state(self.header)
+ return
+
+ # Validate the expected results.
+ if self.expect:
+ self.check_equal(self.expect[0], event, 'Wrong event type')
+ self.check_lno_name()
+
+ if event in ('call', 'return'):
+ self.check_expect_max_size(3)
+ elif len(self.expect) > 3:
+ if event == 'line':
+ bps, temporaries = self.expect[3]
+ bpnums = sorted(bps.keys())
+ if not self.breakpoint_hits:
+ self.raise_not_expected(
+ 'No breakpoints hit at expect_set item %d' %
+ self.expect_set_no)
+ self.check_equal(bpnums, self.breakpoint_hits[0],
+ 'Breakpoint numbers do not match')
+ self.check_equal([bps[n] for n in bpnums],
+ [self.get_bpbynumber(n).hits for
+ n in self.breakpoint_hits[0]],
+ 'Wrong breakpoint hit count')
+ self.check_equal(sorted(temporaries), self.breakpoint_hits[1],
+ 'Wrong temporary breakpoints')
+
+ elif event == 'exception':
+ if not isinstance(self.exc_info[1], self.expect[3]):
+ self.raise_not_expected(
+ "Wrong exception at expect_set item %d, got '%s'" %
+ (self.expect_set_no, self.exc_info))
+
+ def check_equal(self, expected, result, msg):
+ if expected == result:
+ return
+ self.raise_not_expected("%s at expect_set item %d, got '%s'" %
+ (msg, self.expect_set_no, result))
+
+ def check_lno_name(self):
+ """Check the line number and function co_name."""
+ s = len(self.expect)
+ if s > 1:
+ lineno = self.lno_abs2rel()
+ self.check_equal(self.expect[1], lineno, 'Wrong line number')
+ if s > 2:
+ self.check_equal(self.expect[2], self.frame.f_code.co_name,
+ 'Wrong function name')
+
+ def check_expect_max_size(self, size):
+ if len(self.expect) > size:
+ raise BdbSyntaxError('Invalid size of the %s expect tuple: %s' %
+ (self.event, self.expect))
+
+ def lno_abs2rel(self):
+ fname = self.canonic(self.frame.f_code.co_filename)
+ lineno = self.frame.f_lineno
+ return ((lineno - self.frame.f_code.co_firstlineno + 1)
+ if fname == self.canonic(__file__) else lineno)
+
+ def lno_rel2abs(self, fname, lineno):
+ return (self.frame.f_code.co_firstlineno + lineno - 1
+ if (lineno and self.canonic(fname) == self.canonic(__file__))
+ else lineno)
+
+ def get_state(self):
+ lineno = self.lno_abs2rel()
+ co_name = self.frame.f_code.co_name
+ state = "('%s', %d, '%s'" % (self.event, lineno, co_name)
+ if self.breakpoint_hits:
+ bps = '{'
+ for n in self.breakpoint_hits[0]:
+ if bps != '{':
+ bps += ', '
+ bps += '%s: %s' % (n, self.get_bpbynumber(n).hits)
+ bps += '}'
+ bps = '(' + bps + ', ' + str(self.breakpoint_hits[1]) + ')'
+ state += ', ' + bps
+ elif self.event == 'exception':
+ state += ', ' + self.exc_info[0].__name__
+ state += '), '
+ return state.ljust(32) + str(self.set_tuple) + ','
+
+ def print_state(self, header=None):
+ if header is not None and self.expect_set_no == 1:
+ print()
+ print(header)
+ print('%d: %s' % (self.expect_set_no, self.get_state()))
+
+ def raise_not_expected(self, msg):
+ msg += '\n'
+ msg += ' Expected: %s\n' % str(self.expect)
+ msg += ' Got: ' + self.get_state()
+ raise BdbNotExpectedError(msg)
+
+ def next_set_method(self):
+ set_type = self.set_tuple[0]
+ args = self.set_tuple[1] if len(self.set_tuple) == 2 else None
+ set_method = getattr(self, 'set_' + set_type)
+
+ # The following set methods give back control to the tracer.
+ if set_type in ('step', 'continue', 'quit'):
+ set_method()
+ return
+ elif set_type in ('next', 'return'):
+ set_method(self.frame)
+ return
+ elif set_type == 'until':
+ set_method(self.frame)
+ return
+
+ # The following set methods do not give back control to the tracer and
+ # next_set_method() is called recursively.
+ if (args and set_type in ('break', 'clear', 'ignore', 'enable',
+ 'disable')) or set_type in ('up', 'down'):
+ if set_type in ('break', 'clear'):
+ fname = args[0]
+ lineno = args[1]
+ lineno = self.lno_rel2abs(fname, lineno)
+ args = [fname, lineno] + list(args[2:])
+ set_method(*args)
+ elif set_type in ('ignore', 'enable', 'disable'):
+ set_method(*args)
+ elif set_type in ('up', 'down'):
+ set_method()
+
+ # Process the next expect_set item.
+ # It is not expected that a test may reach the recursion limit.
+ self.event= None
+ self.pop_next()
+ if self.dry_run:
+ self.print_state()
+ else:
+ if self.expect:
+ self.check_lno_name()
+ self.check_expect_max_size(3)
+ self.next_set_method()
+ else:
+ raise BdbSyntaxError('"%s" is an invalid set_tuple' %
+ self.set_tuple)
+
+class TracerRun():
+ """Provide a context for running a Tracer instance with a test case."""
+
+ def __init__(self, test_case, skip=None):
+ self.test_case = test_case
+ self.dry_run = test_case.dry_run
+ self.tracer = Tracer(test_case.expect_set, skip=skip,
+ dry_run=self.dry_run, test_case=test_case.id())
+
+ def __enter__(self):
+ # test_pdb does not reset Breakpoint class attributes on exit :-(
+ reset_Breakpoint()
+ return self.tracer
+
+ def __exit__(self, type_=None, value=None, traceback=None):
+ reset_Breakpoint()
+ sys.settrace(None)
+
+ not_empty = ''
+ if self.tracer.set_list:
+ not_empty += 'All paired tuples have not been processed, '
+ not_empty += ('the last one was number %d' %
+ self.tracer.expect_set_no)
+
+ # Make a BdbNotExpectedError a unittest failure.
+ if type_ is not None and issubclass(BdbNotExpectedError, type_):
+ if isinstance(value, BaseException) and value.args:
+ err_msg = value.args[0]
+ if not_empty:
+ err_msg += '\n' + not_empty
+ if self.dry_run:
+ print(err_msg)
+ return True
+ else:
+ self.test_case.fail(err_msg)
+ else:
+ assert False, 'BdbNotExpectedError with empty args'
+
+ if not_empty:
+ if self.dry_run:
+ print(not_empty)
+ else:
+ self.test_case.fail(not_empty)
+
+def run_test(modules, set_list, skip=None):
+ """Run a test and print the dry-run results.
+
+ 'modules': A dictionary mapping module names to their source code as a
+ string. The dictionary MUST include one module named
+ 'test_module' with a main() function.
+ 'set_list': A list of set_type tuples to be run on the module.
+
+ For example, running the following script outputs the following results:
+
+ ***************************** SCRIPT ********************************
+
+ from test.test_bdb import run_test, break_in_func
+
+ code = '''
+ def func():
+ lno = 3
+
+ def main():
+ func()
+ lno = 7
+ '''
+
+ set_list = [
+ break_in_func('func', 'test_module.py'),
+ ('continue', ),
+ ('step', ),
+ ('step', ),
+ ('step', ),
+ ('quit', ),
+ ]
+
+ modules = { 'test_module': code }
+ run_test(modules, set_list)
+
+ **************************** results ********************************
+
+ 1: ('line', 2, 'tfunc_import'), ('next',),
+ 2: ('line', 3, 'tfunc_import'), ('step',),
+ 3: ('call', 5, 'main'), ('break', ('test_module.py', None, False, None, 'func')),
+ 4: ('None', 5, 'main'), ('continue',),
+ 5: ('line', 3, 'func', ({1: 1}, [])), ('step',),
+ BpNum Temp Enb Hits Ignore Where
+ 1 no yes 1 0 at test_module.py:2
+ 6: ('return', 3, 'func'), ('step',),
+ 7: ('line', 7, 'main'), ('step',),
+ 8: ('return', 7, 'main'), ('quit',),
+
+ *************************************************************************
+
+ """
+ def gen(a, b):
+ try:
+ while 1:
+ x = next(a)
+ y = next(b)
+ yield x
+ yield y
+ except StopIteration:
+ return
+
+ # Step over the import statement in tfunc_import using 'next' and step
+ # into main() in test_module.
+ sl = [('next', ), ('step', )]
+ sl.extend(set_list)
+
+ test = BaseTestCase()
+ test.dry_run = True
+ test.id = lambda : None
+ test.expect_set = list(gen(repeat(()), iter(sl)))
+ with create_modules(modules):
+ sys.path.append(os.getcwd())
+ with TracerRun(test, skip=skip) as tracer:
+ tracer.runcall(tfunc_import)
+
+@contextmanager
+def create_modules(modules):
+ with test.support.temp_cwd():
+ try:
+ sys.path.insert(0, os.getcwd())
+ for m in modules:
+ fname = m + '.py'
+ with open(fname, 'w') as f:
+ f.write(textwrap.dedent(modules[m]))
+ linecache.checkcache(fname)
+ yield
+ finally:
+ sys.path.pop(0)
+ for m in modules:
+ test.support.forget(m)
+
+def break_in_func(funcname, fname=__file__, temporary=False, cond=None):
+ return 'break', (fname, None, temporary, cond, funcname)
+
+TEST_MODULE = 'test_module_for_bdb'
+TEST_MODULE_FNAME = TEST_MODULE + '.py'
+def tfunc_import():
+ import test_module_for_bdb
+ test_module_for_bdb.main()
+
+def tfunc_main():
+ lno = 2
+ tfunc_first()
+ tfunc_second()
+ lno = 5
+ lno = 6
+ lno = 7
+
+def tfunc_first():
+ lno = 2
+ lno = 3
+ lno = 4
+
+def tfunc_second():
+ lno = 2
+
+class BaseTestCase(unittest.TestCase):
+ """Base class for all tests."""
+ dry_run = dry_run
+
+class StateTestCase(BaseTestCase):
+ """Test the step, next, return, until and quit 'set_' methods."""
+
+ def test_step(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('step', ),
+ ('line', 2, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_step_on_last_statement(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('break', (__file__, 3)),
+ ('None', 1, 'tfunc_first'), ('continue', ),
+ ('line', 3, 'tfunc_first', ({1:1}, [])), ('step', ),
+ ('line', 4, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next_on_last_statement(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('break', (__file__, 3)),
+ ('None', 1, 'tfunc_first'), ('continue', ),
+ ('line', 3, 'tfunc_first', ({1:1}, [])), ('next', ),
+ ('line', 4, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('next', ),
+ ('line', 4, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_second'), ('step', ),
+ ('line', 2, 'tfunc_second'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next_over_import(self):
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('next', ),
+ ('line', 3, 'tfunc_import'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_next_on_plain_statement(self):
+ # Check that set_next() is equivalent to set_step() on a plain
+ # statement.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('next', ),
+ ('line', 2, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_next_in_caller_frame(self):
+ # Check that set_next() in the caller frame causes the tracer
+ # to stop next in the caller frame.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('next', ),
+ ('line', 4, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_return(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('step', ),
+ ('line', 2, 'tfunc_first'), ('return', ),
+ ('return', 4, 'tfunc_first'), ('step', ),
+ ('line', 4, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_return_in_caller_frame(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('return', ),
+ ('return', 7, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_until(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('step', ),
+ ('line', 2, 'tfunc_first'), ('until', ),
+ ('line', 3, 'tfunc_first'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_until_in_caller_frame(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('until', ),
+ ('line', 4, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+ def test_skip(self):
+ # Check that tracing is skipped over the import statement in
+ # 'tfunc_import()'.
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('step', ),
+ ('line', 3, 'tfunc_import'), ('quit', ),
+ ]
+ skip = ('importlib*', TEST_MODULE)
+ with TracerRun(self, skip=skip) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_down(self):
+ # Check that set_down() raises BdbError at the newest frame.
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('down', ),
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_main)
+
+ def test_up(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_main'), ('step', ),
+ ('line', 3, 'tfunc_main'), ('step', ),
+ ('call', 1, 'tfunc_first'), ('up', ),
+ ('None', 3, 'tfunc_main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_main)
+
+class BreakpointTestCase(BaseTestCase):
+ """Test the breakpoint set method."""
+
+ def test_bp_on_non_existent_module(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('break', ('/non/existent/module.py', 1))
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_import)
+
+ def test_bp_after_last_statement(self):
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 4))
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_import)
+
+ def test_temporary_bp(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(2):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, True),
+ ('None', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, True),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [1])), ('continue', ),
+ ('line', 3, 'func', ({2:1}, [2])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_disabled_temporary_bp(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(3):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, True),
+ ('None', 2, 'tfunc_import'), ('disable', (2, )),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('enable', (2, )),
+ ('None', 3, 'func'), ('disable', (1, )),
+ ('None', 3, 'func'), ('continue', ),
+ ('line', 3, 'func', ({2:1}, [2])), ('enable', (1, )),
+ ('None', 3, 'func'), ('continue', ),
+ ('line', 3, 'func', ({1:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_bp_condition(self):
+ code = """
+ def func(a):
+ lno = 3
+
+ def main():
+ for i in range(3):
+ func(i)
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, False, 'a == 2'),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:3}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_bp_exception_on_condition_evaluation(self):
+ code = """
+ def func(a):
+ lno = 3
+
+ def main():
+ func(0)
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME, False, '1 // 0'),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_bp_ignore_count(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(2):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('ignore', (1, )),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_ignore_count_on_disabled_bp(self):
+ code = """
+ def func():
+ lno = 3
+
+ def main():
+ for i in range(3):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'),
+ break_in_func('func', TEST_MODULE_FNAME),
+ ('None', 2, 'tfunc_import'), ('ignore', (1, )),
+ ('None', 2, 'tfunc_import'), ('disable', (1, )),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({2:1}, [])), ('enable', (1, )),
+ ('None', 3, 'func'), ('continue', ),
+ ('line', 3, 'func', ({2:2}, [])), ('continue', ),
+ ('line', 3, 'func', ({1:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_clear_two_bp_on_same_line(self):
+ code = """
+ def func():
+ lno = 3
+ lno = 4
+
+ def main():
+ for i in range(3):
+ func()
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 3)),
+ ('None', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 3)),
+ ('None', 2, 'tfunc_import'), ('break', (TEST_MODULE_FNAME, 4)),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('continue', ),
+ ('line', 4, 'func', ({3:1}, [])), ('clear', (TEST_MODULE_FNAME, 3)),
+ ('None', 4, 'func'), ('continue', ),
+ ('line', 4, 'func', ({3:2}, [])), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+ def test_clear_at_no_bp(self):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'), ('clear', (__file__, 1))
+ ]
+ with TracerRun(self) as tracer:
+ self.assertRaises(BdbError, tracer.runcall, tfunc_import)
+
+class RunTestCase(BaseTestCase):
+ """Test run, runeval and set_trace."""
+
+ def test_run_step(self):
+ # Check that the bdb 'run' method stops at the first line event.
+ code = """
+ lno = 2
+ """
+ self.expect_set = [
+ ('line', 2, '<module>'), ('step', ),
+ ('return', 2, '<module>'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.run(compile(textwrap.dedent(code), '<string>', 'exec'))
+
+ def test_runeval_step(self):
+ # Test bdb 'runeval'.
+ code = """
+ def main():
+ lno = 3
+ """
+ modules = { TEST_MODULE: code }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 1, '<module>'), ('step', ),
+ ('call', 2, 'main'), ('step', ),
+ ('line', 3, 'main'), ('step', ),
+ ('return', 3, 'main'), ('step', ),
+ ('return', 1, '<module>'), ('quit', ),
+ ]
+ import test_module_for_bdb
+ with TracerRun(self) as tracer:
+ tracer.runeval('test_module_for_bdb.main()', globals(), locals())
+
+class IssuesTestCase(BaseTestCase):
+ """Test fixed bdb issues."""
+
+ def test_step_at_return_with_no_trace_in_caller(self):
+ # Issue #13183.
+ # Check that the tracer does step into the caller frame when the
+ # trace function is not set in that frame.
+ code_1 = """
+ from test_module_for_bdb_2 import func
+ def main():
+ func()
+ lno = 5
+ """
+ code_2 = """
+ def func():
+ lno = 3
+ """
+ modules = {
+ TEST_MODULE: code_1,
+ 'test_module_for_bdb_2': code_2,
+ }
+ with create_modules(modules):
+ self.expect_set = [
+ ('line', 2, 'tfunc_import'),
+ break_in_func('func', 'test_module_for_bdb_2.py'),
+ ('None', 2, 'tfunc_import'), ('continue', ),
+ ('line', 3, 'func', ({1:1}, [])), ('step', ),
+ ('return', 3, 'func'), ('step', ),
+ ('line', 5, 'main'), ('quit', ),
+ ]
+ with TracerRun(self) as tracer:
+ tracer.runcall(tfunc_import)
+
+def test_main():
+ test.support.run_unittest(
+ StateTestCase,
+ RunTestCase,
+ BreakpointTestCase,
+ IssuesTestCase,
+ )
+
+if __name__ == "__main__":
+ test_main()
diff --git a/lib-python/2.7/test/test_bisect.py b/lib-python/2.7/test/test_bisect.py
index 5c3330b4e4..9bed28e058 100644
--- a/lib-python/2.7/test/test_bisect.py
+++ b/lib-python/2.7/test/test_bisect.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
import sys
import unittest
from test import test_support
diff --git a/lib-python/2.7/test/test_bsddb.py b/lib-python/2.7/test/test_bsddb.py
index 3ff020a0c8..9271d7a921 100644
--- a/lib-python/2.7/test/test_bsddb.py
+++ b/lib-python/2.7/test/test_bsddb.py
@@ -172,7 +172,7 @@ class TestBSDDB(unittest.TestCase):
def test_first_while_deleting(self):
# Test for bug 1725856
- self.assertTrue(len(self.d) >= 2, "test requires >=2 items")
+ self.assertGreaterEqual(len(self.d), 2, "test requires >=2 items")
for _ in self.d:
key = self.f.first()[0]
del self.f[key]
@@ -180,7 +180,7 @@ class TestBSDDB(unittest.TestCase):
def test_last_while_deleting(self):
# Test for bug 1725856's evil twin
- self.assertTrue(len(self.d) >= 2, "test requires >=2 items")
+ self.assertGreaterEqual(len(self.d), 2, "test requires >=2 items")
for _ in self.d:
key = self.f.last()[0]
del self.f[key]
@@ -197,7 +197,7 @@ class TestBSDDB(unittest.TestCase):
def test_has_key(self):
for k in self.d:
self.assertTrue(self.f.has_key(k))
- self.assertTrue(not self.f.has_key('not here'))
+ self.assertFalse(self.f.has_key('not here'))
def test_clear(self):
self.f.clear()
@@ -271,7 +271,7 @@ class TestBSDDB(unittest.TestCase):
self.assertEqual(nc1, nc2)
self.assertEqual(nc1, nc4)
- self.assertTrue(nc3 == nc1+1)
+ self.assertEqual(nc3, nc1+1)
def test_popitem(self):
k, v = self.f.popitem()
diff --git a/lib-python/2.7/test/test_bsddb3.py b/lib-python/2.7/test/test_bsddb3.py
index 099145b460..1a82325d49 100644
--- a/lib-python/2.7/test/test_bsddb3.py
+++ b/lib-python/2.7/test/test_bsddb3.py
@@ -28,6 +28,10 @@ if 'silent' in sys.argv: # take care of old flag, just in case
verbose = False
sys.argv.remove('silent')
+# bpo-30778: test_bsddb3 crashes randomly on Windows XP
+if hasattr(sys, 'getwindowsversion') and sys.getwindowsversion()[:2] < (6, 0):
+ raise unittest.SkipTest("bpo-30778: skip tests on Windows XP")
+
class TimingCheck(unittest.TestCase):
diff --git a/lib-python/2.7/test/test_buffer.py b/lib-python/2.7/test/test_buffer.py
index de80d4469d..c7114cde8b 100644
--- a/lib-python/2.7/test/test_buffer.py
+++ b/lib-python/2.7/test/test_buffer.py
@@ -8,6 +8,7 @@ import copy
import pickle
import sys
import unittest
+import warnings
from test import test_support
class BufferTests(unittest.TestCase):
@@ -39,15 +40,19 @@ class BufferTests(unittest.TestCase):
def test_copy(self):
buf = buffer(b'abc')
- with self.assertRaises(TypeError):
+ with self.assertRaises(TypeError), warnings.catch_warnings():
+ warnings.filterwarnings('ignore', ".*buffer", DeprecationWarning)
copy.copy(buf)
- # See issue #22995
- ## def test_pickle(self):
- ## buf = buffer(b'abc')
- ## for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- ## with self.assertRaises(TypeError):
- ## pickle.dumps(buf, proto)
+ @test_support.cpython_only
+ def test_pickle(self):
+ buf = buffer(b'abc')
+ for proto in range(2):
+ with self.assertRaises(TypeError):
+ pickle.dumps(buf, proto)
+ with test_support.check_py3k_warnings(
+ (".*buffer", DeprecationWarning)):
+ pickle.dumps(buf, 2)
def test_main():
diff --git a/lib-python/2.7/test/test_builtin.py b/lib-python/2.7/test/test_builtin.py
index f4c4509324..7d38031f63 100644
--- a/lib-python/2.7/test/test_builtin.py
+++ b/lib-python/2.7/test/test_builtin.py
@@ -1599,18 +1599,28 @@ class BuiltinTest(unittest.TestCase):
self.assertRaises(ValueError, x.translate, "1", 1)
self.assertRaises(TypeError, x.translate, "1"*256, 1)
+
+def create_exec_script(filename):
+ with open(filename, 'w') as f:
+ f.write('z = z+1\n')
+ f.write('z = z*2\n')
+
+
class TestExecFile(unittest.TestCase):
# Done outside of the method test_z to get the correct scope
z = 0
- f = open(TESTFN, 'w')
- f.write('z = z+1\n')
- f.write('z = z*2\n')
- f.close()
- with check_py3k_warnings(("execfile.. not supported in 3.x",
- DeprecationWarning)):
- execfile(TESTFN)
+ try:
+ create_exec_script(TESTFN)
+ with check_py3k_warnings(("execfile.. not supported in 3.x",
+ DeprecationWarning)):
+ execfile(TESTFN)
+ finally:
+ unlink(TESTFN)
def test_execfile(self):
+ self.addCleanup(unlink, TESTFN)
+ create_exec_script(TESTFN)
+
globals = {'a': 1, 'b': 2}
locals = {'b': 200, 'c': 300}
diff --git a/lib-python/2.7/test/test_bytes.py b/lib-python/2.7/test/test_bytes.py
index 4d4068ec4a..adab54ccfb 100644
--- a/lib-python/2.7/test/test_bytes.py
+++ b/lib-python/2.7/test/test_bytes.py
@@ -4,6 +4,7 @@ XXX This is a mess. Common tests should be unified with string_tests.py (and
the latter should be modernized).
"""
+import array
import os
import re
import sys
@@ -58,11 +59,49 @@ class BaseBytesTest(unittest.TestCase):
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
+ def test_from_iterable(self):
+ b = self.type2test(range(256))
+ self.assertEqual(len(b), 256)
+ self.assertEqual(list(b), list(range(256)))
+
+ # Non-sequence iterable.
+ b = self.type2test({42})
+ self.assertEqual(b, b"*")
+ b = self.type2test({43, 45})
+ self.assertIn(tuple(b), {(43, 45), (45, 43)})
+
+ # Iterator that has a __length_hint__.
+ b = self.type2test(iter(range(256)))
+ self.assertEqual(len(b), 256)
+ self.assertEqual(list(b), list(range(256)))
+
+ # Iterator that doesn't have a __length_hint__.
+ b = self.type2test(i for i in range(256) if i % 2)
+ self.assertEqual(len(b), 128)
+ self.assertEqual(list(b), list(range(256))[1::2])
+
+ # Sequence without __iter__.
+ class S:
+ def __getitem__(self, i):
+ return (1, 2, 3)[i]
+ b = self.type2test(S())
+ self.assertEqual(b, b"\x01\x02\x03")
+
+ def test_from_tuple(self):
+ # There is a special case for tuples.
+ b = self.type2test(tuple(range(256)))
+ self.assertEqual(len(b), 256)
+ self.assertEqual(list(b), list(range(256)))
+ b = self.type2test((1, 2, 3))
+ self.assertEqual(b, b"\x01\x02\x03")
+
def test_from_list(self):
- ints = list(range(256))
- b = self.type2test(i for i in ints)
+ # There is a special case for lists.
+ b = self.type2test(list(range(256)))
self.assertEqual(len(b), 256)
- self.assertEqual(list(b), ints)
+ self.assertEqual(list(b), list(range(256)))
+ b = self.type2test([1, 2, 3])
+ self.assertEqual(b, b"\x01\x02\x03")
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
@@ -71,6 +110,20 @@ class BaseBytesTest(unittest.TestCase):
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
+ def test_from_buffer(self):
+ a = self.type2test(array.array('B', [1, 2, 3]))
+ self.assertEqual(a, b"\x01\x02\x03")
+ a = self.type2test(b"\x01\x02\x03")
+ self.assertEqual(a, b"\x01\x02\x03")
+
+ # Issues #29159 and #34974.
+ # Fallback when __index__ raises a TypeError
+ class B(bytes):
+ def __index__(self):
+ raise TypeError
+
+ self.assertEqual(self.type2test(B(b"foobar")), b"foobar")
+
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
@@ -103,6 +156,20 @@ class BaseBytesTest(unittest.TestCase):
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
+ def test_constructor_exceptions(self):
+ # Issue #34974: bytes and bytearray constructors replace unexpected
+ # exceptions.
+ class BadInt:
+ def __index__(self):
+ 1//0
+ self.assertRaises(ZeroDivisionError, self.type2test, BadInt())
+ self.assertRaises(ZeroDivisionError, self.type2test, [BadInt()])
+
+ class BadIterable:
+ def __iter__(self):
+ 1//0
+ self.assertRaises(ZeroDivisionError, self.type2test, BadIterable())
+
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
@@ -336,8 +403,16 @@ class BaseBytesTest(unittest.TestCase):
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
+ def test_replace_int_error(self):
+ self.assertRaises(TypeError, self.type2test(b'a b').replace, 32, b'')
+
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
+ self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
+
+ def test_split_int_error(self):
+ self.assertRaises(TypeError, self.type2test(b'a b').split, 32)
+ self.assertRaises(TypeError, self.type2test(b'a b').rsplit, 32)
def test_split_unicodewhitespace(self):
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
@@ -346,9 +421,6 @@ class BaseBytesTest(unittest.TestCase):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
- def test_rsplit_string_error(self):
- self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
-
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
@@ -364,6 +436,14 @@ class BaseBytesTest(unittest.TestCase):
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
+ def test_partition_string_error(self):
+ self.assertRaises(TypeError, self.type2test(b'a b').partition, u' ')
+ self.assertRaises(TypeError, self.type2test(b'a b').rpartition, u' ')
+
+ def test_partition_int_error(self):
+ self.assertRaises(TypeError, self.type2test(b'a b').partition, 32)
+ self.assertRaises(TypeError, self.type2test(b'a b').rpartition, 32)
+
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
@@ -378,9 +458,19 @@ class BaseBytesTest(unittest.TestCase):
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
- self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
- self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
- self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
+ self.assertRaises(TypeError, self.type2test(b'abc').strip, u'ac')
+ self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'ac')
+ self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'ac')
+
+ def test_strip_int_error(self):
+ self.assertRaises(TypeError, self.type2test(b' abc ').strip, 32)
+ self.assertRaises(TypeError, self.type2test(b' abc ').lstrip, 32)
+ self.assertRaises(TypeError, self.type2test(b' abc ').rstrip, 32)
+
+ def test_xjust_int_error(self):
+ self.assertRaises(TypeError, self.type2test(b'abc').center, 7, 32)
+ self.assertRaises(TypeError, self.type2test(b'abc').ljust, 7, 32)
+ self.assertRaises(TypeError, self.type2test(b'abc').rjust, 7, 32)
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
diff --git a/lib-python/2.7/test/test_capi.py b/lib-python/2.7/test/test_capi.py
index 6afce630ba..a90126df68 100644
--- a/lib-python/2.7/test/test_capi.py
+++ b/lib-python/2.7/test/test_capi.py
@@ -2,6 +2,7 @@
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
+import string
import sys
import time
import random
@@ -113,6 +114,20 @@ class TestPendingCalls(unittest.TestCase):
self.pendingcalls_wait(l, n)
+class TestGetIndices(unittest.TestCase):
+
+ def test_get_indices(self):
+ self.assertEqual(_testcapi.get_indices(slice(10L, 20, 1), 100), (0, 10, 20, 1))
+ self.assertEqual(_testcapi.get_indices(slice(10.1, 20, 1), 100), None)
+ self.assertEqual(_testcapi.get_indices(slice(10, 20L, 1), 100), (0, 10, 20, 1))
+ self.assertEqual(_testcapi.get_indices(slice(10, 20.1, 1), 100), None)
+
+ self.assertEqual(_testcapi.get_indices(slice(10L, 20, 1L), 100), (0, 10, 20, 1))
+ self.assertEqual(_testcapi.get_indices(slice(10.1, 20, 1L), 100), None)
+ self.assertEqual(_testcapi.get_indices(slice(10, 20L, 1L), 100), (0, 10, 20, 1))
+ self.assertEqual(_testcapi.get_indices(slice(10, 20.1, 1L), 100), None)
+
+
@unittest.skipUnless(threading and thread and 'TestThreadState' not in skips, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@@ -138,18 +153,17 @@ class TestThreadState(unittest.TestCase):
t.join()
+class Test_testcapi(unittest.TestCase):
+ locals().update((name, getattr(_testcapi, name))
+ for name in dir(_testcapi)
+ if name.startswith('test_')
+ and not name.endswith('_code')
+ and name not in skips)
+
+
def test_main():
- for name in dir(_testcapi):
- if name.startswith('test_') and name not in skips:
- test = getattr(_testcapi, name)
- if support.verbose:
- print "internal", name
- try:
- test()
- except _testcapi.error:
- raise support.TestFailed, sys.exc_info()[1]
-
- support.run_unittest(CAPITest, TestPendingCalls, TestThreadState)
+ support.run_unittest(CAPITest, TestPendingCalls,
+ TestThreadState, TestGetIndices, Test_testcapi)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_cgi.py b/lib-python/2.7/test/test_cgi.py
index c9cf09525d..743c2afbd4 100644
--- a/lib-python/2.7/test/test_cgi.py
+++ b/lib-python/2.7/test/test_cgi.py
@@ -1,3 +1,4 @@
+from io import BytesIO
from test.test_support import run_unittest, check_warnings
import cgi
import os
@@ -316,6 +317,60 @@ Content-Type: text/plain
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
+ def test_max_num_fields(self):
+ # For application/x-www-form-urlencoded
+ data = '&'.join(['a=a']*11)
+ environ = {
+ 'CONTENT_LENGTH': str(len(data)),
+ 'CONTENT_TYPE': 'application/x-www-form-urlencoded',
+ 'REQUEST_METHOD': 'POST',
+ }
+
+ with self.assertRaises(ValueError):
+ cgi.FieldStorage(
+ fp=BytesIO(data.encode()),
+ environ=environ,
+ max_num_fields=10,
+ )
+
+ # For multipart/form-data
+ data = """---123
+Content-Disposition: form-data; name="a"
+
+3
+---123
+Content-Type: application/x-www-form-urlencoded
+
+a=4
+---123
+Content-Type: application/x-www-form-urlencoded
+
+a=5
+---123--
+"""
+ environ = {
+ 'CONTENT_LENGTH': str(len(data)),
+ 'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
+ 'QUERY_STRING': 'a=1&a=2',
+ 'REQUEST_METHOD': 'POST',
+ }
+
+ # 2 GET entities
+ # 1 top level POST entities
+ # 1 entity within the second POST entity
+ # 1 entity within the third POST entity
+ with self.assertRaises(ValueError):
+ cgi.FieldStorage(
+ fp=BytesIO(data.encode()),
+ environ=environ,
+ max_num_fields=4,
+ )
+ cgi.FieldStorage(
+ fp=BytesIO(data.encode()),
+ environ=environ,
+ max_num_fields=5,
+ )
+
def testQSAndFormData(self):
data = """
---123
diff --git a/lib-python/2.7/test/test_class.py b/lib-python/2.7/test/test_class.py
index e5cdf088f4..5cd138d289 100644
--- a/lib-python/2.7/test/test_class.py
+++ b/lib-python/2.7/test/test_class.py
@@ -635,6 +635,49 @@ class ClassTests(unittest.TestCase):
self.assertRaises(TypeError, type(c).__getattribute__, c, [])
self.assertRaises(TypeError, type(c).__setattr__, c, [], [])
+ def testSetattrWrapperNameIntern(self):
+ # Issue #25794: __setattr__ should intern the attribute name
+ class A(object):
+ pass
+
+ def add(self, other):
+ return 'summa'
+
+ name = ''.join(list('__add__')) # shouldn't be optimized
+ self.assertIsNot(name, '__add__') # not interned
+ type.__setattr__(A, name, add)
+ self.assertEqual(A() + 1, 'summa')
+
+ name2 = ''.join(list('__add__'))
+ self.assertIsNot(name2, '__add__')
+ self.assertIsNot(name2, name)
+ type.__delattr__(A, name2)
+ with self.assertRaises(TypeError):
+ A() + 1
+
+ @test_support.requires_unicode
+ def testSetattrWrapperNameUnicode(self):
+ # Issue #25794: __setattr__ should intern the attribute name
+ class A(object):
+ pass
+
+ def add(self, other):
+ return 'summa'
+
+ type.__setattr__(A, u'__add__', add)
+ self.assertEqual(A() + 1, 'summa')
+
+ type.__delattr__(A, u'__add__')
+ with self.assertRaises(TypeError):
+ A() + 1
+
+ def testSetattrNonStringName(self):
+ class A(object):
+ pass
+
+ with self.assertRaises(TypeError):
+ type.__setattr__(A, bytearray(b'x'), None)
+
def test_main():
with test_support.check_py3k_warnings(
(".+__(get|set|del)slice__ has been removed", DeprecationWarning),
diff --git a/lib-python/2.7/test/test_codecencodings_cn.py b/lib-python/2.7/test/test_codecencodings_cn.py
index cd102fd2bc..a1bbcb7879 100644
--- a/lib-python/2.7/test/test_codecencodings_cn.py
+++ b/lib-python/2.7/test/test_codecencodings_cn.py
@@ -4,12 +4,12 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class Test_GB2312(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_GB2312(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb2312'
- tstring = test_multibytecodec_support.load_teststring('gb2312')
+ tstring = multibytecodec_support.load_teststring('gb2312')
codectests = (
# invalid bytes
("abc\x81\x81\xc1\xc4", "strict", None),
@@ -20,9 +20,9 @@ class Test_GB2312(test_multibytecodec_support.TestBase, unittest.TestCase):
("\xc1\x64", "strict", None),
)
-class Test_GBK(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_GBK(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gbk'
- tstring = test_multibytecodec_support.load_teststring('gbk')
+ tstring = multibytecodec_support.load_teststring('gbk')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
@@ -34,9 +34,9 @@ class Test_GBK(test_multibytecodec_support.TestBase, unittest.TestCase):
(u"\u30fb", "strict", None),
)
-class Test_GB18030(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_GB18030(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'gb18030'
- tstring = test_multibytecodec_support.load_teststring('gb18030')
+ tstring = multibytecodec_support.load_teststring('gb18030')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
@@ -46,12 +46,18 @@ class Test_GB18030(test_multibytecodec_support.TestBase, unittest.TestCase):
("abc\x80\x80\xc1\xc4", "ignore", u"abc\u804a"),
("abc\x84\x39\x84\x39\xc1\xc4", "replace", u"abc\ufffd\u804a"),
(u"\u30fb", "strict", "\x819\xa79"),
+ # issue29990
+ ("\xff\x30\x81\x30", "strict", None),
+ ("\x81\x30\xff\x30", "strict", None),
+ ("abc\x81\x39\xff\x39\xc1\xc4", "replace", u"abc\ufffd\u804a"),
+ ("abc\xab\x36\xff\x30def", "replace", u'abc\ufffddef'),
+ ("abc\xbf\x38\xff\x32\xc1\xc4", "ignore", u"abc\u804a"),
)
has_iso10646 = True
-class Test_HZ(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_HZ(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'hz'
- tstring = test_multibytecodec_support.load_teststring('hz')
+ tstring = multibytecodec_support.load_teststring('hz')
codectests = (
# test '~\n' (3 lines)
(b'This sentence is in ASCII.\n'
@@ -76,6 +82,10 @@ class Test_HZ(test_multibytecodec_support.TestBase, unittest.TestCase):
(b'ab~cd', 'replace', u'ab\uFFFDd'),
(b'ab\xffcd', 'replace', u'ab\uFFFDcd'),
(b'ab~{\x81\x81\x41\x44~}cd', 'replace', u'ab\uFFFD\uFFFD\u804Acd'),
+ # issue 30003
+ (u'ab~cd', 'strict', b'ab~~cd'), # escape ~
+ (b'~{Dc~~:C~}', 'strict', None), # ~~ only in ASCII mode
+ (b'~{Dc~\n:C~}', 'strict', None), # ~\n only in ASCII mode
)
def test_main():
diff --git a/lib-python/2.7/test/test_codecencodings_hk.py b/lib-python/2.7/test/test_codecencodings_hk.py
index 391c3164f0..00774abfaa 100644
--- a/lib-python/2.7/test/test_codecencodings_hk.py
+++ b/lib-python/2.7/test/test_codecencodings_hk.py
@@ -4,12 +4,12 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class Test_Big5HKSCS(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_Big5HKSCS(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'big5hkscs'
- tstring = test_multibytecodec_support.load_teststring('big5hkscs')
+ tstring = multibytecodec_support.load_teststring('big5hkscs')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
diff --git a/lib-python/2.7/test/test_codecencodings_iso2022.py b/lib-python/2.7/test/test_codecencodings_iso2022.py
index 9f23628e52..75f4cfc788 100644
--- a/lib-python/2.7/test/test_codecencodings_iso2022.py
+++ b/lib-python/2.7/test/test_codecencodings_iso2022.py
@@ -1,7 +1,7 @@
# Codec encoding tests for ISO 2022 encodings.
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
COMMON_CODEC_TESTS = (
@@ -11,23 +11,23 @@ COMMON_CODEC_TESTS = (
(b'ab\x1B$def', 'replace', u'ab\uFFFD'),
)
-class Test_ISO2022_JP(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_ISO2022_JP(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_jp'
- tstring = test_multibytecodec_support.load_teststring('iso2022_jp')
+ tstring = multibytecodec_support.load_teststring('iso2022_jp')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', u'ab\x1BNdef'),
)
-class Test_ISO2022_JP2(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_ISO2022_JP2(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_jp_2'
- tstring = test_multibytecodec_support.load_teststring('iso2022_jp')
+ tstring = multibytecodec_support.load_teststring('iso2022_jp')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', u'abdef'),
)
-class Test_ISO2022_KR(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_ISO2022_KR(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'iso2022_kr'
- tstring = test_multibytecodec_support.load_teststring('iso2022_kr')
+ tstring = multibytecodec_support.load_teststring('iso2022_kr')
codectests = COMMON_CODEC_TESTS + (
(b'ab\x1BNdef', 'replace', u'ab\x1BNdef'),
)
diff --git a/lib-python/2.7/test/test_codecencodings_jp.py b/lib-python/2.7/test/test_codecencodings_jp.py
index f3cf923508..e0ad054a58 100644
--- a/lib-python/2.7/test/test_codecencodings_jp.py
+++ b/lib-python/2.7/test/test_codecencodings_jp.py
@@ -4,12 +4,12 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class Test_CP932(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_CP932(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'cp932'
- tstring = test_multibytecodec_support.load_teststring('shift_jis')
+ tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = (
# invalid bytes
("abc\x81\x00\x81\x00\x82\x84", "strict", None),
@@ -22,10 +22,10 @@ class Test_CP932(test_multibytecodec_support.TestBase, unittest.TestCase):
("\x81\x5f\x81\x61\x81\x7c", "replace", u"\uff3c\u2225\uff0d"),
)
-class Test_EUC_JISX0213(test_multibytecodec_support.TestBase,
+class Test_EUC_JISX0213(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jisx0213'
- tstring = test_multibytecodec_support.load_teststring('euc_jisx0213')
+ tstring = multibytecodec_support.load_teststring('euc_jisx0213')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
@@ -52,10 +52,10 @@ eucjp_commontests = (
("\xc1\x64", "strict", None),
)
-class Test_EUC_JP_COMPAT(test_multibytecodec_support.TestBase,
+class Test_EUC_JP_COMPAT(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jp'
- tstring = test_multibytecodec_support.load_teststring('euc_jp')
+ tstring = multibytecodec_support.load_teststring('euc_jp')
codectests = eucjp_commontests + (
("\xa1\xc0\\", "strict", u"\uff3c\\"),
(u"\xa5", "strict", "\x5c"),
@@ -70,17 +70,17 @@ shiftjis_commonenctests = (
("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
)
-class Test_SJIS_COMPAT(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_SJIS_COMPAT(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis'
- tstring = test_multibytecodec_support.load_teststring('shift_jis')
+ tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
("\\\x7e", "strict", u"\\\x7e"),
("\x81\x5f\x81\x61\x81\x7c", "strict", u"\uff3c\u2016\u2212"),
)
-class Test_SJISX0213(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_SJISX0213(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jisx0213'
- tstring = test_multibytecodec_support.load_teststring('shift_jisx0213')
+ tstring = multibytecodec_support.load_teststring('shift_jisx0213')
codectests = (
# invalid bytes
("abc\x80\x80\x82\x84", "strict", None),
diff --git a/lib-python/2.7/test/test_codecencodings_kr.py b/lib-python/2.7/test/test_codecencodings_kr.py
index 45ea62b5b0..7b2f2325e1 100644
--- a/lib-python/2.7/test/test_codecencodings_kr.py
+++ b/lib-python/2.7/test/test_codecencodings_kr.py
@@ -4,12 +4,12 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class Test_CP949(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_CP949(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'cp949'
- tstring = test_multibytecodec_support.load_teststring('cp949')
+ tstring = multibytecodec_support.load_teststring('cp949')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
@@ -19,9 +19,9 @@ class Test_CP949(test_multibytecodec_support.TestBase, unittest.TestCase):
("abc\x80\x80\xc1\xc4", "ignore", u"abc\uc894"),
)
-class Test_EUCKR(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_EUCKR(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'euc_kr'
- tstring = test_multibytecodec_support.load_teststring('euc_kr')
+ tstring = multibytecodec_support.load_teststring('euc_kr')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
@@ -49,9 +49,9 @@ class Test_EUCKR(test_multibytecodec_support.TestBase, unittest.TestCase):
("\xc1\xc4", "strict", u"\uc894"),
)
-class Test_JOHAB(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_JOHAB(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'johab'
- tstring = test_multibytecodec_support.load_teststring('johab')
+ tstring = multibytecodec_support.load_teststring('johab')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
diff --git a/lib-python/2.7/test/test_codecencodings_tw.py b/lib-python/2.7/test/test_codecencodings_tw.py
index c62d321dd6..748840b92c 100644
--- a/lib-python/2.7/test/test_codecencodings_tw.py
+++ b/lib-python/2.7/test/test_codecencodings_tw.py
@@ -4,12 +4,12 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class Test_Big5(test_multibytecodec_support.TestBase, unittest.TestCase):
+class Test_Big5(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'big5'
- tstring = test_multibytecodec_support.load_teststring('big5')
+ tstring = multibytecodec_support.load_teststring('big5')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
diff --git a/lib-python/2.7/test/test_codecmaps_cn.py b/lib-python/2.7/test/test_codecmaps_cn.py
index b1d1eb4283..73b10bc047 100644
--- a/lib-python/2.7/test/test_codecmaps_cn.py
+++ b/lib-python/2.7/test/test_codecmaps_cn.py
@@ -4,20 +4,20 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class TestGB2312Map(test_multibytecodec_support.TestBase_Mapping,
+class TestGB2312Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gb2312'
mapfileurl = 'http://www.pythontest.net/unicode/EUC-CN.TXT'
-class TestGBKMap(test_multibytecodec_support.TestBase_Mapping,
+class TestGBKMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gbk'
mapfileurl = 'http://www.pythontest.net/unicode/CP936.TXT'
-class TestGB18030Map(test_multibytecodec_support.TestBase_Mapping,
+class TestGB18030Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'gb18030'
mapfileurl = 'http://www.pythontest.net/unicode/gb-18030-2000.xml'
diff --git a/lib-python/2.7/test/test_codecmaps_hk.py b/lib-python/2.7/test/test_codecmaps_hk.py
index 0a41b2452e..feda7a713e 100644
--- a/lib-python/2.7/test/test_codecmaps_hk.py
+++ b/lib-python/2.7/test/test_codecmaps_hk.py
@@ -4,10 +4,10 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class TestBig5HKSCSMap(test_multibytecodec_support.TestBase_Mapping,
+class TestBig5HKSCSMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'big5hkscs'
mapfileurl = 'http://www.pythontest.net/unicode/BIG5HKSCS-2004.TXT'
diff --git a/lib-python/2.7/test/test_codecmaps_jp.py b/lib-python/2.7/test/test_codecmaps_jp.py
index 907645d7bd..f37a81c90d 100644
--- a/lib-python/2.7/test/test_codecmaps_jp.py
+++ b/lib-python/2.7/test/test_codecmaps_jp.py
@@ -4,10 +4,10 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class TestCP932Map(test_multibytecodec_support.TestBase_Mapping,
+class TestCP932Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'cp932'
mapfileurl = 'http://www.pythontest.net/unicode/CP932.TXT'
@@ -22,14 +22,14 @@ class TestCP932Map(test_multibytecodec_support.TestBase_Mapping,
supmaps.append((chr(i), unichr(i+0xfec0)))
-class TestEUCJPCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
+class TestEUCJPCOMPATMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_jp'
mapfilename = 'EUC-JP.TXT'
mapfileurl = 'http://www.pythontest.net/unicode/EUC-JP.TXT'
-class TestSJISCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
+class TestSJISCOMPATMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'shift_jis'
mapfilename = 'SHIFTJIS.TXT'
@@ -43,14 +43,14 @@ class TestSJISCOMPATMap(test_multibytecodec_support.TestBase_Mapping,
('\x81_', u'\\'),
]
-class TestEUCJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
+class TestEUCJISX0213Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_jisx0213'
mapfilename = 'EUC-JISX0213.TXT'
mapfileurl = 'http://www.pythontest.net/unicode/EUC-JISX0213.TXT'
-class TestSJISX0213Map(test_multibytecodec_support.TestBase_Mapping,
+class TestSJISX0213Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'shift_jisx0213'
mapfilename = 'SHIFT_JISX0213.TXT'
diff --git a/lib-python/2.7/test/test_codecmaps_kr.py b/lib-python/2.7/test/test_codecmaps_kr.py
index 0ba71bfb25..9e1df5e96e 100644
--- a/lib-python/2.7/test/test_codecmaps_kr.py
+++ b/lib-python/2.7/test/test_codecmaps_kr.py
@@ -4,16 +4,16 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class TestCP949Map(test_multibytecodec_support.TestBase_Mapping,
+class TestCP949Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'cp949'
mapfileurl = 'http://www.pythontest.net/unicode/CP949.TXT'
-class TestEUCKRMap(test_multibytecodec_support.TestBase_Mapping,
+class TestEUCKRMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_kr'
mapfileurl = 'http://www.pythontest.net/unicode/EUC-KR.TXT'
@@ -23,7 +23,7 @@ class TestEUCKRMap(test_multibytecodec_support.TestBase_Mapping,
pass_dectest = [('\xa4\xd4', u'\u3164')]
-class TestJOHABMap(test_multibytecodec_support.TestBase_Mapping,
+class TestJOHABMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'johab'
mapfileurl = 'http://www.pythontest.net/unicode/JOHAB.TXT'
diff --git a/lib-python/2.7/test/test_codecmaps_tw.py b/lib-python/2.7/test/test_codecmaps_tw.py
index 0d57343df3..5b500ff84f 100644
--- a/lib-python/2.7/test/test_codecmaps_tw.py
+++ b/lib-python/2.7/test/test_codecmaps_tw.py
@@ -4,15 +4,15 @@
#
from test import test_support
-from test import test_multibytecodec_support
+from test import multibytecodec_support
import unittest
-class TestBIG5Map(test_multibytecodec_support.TestBase_Mapping,
+class TestBIG5Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'big5'
mapfileurl = 'http://www.pythontest.net/unicode/BIG5.TXT'
-class TestCP950Map(test_multibytecodec_support.TestBase_Mapping,
+class TestCP950Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'cp950'
mapfileurl = 'http://www.pythontest.net/unicode/CP950.TXT'
diff --git a/lib-python/2.7/test/test_codecs.py b/lib-python/2.7/test/test_codecs.py
index efc40cf2c2..0ec8bf5a4b 100644
--- a/lib-python/2.7/test/test_codecs.py
+++ b/lib-python/2.7/test/test_codecs.py
@@ -149,19 +149,33 @@ class ReadTest(unittest.TestCase):
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
+ # Issue #32110: Test readline() followed by read(n)
+ f = getreader()
+ self.assertEqual(f.readline(), lines[0])
+ self.assertEqual(f.read(1), lines[1][0])
+ self.assertEqual(f.read(0), '')
+ self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
+
# Issue #16636: Test readline() followed by readlines()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
- # Test read() followed by read()
+ # Test read(n) followed by read()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
- # Issue #12446: Test read() followed by readlines()
+ # Issue #32110: Test read(n) followed by read(n)
+ f = getreader()
+ self.assertEqual(f.read(size=40, chars=5), data[:5])
+ self.assertEqual(f.read(1), data[5])
+ self.assertEqual(f.read(0), '')
+ self.assertEqual(f.read(100), data[6:106])
+
+ # Issue #12446: Test read(n) followed by readlines()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
diff --git a/lib-python/2.7/test/test_compile.py b/lib-python/2.7/test/test_compile.py
index 7eb0c90100..a4669e7f7d 100644
--- a/lib-python/2.7/test/test_compile.py
+++ b/lib-python/2.7/test/test_compile.py
@@ -394,7 +394,7 @@ if 1:
'from sys import stdin)',
'from sys import stdin, stdout,\nstderr',
'from sys import stdin si',
- 'from sys import stdin,'
+ 'from sys import stdin,',
'from sys import (*)',
'from sys import (stdin,, stdout, stderr)',
'from sys import (stdin, stdout),',
diff --git a/lib-python/2.7/test/test_compiler.py b/lib-python/2.7/test/test_compiler.py
index 4598811703..c98e494742 100644
--- a/lib-python/2.7/test/test_compiler.py
+++ b/lib-python/2.7/test/test_compiler.py
@@ -26,7 +26,7 @@ class CompilerTest(unittest.TestCase):
# warning: if 'os' or 'test_support' are moved in some other dir,
# they should be changed here.
libdir = os.path.dirname(os.__file__)
- testdir = os.path.dirname(test.test_support.__file__)
+ testdir = test.test_support.TEST_HOME_DIR
for dir in [testdir]:
for basename in "test_os.py",:
diff --git a/lib-python/2.7/test/test_complex.py b/lib-python/2.7/test/test_complex.py
index c0383b27e6..02b292f4bb 100644
--- a/lib-python/2.7/test/test_complex.py
+++ b/lib-python/2.7/test/test_complex.py
@@ -8,6 +8,13 @@ INF = float("inf")
NAN = float("nan")
# These tests ensure that complex math does the right thing
+# decorator for skipping tests on non-IEEE 754 platforms
+have_getformat = hasattr(float, "__getformat__")
+requires_IEEE_754 = unittest.skipUnless(have_getformat and
+ float.__getformat__("double").startswith("IEEE"),
+ "test requires IEEE 754 doubles")
+
+
class ComplexTest(unittest.TestCase):
def assertAlmostEqual(self, a, b):
@@ -441,6 +448,28 @@ class ComplexTest(unittest.TestCase):
b = 'y %s x' % op
self.assertTrue(type(eval(a)) is type(eval(b)) is xcomplex)
+ @requires_IEEE_754
+ def test_constructor_special_numbers(self):
+ class complex2(complex):
+ pass
+ for x in 0.0, -0.0, INF, -INF, NAN:
+ for y in 0.0, -0.0, INF, -INF, NAN:
+ z = complex(x, y)
+ self.assertFloatsAreIdentical(z.real, x)
+ self.assertFloatsAreIdentical(z.imag, y)
+ z = complex2(x, y)
+ self.assertIs(type(z), complex2)
+ self.assertFloatsAreIdentical(z.real, x)
+ self.assertFloatsAreIdentical(z.imag, y)
+ z = complex(complex2(x, y))
+ self.assertIs(type(z), complex)
+ self.assertFloatsAreIdentical(z.real, x)
+ self.assertFloatsAreIdentical(z.imag, y)
+ z = complex2(complex(x, y))
+ self.assertIs(type(z), complex2)
+ self.assertFloatsAreIdentical(z.real, x)
+ self.assertFloatsAreIdentical(z.imag, y)
+
def test_hash(self):
for x in xrange(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
diff --git a/lib-python/2.7/test/test_cookielib.py b/lib-python/2.7/test/test_cookielib.py
index f2dd9727d1..f3711b966e 100644
--- a/lib-python/2.7/test/test_cookielib.py
+++ b/lib-python/2.7/test/test_cookielib.py
@@ -6,7 +6,7 @@ import os
import re
import time
-from cookielib import http2time, time2isoz, time2netscape
+from cookielib import http2time, time2isoz, iso2time, time2netscape
from unittest import TestCase
from test import test_support
@@ -117,6 +117,19 @@ class DateTimeTests(TestCase):
"http2time(test) %s" % (test, http2time(test))
)
+ def test_http2time_redos_regression_actually_completes(self):
+ # LOOSE_HTTP_DATE_RE was vulnerable to malicious input which caused catastrophic backtracking (REDoS).
+ # If we regress to cubic complexity, this test will take a very long time to succeed.
+ # If fixed, it should complete within a fraction of a second.
+ http2time("01 Jan 1970{}00:00:00 GMT!".format(" " * 10 ** 5))
+ http2time("01 Jan 1970 00:00:00{}GMT!".format(" " * 10 ** 5))
+
+ def test_iso2time_performance_regression(self):
+ # If ISO_DATE_RE regresses to quadratic complexity, this test will take a very long time to succeed.
+ # If fixed, it should complete within a fraction of a second.
+ iso2time('1994-02-03{}14:15:29 -0100!'.format(' '*10**6))
+ iso2time('1994-02-03 14:15:29{}-0100!'.format(' '*10**6))
+
class HeaderTests(TestCase):
@@ -368,6 +381,7 @@ class CookieTests(TestCase):
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
+ ("http://foo.bar.com/", "bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
@@ -378,6 +392,8 @@ class CookieTests(TestCase):
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
+ ("http://barfoo.com", ".foo.com", False),
+ ("http://barfoo.com", "foo.com", False),
]:
request = urllib2.Request(url)
r = pol.domain_return_ok(domain, request)
@@ -646,6 +662,35 @@ class CookieTests(TestCase):
req = Request("http://www.example.com")
self.assertEqual(request_path(req), "/")
+ def test_path_prefix_match(self):
+ from cookielib import CookieJar, DefaultCookiePolicy
+ from urllib2 import Request
+
+ pol = DefaultCookiePolicy()
+ strict_ns_path_pol = DefaultCookiePolicy(strict_ns_set_path=True)
+
+ c = CookieJar(pol)
+ base_url = "http://bar.com"
+ interact_netscape(c, base_url, 'spam=eggs; Path=/foo')
+ cookie = c._cookies['bar.com']['/foo']['spam']
+
+ for path, ok in [('/foo', True),
+ ('/foo/', True),
+ ('/foo/bar', True),
+ ('/', False),
+ ('/foobad/foo', False)]:
+ url = '{0}{1}'.format(base_url, path)
+ req = Request(url)
+ h = interact_netscape(c, url)
+ if ok:
+ self.assertIn('spam=eggs', h,
+ "cookie not set for {0}".format(path))
+ self.assertTrue(strict_ns_path_pol.set_ok_path(cookie, req))
+ else:
+ self.assertNotIn('spam=eggs', h,
+ "cookie set for {0}".format(path))
+ self.assertFalse(strict_ns_path_pol.set_ok_path(cookie, req))
+
def test_request_port(self):
from urllib2 import Request
from cookielib import request_port, DEFAULT_HTTP_PORT
@@ -938,6 +983,33 @@ class CookieTests(TestCase):
c.add_cookie_header(req)
self.assertFalse(req.has_header("Cookie"))
+ c.clear()
+
+ pol.set_blocked_domains([])
+ req = Request("http://acme.com/")
+ res = FakeResponse(headers, "http://acme.com/")
+ cookies = c.make_cookies(res, req)
+ c.extract_cookies(res, req)
+ self.assertEqual(len(c), 1)
+
+ req = Request("http://acme.com/")
+ c.add_cookie_header(req)
+ self.assertTrue(req.has_header("Cookie"))
+
+ req = Request("http://badacme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(pol.return_ok(cookies[0], req))
+ self.assertFalse(req.has_header("Cookie"))
+
+ p = pol.set_blocked_domains(["acme.com"])
+ req = Request("http://acme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
+ req = Request("http://badacme.com/")
+ c.add_cookie_header(req)
+ self.assertFalse(req.has_header("Cookie"))
+
def test_secure(self):
from cookielib import CookieJar, DefaultCookiePolicy
diff --git a/lib-python/2.7/test/test_copy_reg.py b/lib-python/2.7/test/test_copy_reg.py
index 8cdb8b7d2a..17ccbd084d 100644
--- a/lib-python/2.7/test/test_copy_reg.py
+++ b/lib-python/2.7/test/test_copy_reg.py
@@ -17,6 +17,12 @@ class WithWeakref(object):
class WithPrivate(object):
__slots__ = ('__spam',)
+class _WithLeadingUnderscoreAndPrivate(object):
+ __slots__ = ('__spam',)
+
+class ___(object):
+ __slots__ = ('__spam',)
+
class WithSingleString(object):
__slots__ = 'spam'
@@ -105,6 +111,10 @@ class CopyRegTestCase(unittest.TestCase):
self.assertEqual(copy_reg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copy_reg._slotnames(WithPrivate), expected)
+ expected = ['_WithLeadingUnderscoreAndPrivate__spam']
+ self.assertEqual(copy_reg._slotnames(_WithLeadingUnderscoreAndPrivate),
+ expected)
+ self.assertEqual(copy_reg._slotnames(___), ['__spam'])
self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
diff --git a/lib-python/2.7/test/test_cprofile.py b/lib-python/2.7/test/test_cprofile.py
index af3fe62a9a..83d52296e2 100644
--- a/lib-python/2.7/test/test_cprofile.py
+++ b/lib-python/2.7/test/test_cprofile.py
@@ -1,7 +1,9 @@
"""Test suite for the cProfile module."""
import sys
+import unittest
from test.test_support import run_unittest, TESTFN, unlink
+from test.support.script_helper import assert_python_failure
# rip off all interesting stuff from test_profile
import cProfile
@@ -26,8 +28,14 @@ class CProfileTest(ProfileTest):
unlink(TESTFN)
+class TestCommandLine(unittest.TestCase):
+ def test_sort(self):
+ rc, out, err = assert_python_failure('-m', 'cProfile', '-s', 'demo')
+ self.assertGreater(rc, 0)
+ self.assertIn(b"option -s: invalid choice: 'demo'", err)
+
def test_main():
- run_unittest(CProfileTest)
+ run_unittest(CProfileTest, TestCommandLine)
def main():
if '-r' not in sys.argv:
diff --git a/lib-python/2.7/test/test_crypt.py b/lib-python/2.7/test/test_crypt.py
index 4db200d403..7cd9c71981 100644
--- a/lib-python/2.7/test/test_crypt.py
+++ b/lib-python/2.7/test/test_crypt.py
@@ -1,14 +1,20 @@
+import sys
from test import test_support
import unittest
crypt = test_support.import_module('crypt')
+if sys.platform.startswith('openbsd'):
+ raise unittest.SkipTest('The only supported method on OpenBSD is Blowfish')
+
class CryptTestCase(unittest.TestCase):
def test_crypt(self):
- c = crypt.crypt('mypassword', 'ab')
- if test_support.verbose:
- print 'Test encryption: ', c
+ cr = crypt.crypt('mypassword', 'ab')
+ if cr is not None:
+ cr2 = crypt.crypt('mypassword', cr)
+ self.assertEqual(cr2, cr)
+
def test_main():
test_support.run_unittest(CryptTestCase)
diff --git a/lib-python/2.7/test/test_csv.py b/lib-python/2.7/test/test_csv.py
index 265d024ce4..03a97fcadf 100644
--- a/lib-python/2.7/test/test_csv.py
+++ b/lib-python/2.7/test/test_csv.py
@@ -1041,6 +1041,15 @@ Stonecutters Seafood and Chop House+ Lemont+ IL+ 12/19/02+ Week Back
self.assertEqual(sniffer.has_header(self.header2 + self.sample8),
True)
+ def test_guess_quote_and_delimiter(self):
+ sniffer = csv.Sniffer()
+ for header in (";'123;4';", "'123;4';", ";'123;4'", "'123;4'"):
+ dialect = sniffer.sniff(header, ",;")
+ self.assertEqual(dialect.delimiter, ';')
+ self.assertEqual(dialect.quotechar, "'")
+ self.assertIs(dialect.doublequote, False)
+ self.assertIs(dialect.skipinitialspace, False)
+
def test_sniff(self):
sniffer = csv.Sniffer()
dialect = sniffer.sniff(self.sample1)
diff --git a/lib-python/2.7/test/test_curses.py b/lib-python/2.7/test/test_curses.py
index bce014c1e0..c62e96f323 100644
--- a/lib-python/2.7/test/test_curses.py
+++ b/lib-python/2.7/test/test_curses.py
@@ -15,8 +15,9 @@ import sys
import tempfile
import unittest
-from test.test_support import (requires, import_module, verbose, run_unittest,
- cpython_only)
+from test.support import (requires, import_module, verbose, run_unittest,
+ SaveSignals, cpython_only)
+
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
@@ -25,8 +26,12 @@ requires('curses')
# If either of these don't exist, skip the tests.
curses = import_module('curses')
-import_module('curses.panel')
import_module('curses.ascii')
+import_module('curses.textpad')
+try:
+ import curses.panel
+except ImportError:
+ pass
def requires_curses_func(name):
return unittest.skipUnless(hasattr(curses, name),
@@ -62,6 +67,8 @@ class TestCurses(unittest.TestCase):
del cls.tmp
def setUp(self):
+ self.save_signals = SaveSignals()
+ self.save_signals.save()
if verbose:
# just to make the test output a little more readable
print('')
@@ -71,6 +78,7 @@ class TestCurses(unittest.TestCase):
def tearDown(self):
curses.resetty()
curses.endwin()
+ self.save_signals.restore()
def test_window_funcs(self):
"Test the methods of windows"
@@ -84,7 +92,7 @@ class TestCurses(unittest.TestCase):
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
- for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
+ for meth in [stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
@@ -117,6 +125,13 @@ class TestCurses(unittest.TestCase):
win.border(65, 66, 67, 68,
69, [], 71, 72)
+ win.box(65, 67)
+ win.box('!', '_')
+ win.box(b':', b'~')
+ self.assertRaises(TypeError, win.box, 65, 66, 67)
+ self.assertRaises(TypeError, win.box, 65)
+ win.box()
+
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
@@ -132,7 +147,9 @@ class TestCurses(unittest.TestCase):
stdscr.idcok(1)
stdscr.idlok(1)
- stdscr.immedok(1)
+ if hasattr(stdscr, 'immedok'):
+ stdscr.immedok(1)
+ stdscr.immedok(0)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
@@ -166,25 +183,27 @@ class TestCurses(unittest.TestCase):
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
- stdscr.syncok(1)
+ if hasattr(stdscr, 'syncok') and not sys.platform.startswith("sunos"):
+ stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
- stdscr.chgat(5, 2, 3, curses.A_BLINK)
- stdscr.chgat(3, curses.A_BOLD)
- stdscr.chgat(5, 8, curses.A_UNDERLINE)
- stdscr.chgat(curses.A_BLINK)
+ if hasattr(stdscr, 'chgat'):
+ stdscr.chgat(5, 2, 3, curses.A_BLINK)
+ stdscr.chgat(3, curses.A_BOLD)
+ stdscr.chgat(5, 8, curses.A_UNDERLINE)
+ stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
- if hasattr(curses, 'resize'):
- stdscr.resize()
- if hasattr(curses, 'enclose'):
- stdscr.enclose()
+ if hasattr(stdscr, 'resize'):
+ stdscr.resize(25, 80)
+ if hasattr(stdscr, 'enclose'):
+ stdscr.enclose(10, 10)
self.assertRaises(ValueError, stdscr.getstr, -400)
self.assertRaises(ValueError, stdscr.getstr, 2, 3, -400)
@@ -196,14 +215,18 @@ class TestCurses(unittest.TestCase):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
- curses.filter, curses.flash, curses.flushinp,
+ curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
- curses.termname, curses.erasechar, curses.getsyx]:
+ curses.termname, curses.erasechar]:
func()
+ if hasattr(curses, 'filter'):
+ curses.filter()
+ if hasattr(curses, 'getsyx'):
+ curses.getsyx()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
@@ -227,20 +250,23 @@ class TestCurses(unittest.TestCase):
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
- curses.setsyx(5,5)
+ if hasattr(curses, 'setsyx'):
+ curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
- curses.typeahead(sys.__stdin__.fileno())
+ if hasattr(curses, 'typeahead'):
+ curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
- curses.use_env(1)
+ if hasattr(curses, 'use_env'):
+ curses.use_env(1)
# Functions only available on a few platforms
def test_colors_funcs(self):
if not curses.has_colors():
- self.skip('requires colors support')
+ self.skipTest('requires colors support')
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
@@ -263,12 +289,13 @@ class TestCurses(unittest.TestCase):
def test_getmouse(self):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
if availmask == 0:
- self.skip('mouse stuff not available')
+ self.skipTest('mouse stuff not available')
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
+ @requires_curses_func('panel')
def test_userptr_without_set(self):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
@@ -278,6 +305,7 @@ class TestCurses(unittest.TestCase):
p.userptr()
@cpython_only
+ @requires_curses_func('panel')
def test_userptr_memory_leak(self):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
@@ -291,8 +319,10 @@ class TestCurses(unittest.TestCase):
"set_userptr leaked references")
@cpython_only
+ @requires_curses_func('panel')
def test_userptr_segfault(self):
- panel = curses.panel.new_panel(self.stdscr)
+ w = curses.newwin(10, 10)
+ panel = curses.panel.new_panel(w)
class A:
def __del__(self):
panel.set_userptr(None)
@@ -300,7 +330,8 @@ class TestCurses(unittest.TestCase):
panel.set_userptr(None)
def test_new_curses_panel(self):
- panel = curses.panel.new_panel(self.stdscr)
+ w = curses.newwin(10, 10)
+ panel = curses.panel.new_panel(w)
self.assertRaises(TypeError, type(panel))
@requires_curses_func('is_term_resized')
@@ -330,6 +361,16 @@ class TestCurses(unittest.TestCase):
b = curses.tparm(curses.tigetstr("cup"), 5, 3)
self.assertIs(type(b), bytes)
+ def test_issue13051(self):
+ stdscr = self.stdscr
+ if not hasattr(stdscr, 'resize'):
+ raise unittest.SkipTest('requires curses.window.resize')
+ box = curses.textpad.Textbox(stdscr, insert_mode=True)
+ lines, cols = stdscr.getmaxyx()
+ stdscr.resize(lines-2, cols-2)
+ # this may cause infinite recursion, leading to a RuntimeError
+ box._insert_printable_char('a')
+
class TestAscii(unittest.TestCase):
@@ -361,6 +402,25 @@ class TestAscii(unittest.TestCase):
check(curses.ascii.ispunct, c in string.punctuation)
check(curses.ascii.isxdigit, c in string.hexdigits)
+ for i in (-2, -1, 256, sys.maxunicode, sys.maxunicode+1):
+ self.assertFalse(curses.ascii.isalnum(i))
+ self.assertFalse(curses.ascii.isalpha(i))
+ self.assertFalse(curses.ascii.isdigit(i))
+ self.assertFalse(curses.ascii.islower(i))
+ self.assertFalse(curses.ascii.isspace(i))
+ self.assertFalse(curses.ascii.isupper(i))
+
+ self.assertFalse(curses.ascii.isascii(i))
+ self.assertFalse(curses.ascii.isctrl(i))
+ self.assertFalse(curses.ascii.iscntrl(i))
+ self.assertFalse(curses.ascii.isblank(i))
+ self.assertFalse(curses.ascii.isgraph(i))
+ self.assertFalse(curses.ascii.isprint(i))
+ self.assertFalse(curses.ascii.ispunct(i))
+ self.assertFalse(curses.ascii.isxdigit(i))
+
+ self.assertFalse(curses.ascii.ismeta(-1))
+
def test_ascii(self):
ascii = curses.ascii.ascii
self.assertEqual(ascii('\xc1'), 'A')
diff --git a/lib-python/2.7/test/test_datetime.py b/lib-python/2.7/test/test_datetime.py
index 20abe74bbc..2620fbb9fb 100644
--- a/lib-python/2.7/test/test_datetime.py
+++ b/lib-python/2.7/test/test_datetime.py
@@ -492,6 +492,69 @@ class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
+ def test_issue31752(self):
+ # The interpreter shouldn't crash because divmod() returns negative
+ # remainder.
+ for inttype in (int, long):
+ class BadInt(inttype):
+ def __mul__(self, other):
+ return Prod()
+ def __rmul__(self, other):
+ return Prod()
+ def __floordiv__(self, other):
+ return Prod()
+ def __rfloordiv__(self, other):
+ return Prod()
+
+ class BadLong(long):
+ def __mul__(self, other):
+ return Prod()
+ def __rmul__(self, other):
+ return Prod()
+ def __floordiv__(self, other):
+ return Prod()
+ def __rfloordiv__(self, other):
+ return Prod()
+
+ class Prod:
+ def __add__(self, other):
+ return Sum()
+ def __radd__(self, other):
+ return Sum()
+
+ for inttype2 in (int, long):
+ class Sum(inttype2):
+ def __divmod__(self, other):
+ return divmodresult
+
+ for divmodresult in [None, (), (0, 1, 2), (0, -1)]:
+ # The following examples should not crash.
+ try:
+ timedelta(microseconds=BadInt(1))
+ except TypeError:
+ pass
+ try:
+ timedelta(hours=BadInt(1))
+ except TypeError:
+ pass
+ try:
+ timedelta(weeks=BadInt(1))
+ except (TypeError, ValueError):
+ pass
+ try:
+ timedelta(1) * BadInt(1)
+ except (TypeError, ValueError):
+ pass
+ try:
+ BadInt(1) * timedelta(1)
+ except TypeError:
+ pass
+ try:
+ timedelta(1) // BadInt(1)
+ except TypeError:
+ pass
+
+
#############################################################################
# date tests
diff --git a/lib-python/2.7/test/test_decimal.py b/lib-python/2.7/test/test_decimal.py
index 14b7f42f56..f481075cee 100644
--- a/lib-python/2.7/test/test_decimal.py
+++ b/lib-python/2.7/test/test_decimal.py
@@ -914,10 +914,10 @@ class DecimalFormatTest(unittest.TestCase):
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\xd9\xab':
- self.skipTest('inappropriate decimal point separator'
+ self.skipTest('inappropriate decimal point separator '
'({!r} not {!r})'.format(decimal_point, '\xd9\xab'))
if thousands_sep != '\xd9\xac':
- self.skipTest('inappropriate thousands separator'
+ self.skipTest('inappropriate thousands separator '
'({!r} not {!r})'.format(thousands_sep, '\xd9\xac'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
diff --git a/lib-python/2.7/test/test_deque.py b/lib-python/2.7/test/test_deque.py
index 2ab3f21c6a..b5a1248065 100644
--- a/lib-python/2.7/test/test_deque.py
+++ b/lib-python/2.7/test/test_deque.py
@@ -663,6 +663,21 @@ class TestSubclass(unittest.TestCase):
d1 == d2 # not clear if this is supposed to be True or False,
# but it used to give a SystemError
+ @test_support.cpython_only
+ def test_bug_31608(self):
+ # The interpreter used to crash in specific cases where a deque
+ # subclass returned a non-deque.
+ class X(deque):
+ pass
+ d = X()
+ def bad___new__(cls, *args, **kwargs):
+ return [42]
+ X.__new__ = bad___new__
+ with self.assertRaises(TypeError):
+ d * 42 # shouldn't crash
+ with self.assertRaises(TypeError):
+ d + deque([1, 2, 3]) # shouldn't crash
+
class SubclassWithKwargs(deque):
def __init__(self, newarg=1):
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
index a8555dd50e..354d3693b2 100644
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1,15 +1,22 @@
import __builtin__
+import copy
import gc
+import pickle
import sys
import types
import unittest
import popen2 # trigger early the warning from popen2.py
+import warnings
import weakref
from copy import deepcopy
from test import test_support
+def func(*args):
+ return args
+
+
class OperatorsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
@@ -397,6 +404,14 @@ class OperatorsTest(unittest.TestCase):
a.setstate(100)
self.assertEqual(a.getstate(), 100)
+ def test_wrap_lenfunc_bad_cast(self):
+ try:
+ large_range = xrange(sys.maxsize)
+ except OverflowError as exc:
+ self.skipTest("xrange(sys.maxsize) failed with: %s" % exc)
+ self.assertEqual(large_range.__len__(), sys.maxsize)
+
+
class ClassPropertiesAndMethods(unittest.TestCase):
def assertHasAttr(self, obj, name):
@@ -1415,6 +1430,21 @@ order (MRO) for bases """
else:
self.fail("classmethod shouldn't accept keyword args")
+ @test_support.cpython_only
+ def test_classmethod_copy_pickle(self):
+ cm = classmethod(func)
+ with test_support.check_py3k_warnings(
+ (".*classmethod", DeprecationWarning)):
+ copy.copy(cm)
+ with test_support.check_py3k_warnings(
+ (".*classmethod", DeprecationWarning)):
+ copy.deepcopy(cm)
+ for proto in range(2):
+ self.assertRaises(TypeError, pickle.dumps, cm, proto)
+ with test_support.check_py3k_warnings(
+ (".*classmethod", DeprecationWarning)):
+ pickle.dumps(cm, 2)
+
@test_support.impl_detail("the module 'xxsubtype' is internal")
def test_classmethods_in_c(self):
# Testing C-based class methods...
@@ -1463,6 +1493,21 @@ order (MRO) for bases """
self.assertEqual(d.foo(1), (d, 1))
self.assertEqual(D.foo(d, 1), (d, 1))
+ @test_support.cpython_only
+ def test_staticmethod_copy_pickle(self):
+ sm = staticmethod(func)
+ with test_support.check_py3k_warnings(
+ (".*staticmethod", DeprecationWarning)):
+ copy.copy(sm)
+ with test_support.check_py3k_warnings(
+ (".*staticmethod", DeprecationWarning)):
+ copy.deepcopy(sm)
+ for proto in range(2):
+ self.assertRaises(TypeError, pickle.dumps, sm, proto)
+ with test_support.check_py3k_warnings(
+ (".*staticmethod", DeprecationWarning)):
+ pickle.dumps(sm, 2)
+
@test_support.impl_detail("the module 'xxsubtype' is internal")
def test_staticmethods_in_c(self):
# Testing C-based static methods...
@@ -1551,6 +1596,86 @@ order (MRO) for bases """
self.assertEqual(b.foo, 3)
self.assertEqual(b.__class__, D)
+ @unittest.expectedFailure
+ def test_bad_new(self):
+ self.assertRaises(TypeError, object.__new__)
+ self.assertRaises(TypeError, object.__new__, '')
+ self.assertRaises(TypeError, list.__new__, object)
+ self.assertRaises(TypeError, object.__new__, list)
+ class C(object):
+ __new__ = list.__new__
+ self.assertRaises(TypeError, C)
+ class C(list):
+ __new__ = object.__new__
+ self.assertRaises(TypeError, C)
+
+ def test_object_new(self):
+ class A(object):
+ pass
+ object.__new__(A)
+ self.assertRaises(TypeError, object.__new__, A, 5)
+ object.__init__(A())
+ self.assertRaises(TypeError, object.__init__, A(), 5)
+
+ class A(object):
+ def __init__(self, foo):
+ self.foo = foo
+ object.__new__(A)
+ object.__new__(A, 5)
+ object.__init__(A(3))
+ self.assertRaises(TypeError, object.__init__, A(3), 5)
+
+ class A(object):
+ def __new__(cls, foo):
+ return object.__new__(cls)
+ object.__new__(A)
+ self.assertRaises(TypeError, object.__new__, A, 5)
+ object.__init__(A(3))
+ object.__init__(A(3), 5)
+
+ class A(object):
+ def __new__(cls, foo):
+ return object.__new__(cls)
+ def __init__(self, foo):
+ self.foo = foo
+ object.__new__(A)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always', DeprecationWarning)
+ a = object.__new__(A, 5)
+ self.assertEqual(type(a), A)
+ self.assertEqual(len(w), 1)
+ object.__init__(A(3))
+ a = A(3)
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter('always', DeprecationWarning)
+ object.__init__(a, 5)
+ self.assertEqual(a.foo, 3)
+ self.assertEqual(len(w), 1)
+
+ @unittest.expectedFailure
+ def test_restored_object_new(self):
+ class A(object):
+ def __new__(cls, *args, **kwargs):
+ raise AssertionError
+ self.assertRaises(AssertionError, A)
+ class B(A):
+ __new__ = object.__new__
+ def __init__(self, foo):
+ self.foo = foo
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', DeprecationWarning)
+ b = B(3)
+ self.assertEqual(b.foo, 3)
+ self.assertEqual(b.__class__, B)
+ del B.__new__
+ self.assertRaises(AssertionError, B)
+ del A.__new__
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', DeprecationWarning)
+ b = B(3)
+ self.assertEqual(b.foo, 3)
+ self.assertEqual(b.__class__, B)
+
def test_altmro(self):
# Testing mro() and overriding it...
class A(object):
@@ -2081,6 +2206,21 @@ order (MRO) for bases """
else:
self.fail("expected ZeroDivisionError from bad property")
+ @test_support.cpython_only
+ def test_property_copy_pickle(self):
+ p = property(func)
+ with test_support.check_py3k_warnings(
+ (".*property", DeprecationWarning)):
+ copy.copy(p)
+ with test_support.check_py3k_warnings(
+ (".*property", DeprecationWarning)):
+ copy.deepcopy(p)
+ for proto in range(2):
+ self.assertRaises(TypeError, pickle.dumps, p, proto)
+ with test_support.check_py3k_warnings(
+ (".*property", DeprecationWarning)):
+ pickle.dumps(p, 2)
+
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_properties_doc_attrib(self):
@@ -3774,6 +3914,24 @@ order (MRO) for bases """
self.assertEqual(isinstance(d, D), True)
self.assertEqual(d.foo, 1)
+ class C(object):
+ @staticmethod
+ def __new__(*args):
+ return args
+ self.assertEqual(C(1, 2), (C, 1, 2))
+ class D(C):
+ pass
+ self.assertEqual(D(1, 2), (D, 1, 2))
+
+ class C(object):
+ @classmethod
+ def __new__(*args):
+ return args
+ self.assertEqual(C(1, 2), (C, C, 1, 2))
+ class D(C):
+ pass
+ self.assertEqual(D(1, 2), (D, D, 1, 2))
+
def test_imul_bug(self):
# Testing for __imul__ problems...
# SF bug 544647
diff --git a/lib-python/2.7/test/test_dict.py b/lib-python/2.7/test/test_dict.py
index 8e25f4e9cc..32bd6fa34e 100644
--- a/lib-python/2.7/test/test_dict.py
+++ b/lib-python/2.7/test/test_dict.py
@@ -3,6 +3,7 @@ from test import test_support
import UserDict, random, string
import gc, weakref
+import sys
class DictTest(unittest.TestCase):
@@ -423,6 +424,12 @@ class DictTest(unittest.TestCase):
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
+ def test_repr_deep(self):
+ d = {}
+ for i in range(sys.getrecursionlimit() + 100):
+ d = {1: d}
+ self.assertRaises(RuntimeError, repr, d)
+
def test_le(self):
self.assertFalse({} < {})
self.assertFalse({1: 2} < {1L: 2L})
@@ -691,6 +698,99 @@ class DictTest(unittest.TestCase):
test_support.check_free_after_iterating(self, lambda d: iter(d.viewvalues()), dict)
test_support.check_free_after_iterating(self, lambda d: iter(d.viewitems()), dict)
+ @test_support.cpython_only
+ def test_equal_operator_modifying_operand(self):
+ # test fix for seg fault reported in issue 27945 part 3.
+ class X(object):
+ def __del__(self):
+ dict_b.clear()
+
+ def __eq__(self, other):
+ dict_a.clear()
+ return True
+
+ def __hash__(self):
+ return 13
+
+ dict_a = {X(): 0}
+ dict_b = {X(): X()}
+ self.assertTrue(dict_a == dict_b)
+
+ def test_fromkeys_operator_modifying_dict_operand(self):
+ # test fix for seg fault reported in issue 27945 part 4a.
+ class X(int):
+ def __hash__(self):
+ return 13
+
+ def __eq__(self, other):
+ if len(d) > 1:
+ d.clear()
+ return False
+
+ d = {} # this is required to exist so that d can be constructed!
+ d = {X(1): 1, X(2): 2}
+ try:
+ dict.fromkeys(d) # shouldn't crash
+ except RuntimeError: # implementation defined
+ pass
+
+ def test_fromkeys_operator_modifying_set_operand(self):
+ # test fix for seg fault reported in issue 27945 part 4b.
+ class X(int):
+ def __hash__(self):
+ return 13
+
+ def __eq__(self, other):
+ if len(d) > 1:
+ d.clear()
+ return False
+
+ d = {} # this is required to exist so that d can be constructed!
+ d = {X(1), X(2)}
+ try:
+ dict.fromkeys(d) # shouldn't crash
+ except RuntimeError: # implementation defined
+ pass
+
+ def test_dictitems_contains_use_after_free(self):
+ class X(object):
+ def __eq__(self, other):
+ d.clear()
+ return NotImplemented
+
+ __hash__ = object.__hash__ # silence Py3k warning
+
+ d = {0: set()}
+ try:
+ (0, X()) in d.iteritems() # shouldn't crash
+ except RuntimeError: # implementation defined
+ pass
+
+ def test_init_use_after_free(self):
+ class X(object):
+ def __hash__(self):
+ pair[:] = []
+ return 13
+
+ pair = [X(), 123]
+ dict([pair])
+
+ def test_oob_indexing_dictiter_iternextitem(self):
+ class X(int):
+ def __del__(self):
+ d.clear()
+
+ d = {i: X(i) for i in range(8)}
+
+ def iter_and_mutate():
+ for result in d.iteritems():
+ if result[0] == 2:
+ d[2] = None # free d[2] --> X(2).__del__ was called
+ gc.collect()
+
+ self.assertRaises(RuntimeError, iter_and_mutate)
+
+
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
diff --git a/lib-python/2.7/test/test_difflib.py b/lib-python/2.7/test/test_difflib.py
index 35f2c36ca7..d8277b79b8 100644
--- a/lib-python/2.7/test/test_difflib.py
+++ b/lib-python/2.7/test/test_difflib.py
@@ -269,13 +269,33 @@ class TestOutputFormat(unittest.TestCase):
self.assertEqual(fmt(3,6), '4,6')
self.assertEqual(fmt(0,0), '0')
+class TestJunkAPIs(unittest.TestCase):
+ def test_is_line_junk_true(self):
+ for line in ['#', ' ', ' #', '# ', ' # ', '']:
+ self.assertTrue(difflib.IS_LINE_JUNK(line), repr(line))
+
+ def test_is_line_junk_false(self):
+ for line in ['##', ' ##', '## ', 'abc ', 'abc #', 'Mr. Moose is up!']:
+ self.assertFalse(difflib.IS_LINE_JUNK(line), repr(line))
+
+ def test_is_line_junk_REDOS(self):
+ evil_input = ('\t' * 1000000) + '##'
+ self.assertFalse(difflib.IS_LINE_JUNK(evil_input))
+
+ def test_is_character_junk_true(self):
+ for char in [' ', '\t']:
+ self.assertTrue(difflib.IS_CHARACTER_JUNK(char), repr(char))
+
+ def test_is_character_junk_false(self):
+ for char in ['a', '#', '\n', '\f', '\r', '\v']:
+ self.assertFalse(difflib.IS_CHARACTER_JUNK(char), repr(char))
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(
TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs,
- TestOutputFormat, Doctests)
+ TestOutputFormat, TestJunkAPIs)
if __name__ == '__main__':
test_main()
diff --git a/lib-python/2.7/test/test_doctest.py b/lib-python/2.7/test/test_doctest.py
index 9afc48bb5f..fb8d316791 100644
--- a/lib-python/2.7/test/test_doctest.py
+++ b/lib-python/2.7/test/test_doctest.py
@@ -2355,7 +2355,11 @@ def test_unittest_reportflags():
Then the default eporting options are ignored:
>>> result = suite.run(unittest.TestResult())
- >>> print result.failures[0][1] # doctest: +ELLIPSIS
+
+ *NOTE*: These doctest are intentionally not placed in raw string to depict
+ the trailing whitespace using `\x20` in the diff below.
+
+ >>> print(result.failures[0][1]) # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
@@ -2368,7 +2372,7 @@ def test_unittest_reportflags():
Differences (ndiff with -expected +actual):
a
- <BLANKLINE>
- +
+ +\x20
b
<BLANKLINE>
<BLANKLINE>
@@ -2717,6 +2721,47 @@ def old_test4(): """
TestResults(failed=0, attempted=4)
"""
+def test_no_trailing_whitespace_stripping():
+ r"""
+ The fancy reports had a bug for a long time where any trailing whitespace on
+ the reported diff lines was stripped, making it impossible to see the
+ differences in line reported as different that differed only in the amount of
+ trailing whitespace. The whitespace still isn't particularly visible unless
+ you use NDIFF, but at least it is now there to be found.
+
+ *NOTE*: This snippet was intentionally put inside a raw string to get rid of
+ leading whitespace error in executing the example below
+
+ >>> def f(x):
+ ... r'''
+ ... >>> print('\n'.join(['a ', 'b']))
+ ... a
+ ... b
+ ... '''
+ """
+ """
+ *NOTE*: These doctest are not placed in raw string to depict the trailing whitespace
+ using `\x20`
+
+ >>> test = doctest.DocTestFinder().find(f)[0]
+ >>> flags = doctest.REPORT_NDIFF
+ >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
+ ... # doctest: +ELLIPSIS
+ **********************************************************************
+ File ..., line 3, in f
+ Failed example:
+ print('\n'.join(['a ', 'b']))
+ Differences (ndiff with -expected +actual):
+ - a
+ + a
+ b
+ TestResults(failed=1, attempted=1)
+
+ *NOTE*: `\x20` is for checking the trailing whitespace on the +a line above.
+ We cannot use actual spaces there, as a commit hook prevents from committing
+ patches that contain trailing whitespace. More info on Issue 24746.
+ """
+
######################################################################
## Main
######################################################################
diff --git a/lib-python/2.7/test/test_ensurepip.py b/lib-python/2.7/test/test_ensurepip.py
index 3316fcfaa1..cb9d10a104 100644
--- a/lib-python/2.7/test/test_ensurepip.py
+++ b/lib-python/2.7/test/test_ensurepip.py
@@ -21,6 +21,7 @@ class EnsurepipMixin:
def setUp(self):
run_pip_patch = mock.patch("ensurepip._run_pip")
self.run_pip = run_pip_patch.start()
+ self.run_pip.return_value = 0
self.addCleanup(run_pip_patch.stop)
# Avoid side effects on the actual os module
@@ -258,7 +259,7 @@ class TestBootstrappingMainFunction(EnsurepipMixin, unittest.TestCase):
self.assertFalse(self.run_pip.called)
def test_basic_bootstrapping(self):
- ensurepip._main([])
+ exit_code = ensurepip._main([])
self.run_pip.assert_called_once_with(
[
@@ -270,6 +271,13 @@ class TestBootstrappingMainFunction(EnsurepipMixin, unittest.TestCase):
additional_paths = self.run_pip.call_args[0][1]
self.assertEqual(len(additional_paths), 2)
+ self.assertEqual(exit_code, 0)
+
+ def test_bootstrapping_error_code(self):
+ self.run_pip.return_value = 2
+ exit_code = ensurepip._main([])
+ self.assertEqual(exit_code, 2)
+
class TestUninstallationMainFunction(EnsurepipMixin, unittest.TestCase):
@@ -284,7 +292,7 @@ class TestUninstallationMainFunction(EnsurepipMixin, unittest.TestCase):
def test_basic_uninstall(self):
with fake_pip():
- ensurepip._uninstall._main([])
+ exit_code = ensurepip._uninstall._main([])
self.run_pip.assert_called_once_with(
[
@@ -293,6 +301,13 @@ class TestUninstallationMainFunction(EnsurepipMixin, unittest.TestCase):
]
)
+ self.assertEqual(exit_code, 0)
+
+ def test_uninstall_error_code(self):
+ with fake_pip():
+ self.run_pip.return_value = 2
+ exit_code = ensurepip._uninstall._main([])
+ self.assertEqual(exit_code, 2)
if __name__ == "__main__":
test.test_support.run_unittest(__name__)
diff --git a/lib-python/2.7/test/test_file.py b/lib-python/2.7/test/test_file.py
index b5996ff4f8..1de89c61c0 100644
--- a/lib-python/2.7/test/test_file.py
+++ b/lib-python/2.7/test/test_file.py
@@ -12,7 +12,9 @@ from weakref import proxy
import io
import _pyio as pyio
-from test.test_support import TESTFN, run_unittest, gc_collect
+from test.support import TESTFN, run_unittest
+from test.support import gc_collect
+from test import support
from UserList import UserList
class AutoFileTests(unittest.TestCase):
@@ -24,7 +26,7 @@ class AutoFileTests(unittest.TestCase):
def tearDown(self):
if self.f:
self.f.close()
- os.remove(TESTFN)
+ support.unlink(TESTFN)
def testWeakRefs(self):
# verify weak references
@@ -144,8 +146,12 @@ class PyAutoFileTests(AutoFileTests):
class OtherFileTests(unittest.TestCase):
+ def tearDown(self):
+ support.unlink(TESTFN)
+
def testModeStrings(self):
# check invalid mode strings
+ self.open(TESTFN, 'wb').close()
for mode in ("", "aU", "wU+"):
try:
f = self.open(TESTFN, mode)
@@ -192,7 +198,6 @@ class OtherFileTests(unittest.TestCase):
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
- os.unlink(TESTFN)
f = self.open(TESTFN, 'wb')
try:
@@ -216,7 +221,6 @@ class OtherFileTests(unittest.TestCase):
self.fail("File size after ftruncate wrong %d" % size)
finally:
f.close()
- os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
@@ -237,86 +241,82 @@ class OtherFileTests(unittest.TestCase):
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("b", b" "*100),))]
- try:
- # Prepare the testfile
- bag = self.open(TESTFN, "wb")
- bag.write(filler * nchunks)
- bag.writelines(testlines)
- bag.close()
- # Test for appropriate errors mixing read* and iteration
- for methodname, args in methods:
- f = self.open(TESTFN, 'rb')
- if next(f) != filler:
- self.fail, "Broken testfile"
- meth = getattr(f, methodname)
- meth(*args) # This simply shouldn't fail
- f.close()
-
- # Test to see if harmless (by accident) mixing of read* and
- # iteration still works. This depends on the size of the internal
- # iteration buffer (currently 8192,) but we can test it in a
- # flexible manner. Each line in the bag o' ham is 4 bytes
- # ("h", "a", "m", "\n"), so 4096 lines of that should get us
- # exactly on the buffer boundary for any power-of-2 buffersize
- # between 4 and 16384 (inclusive).
+ # Prepare the testfile
+ bag = self.open(TESTFN, "wb")
+ bag.write(filler * nchunks)
+ bag.writelines(testlines)
+ bag.close()
+ # Test for appropriate errors mixing read* and iteration
+ for methodname, args in methods:
f = self.open(TESTFN, 'rb')
- for i in range(nchunks):
- next(f)
- testline = testlines.pop(0)
- try:
- line = f.readline()
- except ValueError:
- self.fail("readline() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- if line != testline:
- self.fail("readline() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
- testline = testlines.pop(0)
- buf = array("b", b"\x00" * len(testline))
+ self.assertEqual(next(f), filler)
+ meth = getattr(f, methodname)
+ meth(*args) # This simply shouldn't fail
+ f.close()
+
+ # Test to see if harmless (by accident) mixing of read* and
+ # iteration still works. This depends on the size of the internal
+ # iteration buffer (currently 8192,) but we can test it in a
+ # flexible manner. Each line in the bag o' ham is 4 bytes
+ # ("h", "a", "m", "\n"), so 4096 lines of that should get us
+ # exactly on the buffer boundary for any power-of-2 buffersize
+ # between 4 and 16384 (inclusive).
+ f = self.open(TESTFN, 'rb')
+ for i in range(nchunks):
+ next(f)
+ testline = testlines.pop(0)
+ try:
+ line = f.readline()
+ except ValueError:
+ self.fail("readline() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ if line != testline:
+ self.fail("readline() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+ testline = testlines.pop(0)
+ buf = array("b", b"\x00" * len(testline))
+ try:
+ f.readinto(buf)
+ except ValueError:
+ self.fail("readinto() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ line = buf.tostring()
+ if line != testline:
+ self.fail("readinto() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+
+ testline = testlines.pop(0)
+ try:
+ line = f.read(len(testline))
+ except ValueError:
+ self.fail("read() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ if line != testline:
+ self.fail("read() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+ try:
+ lines = f.readlines()
+ except ValueError:
+ self.fail("readlines() after next() with supposedly empty "
+ "iteration-buffer failed anyway")
+ if lines != testlines:
+ self.fail("readlines() after next() with empty buffer "
+ "failed. Got %r, expected %r" % (line, testline))
+ # Reading after iteration hit EOF shouldn't hurt either
+ f.close()
+ f = self.open(TESTFN, 'rb')
+ try:
+ for line in f:
+ pass
try:
+ f.readline()
f.readinto(buf)
+ f.read()
+ f.readlines()
except ValueError:
- self.fail("readinto() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- line = buf.tostring()
- if line != testline:
- self.fail("readinto() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
-
- testline = testlines.pop(0)
- try:
- line = f.read(len(testline))
- except ValueError:
- self.fail("read() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- if line != testline:
- self.fail("read() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
- try:
- lines = f.readlines()
- except ValueError:
- self.fail("readlines() after next() with supposedly empty "
- "iteration-buffer failed anyway")
- if lines != testlines:
- self.fail("readlines() after next() with empty buffer "
- "failed. Got %r, expected %r" % (line, testline))
- # Reading after iteration hit EOF shouldn't hurt either
- f.close()
- f = self.open(TESTFN, 'rb')
- try:
- for line in f:
- pass
- try:
- f.readline()
- f.readinto(buf)
- f.read()
- f.readlines()
- except ValueError:
- self.fail("read* failed after next() consumed file")
- finally:
- f.close()
+ self.fail("read* failed after next() consumed file")
finally:
- os.unlink(TESTFN)
+ f.close()
class COtherFileTests(OtherFileTests):
open = io.open
@@ -326,14 +326,8 @@ class PyOtherFileTests(OtherFileTests):
def test_main():
- # Historically, these tests have been sloppy about removing TESTFN.
- # So get rid of it no matter what.
- try:
- run_unittest(CAutoFileTests, PyAutoFileTests,
- COtherFileTests, PyOtherFileTests)
- finally:
- if os.path.exists(TESTFN):
- os.unlink(TESTFN)
+ run_unittest(CAutoFileTests, PyAutoFileTests,
+ COtherFileTests, PyOtherFileTests)
if __name__ == '__main__':
test_main()
diff --git a/lib-python/2.7/test/test_file2k.py b/lib-python/2.7/test/test_file2k.py
index 55b92a49e5..4b91f8a172 100644
--- a/lib-python/2.7/test/test_file2k.py
+++ b/lib-python/2.7/test/test_file2k.py
@@ -670,6 +670,33 @@ class FileThreadingTests(unittest.TestCase):
self.f.writelines('')
self._test_close_open_io(io_func)
+ def test_iteration_torture(self):
+ # bpo-31530
+ with open(self.filename, "wb") as fp:
+ for i in xrange(2**20):
+ fp.write(b"0"*50 + b"\n")
+ with open(self.filename, "rb") as f:
+ def it():
+ for l in f:
+ pass
+ self._run_workers(it, 10)
+
+ def test_iteration_seek(self):
+ # bpo-31530: Crash when concurrently seek and iterate over a file.
+ with open(self.filename, "wb") as fp:
+ for i in xrange(10000):
+ fp.write(b"0"*50 + b"\n")
+ with open(self.filename, "rb") as f:
+ it = iter([1] + [0]*10) # one thread reads, others seek
+ def iterate():
+ if next(it):
+ for l in f:
+ pass
+ else:
+ for i in xrange(100):
+ f.seek(i*100, 0)
+ self._run_workers(iterate, 10)
+
@unittest.skipUnless(os.name == 'posix', 'test requires a posix system.')
class TestFileSignalEINTR(unittest.TestCase):
diff --git a/lib-python/2.7/test/test_fileio.py b/lib-python/2.7/test/test_fileio.py
index 67ee9606bc..e937b4a7b1 100644
--- a/lib-python/2.7/test/test_fileio.py
+++ b/lib-python/2.7/test/test_fileio.py
@@ -12,7 +12,7 @@ from functools import wraps
from UserList import UserList
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
-from test.test_support import py3k_bytes as bytes, cpython_only
+from test.test_support import py3k_bytes as bytes, cpython_only, check_py3k_warnings
from test.test_support import gc_collect
from test.script_helper import run_python
@@ -103,6 +103,10 @@ class AutoFileTests(unittest.TestCase):
self.assertEqual(self.f.readline(None), b"hi\n")
self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])
+ def testWriteUnicode(self):
+ with check_py3k_warnings():
+ self.f.write(u'')
+
def testRepr(self):
self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
% (self.f.name, self.f.mode))
@@ -216,7 +220,7 @@ class AutoFileTests(unittest.TestCase):
@ClosedFDRaises
def testErrnoOnClosedWrite(self, f):
- f.write('a')
+ f.write(b'a')
@ClosedFDRaises
def testErrnoOnClosedSeek(self, f):
diff --git a/lib-python/2.7/test/test_fnmatch.py b/lib-python/2.7/test/test_fnmatch.py
index 37ec50d71a..1438da7a08 100644
--- a/lib-python/2.7/test/test_fnmatch.py
+++ b/lib-python/2.7/test/test_fnmatch.py
@@ -2,9 +2,10 @@
from test import test_support
import unittest
+import os
-from fnmatch import fnmatch, fnmatchcase, _MAXCACHE, _cache
-from fnmatch import fnmatch, fnmatchcase, _MAXCACHE, _cache, _purge
+from fnmatch import (fnmatch, fnmatchcase, translate, filter,
+ _MAXCACHE, _cache, _purge)
class FnmatchTestCase(unittest.TestCase):
@@ -12,13 +13,13 @@ class FnmatchTestCase(unittest.TestCase):
def tearDown(self):
_purge()
- def check_match(self, filename, pattern, should_match=1, fn=fnmatch):
+ def check_match(self, filename, pattern, should_match=True, fn=fnmatch):
if should_match:
self.assertTrue(fn(filename, pattern),
"expected %r to match pattern %r"
% (filename, pattern))
else:
- self.assertTrue(not fn(filename, pattern),
+ self.assertFalse(fn(filename, pattern),
"expected %r not to match pattern %r"
% (filename, pattern))
@@ -32,15 +33,15 @@ class FnmatchTestCase(unittest.TestCase):
check('abc', '*')
check('abc', 'ab[cd]')
check('abc', 'ab[!de]')
- check('abc', 'ab[de]', 0)
- check('a', '??', 0)
- check('a', 'b', 0)
+ check('abc', 'ab[de]', False)
+ check('a', '??', False)
+ check('a', 'b', False)
# these test that '\' is handled correctly in character sets;
# see SF bug #409651
check('\\', r'[\]')
check('a', r'[!\]')
- check('\\', r'[!\]', 0)
+ check('\\', r'[!\]', False)
# test that filenames with newlines in them are handled correctly.
# http://bugs.python.org/issue6665
@@ -49,10 +50,29 @@ class FnmatchTestCase(unittest.TestCase):
check('\nfoo', 'foo*', False)
check('\n', '*')
+ def test_mix_unicode_str(self):
+ check = self.check_match
+ check('test', u'*')
+ check(u'test', '*')
+ check('test', u'*', fn=fnmatchcase)
+ check(u'test', '*', fn=fnmatchcase)
+ with test_support.check_warnings(("", UnicodeWarning), quiet=True):
+ check('test\xff', u'*\xff')
+ check(u'test\xff', '*\xff')
+ check('test\xff', u'*\xff', fn=fnmatchcase)
+ check(u'test\xff', '*\xff', fn=fnmatchcase)
+
def test_fnmatchcase(self):
check = self.check_match
- check('AbC', 'abc', 0, fnmatchcase)
- check('abc', 'AbC', 0, fnmatchcase)
+ check('abc', 'abc', True, fnmatchcase)
+ check('AbC', 'abc', False, fnmatchcase)
+ check('abc', 'AbC', False, fnmatchcase)
+ check('AbC', 'AbC', True, fnmatchcase)
+
+ check('usr/bin', 'usr/bin', True, fnmatchcase)
+ check('usr\\bin', 'usr/bin', False, fnmatchcase)
+ check('usr/bin', 'usr\\bin', False, fnmatchcase)
+ check('usr\\bin', 'usr\\bin', True, fnmatchcase)
def test_cache_clearing(self):
# check that caches do not grow too large
@@ -64,8 +84,81 @@ class FnmatchTestCase(unittest.TestCase):
self.assertLessEqual(len(_cache), _MAXCACHE)
+ @test_support.requires_unicode
+ def test_unicode(self):
+ with test_support.check_warnings(("", UnicodeWarning), quiet=True):
+ self.check_match(u'test', u'te*')
+ self.check_match(u'test\xff', u'te*\xff')
+ self.check_match(u'test'+unichr(0x20ac), u'te*'+unichr(0x20ac))
+ self.check_match(u'foo\nbar', u'foo*')
+
+ def test_case(self):
+ ignorecase = os.path.normcase('ABC') == os.path.normcase('abc')
+ check = self.check_match
+ check('abc', 'abc')
+ check('AbC', 'abc', ignorecase)
+ check('abc', 'AbC', ignorecase)
+ check('AbC', 'AbC')
+
+ def test_sep(self):
+ normsep = os.path.normcase('\\') == os.path.normcase('/')
+ check = self.check_match
+ check('usr/bin', 'usr/bin')
+ check('usr\\bin', 'usr/bin', normsep)
+ check('usr/bin', 'usr\\bin', normsep)
+ check('usr\\bin', 'usr\\bin')
+
+
+class TranslateTestCase(unittest.TestCase):
+
+ def test_translate(self):
+ self.assertEqual(translate('*'), r'.*\Z(?ms)')
+ self.assertEqual(translate('?'), r'.\Z(?ms)')
+ self.assertEqual(translate('a?b*'), r'a.b.*\Z(?ms)')
+ self.assertEqual(translate('[abc]'), r'[abc]\Z(?ms)')
+ self.assertEqual(translate('[]]'), r'[]]\Z(?ms)')
+ self.assertEqual(translate('[!x]'), r'[^x]\Z(?ms)')
+ self.assertEqual(translate('[^x]'), r'[\^x]\Z(?ms)')
+ self.assertEqual(translate('[x'), r'\[x\Z(?ms)')
+
+
+class FilterTestCase(unittest.TestCase):
+
+ def test_filter(self):
+ self.assertEqual(filter(['Python', 'Ruby', 'Perl', 'Tcl'], 'P*'),
+ ['Python', 'Perl'])
+ self.assertEqual(filter([u'Python', u'Ruby', u'Perl', u'Tcl'], u'P*'),
+ [u'Python', u'Perl'])
+ with test_support.check_warnings(("", UnicodeWarning), quiet=True):
+ self.assertEqual(filter([u'test\xff'], u'*\xff'), [u'test\xff'])
+
+ @test_support.requires_unicode
+ def test_mix_bytes_str(self):
+ with test_support.check_warnings(("", UnicodeWarning), quiet=True):
+ self.assertEqual(filter(['test'], u'*'), ['test'])
+ self.assertEqual(filter([u'test'], '*'), [u'test'])
+ self.assertEqual(filter(['test\xff'], u'*'), ['test\xff'])
+ self.assertEqual(filter([u'test\xff'], '*'), [u'test\xff'])
+ self.assertEqual(filter(['test\xff'], u'*\xff'), ['test\xff'])
+ self.assertEqual(filter([u'test\xff'], '*\xff'), [u'test\xff'])
+
+ def test_case(self):
+ ignorecase = os.path.normcase('P') == os.path.normcase('p')
+ self.assertEqual(filter(['Test.py', 'Test.rb', 'Test.PL'], '*.p*'),
+ ['Test.py', 'Test.PL'] if ignorecase else ['Test.py'])
+ self.assertEqual(filter(['Test.py', 'Test.rb', 'Test.PL'], '*.P*'),
+ ['Test.py', 'Test.PL'] if ignorecase else ['Test.PL'])
+
+ def test_sep(self):
+ normsep = os.path.normcase('\\') == os.path.normcase('/')
+ self.assertEqual(filter(['usr/bin', 'usr', 'usr\\lib'], 'usr/*'),
+ ['usr/bin', 'usr\\lib'] if normsep else ['usr/bin'])
+ self.assertEqual(filter(['usr/bin', 'usr', 'usr\\lib'], 'usr\\*'),
+ ['usr/bin', 'usr\\lib'] if normsep else ['usr\\lib'])
+
+
def test_main():
- test_support.run_unittest(FnmatchTestCase)
+ test_support.run_unittest(FnmatchTestCase, TranslateTestCase, FilterTestCase)
if __name__ == "__main__":
diff --git a/lib-python/2.7/test/test_fpformat.py b/lib-python/2.7/test/test_fpformat.py
index e6de3b0c11..428623ebb3 100644
--- a/lib-python/2.7/test/test_fpformat.py
+++ b/lib-python/2.7/test/test_fpformat.py
@@ -67,6 +67,16 @@ class FpformatTest(unittest.TestCase):
else:
self.fail("No exception on non-numeric sci")
+ def test_REDOS(self):
+ # This attack string will hang on the old decoder pattern.
+ attack = '+0' + ('0' * 1000000) + '++'
+ digs = 5 # irrelevant
+
+ # fix returns input if it does not decode
+ self.assertEqual(fpformat.fix(attack, digs), attack)
+ # sci raises NotANumber
+ with self.assertRaises(NotANumber):
+ fpformat.sci(attack, digs)
def test_main():
run_unittest(FpformatTest)
diff --git a/lib-python/2.7/test/test_ftplib.py b/lib-python/2.7/test/test_ftplib.py
index 05e2595e71..416ee46439 100644
--- a/lib-python/2.7/test/test_ftplib.py
+++ b/lib-python/2.7/test/test_ftplib.py
@@ -218,12 +218,18 @@ class DummyFTPServer(asyncore.dispatcher, threading.Thread):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
- self.bind(address)
- self.listen(5)
- self.active = False
- self.active_lock = threading.Lock()
- self.host, self.port = self.socket.getsockname()[:2]
- self.handler_instance = None
+ try:
+ self.bind(address)
+ self.listen(5)
+ self.active = False
+ self.active_lock = threading.Lock()
+ self.host, self.port = self.socket.getsockname()[:2]
+ self.handler_instance = None
+ except:
+ # unregister the server on bind() error,
+ # needed by TestIPv6Environment.setUpClass()
+ self.del_channel()
+ raise
def start(self):
assert not self.active
@@ -439,6 +445,9 @@ class TestFTPClass(TestCase):
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
+ self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
+ self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
+ self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
@@ -672,6 +681,7 @@ class TestTLS_FTPClass(TestCase):
# clear text
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
+ self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
@@ -679,6 +689,9 @@ class TestTLS_FTPClass(TestCase):
self.client.prot_p()
sock = self.client.transfercmd('list')
self.assertIsInstance(sock, ssl.SSLSocket)
+ # consume from SSL socket to finalize handshake and avoid
+ # "SSLError [SSL] shutdown while in init"
+ self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
@@ -686,6 +699,7 @@ class TestTLS_FTPClass(TestCase):
self.client.prot_c()
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
+ self.assertEqual(sock.recv(1024), LIST_DATA.encode('ascii'))
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
@@ -707,11 +721,11 @@ class TestTLS_FTPClass(TestCase):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
- self.client.ssl_version = ssl.PROTOCOL_TLSv1
+ self.client.ssl_version = ssl.PROTOCOL_TLS
def test_context(self):
self.client.quit()
- ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
+ ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
@@ -736,7 +750,7 @@ class TestTLS_FTPClass(TestCase):
def test_check_hostname(self):
self.client.quit()
- ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
+ ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
ctx.load_verify_locations(CAFILE)
diff --git a/lib-python/2.7/test/test_functools.py b/lib-python/2.7/test/test_functools.py
index 1279737480..05ead7aafa 100644
--- a/lib-python/2.7/test/test_functools.py
+++ b/lib-python/2.7/test/test_functools.py
@@ -602,6 +602,67 @@ class TestTotalOrdering(unittest.TestCase):
with self.assertRaises(TypeError):
TestTO(8) <= ()
+ def test_bug_25732(self):
+ @functools.total_ordering
+ class A:
+ def __init__(self, value):
+ self.value = value
+ def __gt__(self, other):
+ return self.value > other.value
+ def __eq__(self, other):
+ return self.value == other.value
+ def __hash__(self):
+ return hash(self.value)
+ self.assertTrue(A(1) != A(2))
+ self.assertFalse(A(1) != A(1))
+
+ @functools.total_ordering
+ class A(object):
+ def __init__(self, value):
+ self.value = value
+ def __gt__(self, other):
+ return self.value > other.value
+ def __eq__(self, other):
+ return self.value == other.value
+ def __hash__(self):
+ return hash(self.value)
+ self.assertTrue(A(1) != A(2))
+ self.assertFalse(A(1) != A(1))
+
+ @functools.total_ordering
+ class A:
+ def __init__(self, value):
+ self.value = value
+ def __gt__(self, other):
+ return self.value > other.value
+ def __eq__(self, other):
+ return self.value == other.value
+ def __ne__(self, other):
+ raise RuntimeError(self, other)
+ def __hash__(self):
+ return hash(self.value)
+ with self.assertRaises(RuntimeError):
+ A(1) != A(2)
+ with self.assertRaises(RuntimeError):
+ A(1) != A(1)
+
+ @functools.total_ordering
+ class A(object):
+ def __init__(self, value):
+ self.value = value
+ def __gt__(self, other):
+ return self.value > other.value
+ def __eq__(self, other):
+ return self.value == other.value
+ def __ne__(self, other):
+ raise RuntimeError(self, other)
+ def __hash__(self):
+ return hash(self.value)
+ with self.assertRaises(RuntimeError):
+ A(1) != A(2)
+ with self.assertRaises(RuntimeError):
+ A(1) != A(1)
+
def test_main(verbose=None):
test_classes = (
TestPartial,
diff --git a/lib-python/2.7/test/test_gc.py b/lib-python/2.7/test/test_gc.py
index ed01c9802f..7e47b2d3a2 100644
--- a/lib-python/2.7/test/test_gc.py
+++ b/lib-python/2.7/test/test_gc.py
@@ -1,5 +1,6 @@
import unittest
-from test.test_support import verbose, run_unittest, start_threads
+from test.support import (verbose, run_unittest, start_threads,
+ requires_type_collecting)
import sys
import time
import gc
@@ -90,6 +91,7 @@ class GCTests(unittest.TestCase):
del a
self.assertNotEqual(gc.collect(), 0)
+ @requires_type_collecting
def test_newinstance(self):
class A(object):
pass
diff --git a/lib-python/2.7/test/test_gdb.py b/lib-python/2.7/test/test_gdb.py
index 38e570f4bf..b96acc0988 100644
--- a/lib-python/2.7/test/test_gdb.py
+++ b/lib-python/2.7/test/test_gdb.py
@@ -3,13 +3,14 @@
# The code for testing gdb was adapted from similar work in Unladen Swallow's
# Lib/test/test_jit_gdb.py
+import locale
import os
import re
import subprocess
import sys
import sysconfig
+import textwrap
import unittest
-import sysconfig
from test import test_support
from test.test_support import run_unittest, findfile
@@ -57,6 +58,23 @@ if sys.platform.startswith("sunos"):
checkout_hook_path = os.path.join(os.path.dirname(sys.executable),
'python-gdb.py')
+
+def cet_protection():
+ cflags = sysconfig.get_config_var('CFLAGS')
+ if not cflags:
+ return False
+ flags = cflags.split()
+ # True if "-mcet -fcf-protection" options are found, but false
+ # if "-fcf-protection=none" or "-fcf-protection=return" is found.
+ return (('-mcet' in flags)
+ and any((flag.startswith('-fcf-protection')
+ and not flag.endswith(("=none", "=return")))
+ for flag in flags))
+
+# Control-flow enforcement technology
+CET_PROTECTION = cet_protection()
+
+
def run_gdb(*args, **env_vars):
"""Runs gdb in batch mode with the additional arguments given by *args.
@@ -170,6 +188,12 @@ class DebuggerTests(unittest.TestCase):
commands += ['set print entry-values no']
if cmds_after_breakpoint:
+ if CET_PROTECTION:
+ # bpo-32962: When Python is compiled with -mcet
+ # -fcf-protection, function arguments are unusable before
+ # running the first instruction of the function entry point.
+ # The 'next' command makes the required first step.
+ commands += ['next']
commands += cmds_after_breakpoint
else:
commands += ['backtrace']
@@ -191,44 +215,22 @@ class DebuggerTests(unittest.TestCase):
elif script:
args += [script]
- # print args
- # print ' '.join(args)
-
# Use "args" to invoke gdb, capturing stdout, stderr:
out, err = run_gdb(*args, PYTHONHASHSEED='0')
- errlines = err.splitlines()
- unexpected_errlines = []
-
- # Ignore some benign messages on stderr.
- ignore_patterns = (
- 'Function "%s" not defined.' % breakpoint,
- "warning: no loadable sections found in added symbol-file"
- " system-supplied DSO",
- "warning: Unable to find libthread_db matching"
- " inferior's thread library, thread debugging will"
- " not be available.",
- "warning: Cannot initialize thread debugging"
- " library: Debugger service failed",
- 'warning: Could not load shared library symbols for '
- 'linux-vdso.so',
- 'warning: Could not load shared library symbols for '
- 'linux-gate.so',
- 'warning: Could not load shared library symbols for '
- 'linux-vdso64.so',
- 'Do you need "set solib-search-path" or '
- '"set sysroot"?',
- 'warning: Source file is more recent than executable.',
- # Issue #19753: missing symbols on System Z
- 'Missing separate debuginfo for ',
- 'Try: zypper install -C ',
- )
- for line in errlines:
- if not line.startswith(ignore_patterns):
- unexpected_errlines.append(line)
+ for line in err.splitlines():
+ print >>sys.stderr, line
+
+ # bpo-34007: Sometimes some versions of the shared libraries that
+ # are part of the traceback are compiled in optimised mode and the
+ # Program Counter (PC) is not present, not allowing gdb to walk the
+ # frames back. When this happens, the Python bindings of gdb raise
+ # an exception, making the test impossible to succeed.
+ if "PC not saved" in err:
+ raise unittest.SkipTest("gdb cannot walk the frame object"
+ " because the Program Counter is"
+ " not present")
- # Ensure no unexpected error messages:
- self.assertEqual(unexpected_errlines, [])
return out
def get_gdb_repr(self, source,
@@ -241,6 +243,11 @@ class DebuggerTests(unittest.TestCase):
#
# For a nested structure, the first time we hit the breakpoint will
# give us the top-level structure
+
+ # NOTE: avoid decoding too much of the traceback as some
+ # undecodable characters may lurk there in optimized mode
+ # (issue #19743).
+ cmds_after_breakpoint = cmds_after_breakpoint or ["backtrace 1"]
gdb_output = self.get_stack_trace(source, breakpoint='PyObject_Print',
cmds_after_breakpoint=cmds_after_breakpoint,
import_site=import_site)
@@ -395,7 +402,7 @@ except RuntimeError, e:
# Test division by zero:
gdb_repr, gdb_output = self.get_gdb_repr('''
try:
- a = 1 / 0
+ a = 1 // 0
except ZeroDivisionError, e:
print e
''')
@@ -873,7 +880,32 @@ print 42
breakpoint='time_gmtime',
cmds_after_breakpoint=['py-bt-full'],
)
- self.assertIn('#0 <built-in function gmtime', gdb_output)
+ self.assertIn('#1 <built-in function gmtime', gdb_output)
+
+ @unittest.skipIf(python_is_optimized(),
+ "Python was compiled with optimizations")
+ def test_wrapper_call(self):
+ cmd = textwrap.dedent('''
+ class MyList(list):
+ def __init__(self):
+ super(MyList, self).__init__() # wrapper_call()
+
+ print("first break point")
+ l = MyList()
+ ''')
+ cmds_after_breakpoint = ['break wrapper_call', 'continue']
+ if CET_PROTECTION:
+ # bpo-32962: same case as in get_stack_trace():
+ # we need an additional 'next' command in order to read
+ # arguments of the innermost function of the call stack.
+ cmds_after_breakpoint.append('next')
+ cmds_after_breakpoint.append('py-bt')
+
+ # Verify with "py-bt":
+ gdb_output = self.get_stack_trace(cmd,
+ cmds_after_breakpoint=cmds_after_breakpoint)
+ self.assertRegexpMatches(gdb_output,
+ r"<method-wrapper u?'__init__' of MyList object at ")
class PyPrintTests(DebuggerTests):
diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py
index 9896569eac..85fbcc75e2 100644
--- a/lib-python/2.7/test/test_gdbm.py
+++ b/lib-python/2.7/test/test_gdbm.py
@@ -62,9 +62,13 @@ class TestGdbm(unittest.TestCase):
self.g = gdbm.open(filename, 'c')
size0 = os.path.getsize(filename)
- self.g['x'] = 'x' * 10000
+ # bpo-33901: on macOS with gdbm 1.15, an empty database uses 16 MiB
+ # and adding an entry of 10,000 B has no effect on the file size.
+ # Add size0 bytes to make sure that the file size changes.
+ value_size = max(size0, 10000)
+ self.g['x'] = 'x' * value_size
size1 = os.path.getsize(filename)
- self.assertTrue(size0 < size1)
+ self.assertGreater(size1, size0)
del self.g['x']
# 'size' is supposed to be the same even after deleting an entry.
@@ -72,7 +76,8 @@ class TestGdbm(unittest.TestCase):
self.g.reorganize()
size2 = os.path.getsize(filename)
- self.assertTrue(size1 > size2 >= size0)
+ self.assertLess(size2, size1)
+ self.assertGreaterEqual(size2, size0)
def test_sync(self):
# check if sync works at all, not sure how to check it
diff --git a/lib-python/2.7/test/test_generators.py b/lib-python/2.7/test/test_generators.py
index 7e30a224a0..330a0f7116 100644
--- a/lib-python/2.7/test/test_generators.py
+++ b/lib-python/2.7/test/test_generators.py
@@ -1898,6 +1898,16 @@ test_generators just happened to be the test that drew these out.
"""
+crash_test = """
+>>> def foo(): yield
+>>> gen = foo()
+>>> gen.next()
+>>> print gen.gi_frame.f_restricted # This would segfault.
+False
+
+"""
+
+
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
@@ -1907,6 +1917,7 @@ __test__ = {"tut": tutorial_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
+ "crash": crash_test,
}
# Magic test name that regrtest.py invokes *after* importing this module.
diff --git a/lib-python/2.7/test/test_getargs2.py b/lib-python/2.7/test/test_getargs2.py
index e0efcaae9c..7f819fb10c 100644
--- a/lib-python/2.7/test/test_getargs2.py
+++ b/lib-python/2.7/test/test_getargs2.py
@@ -1,9 +1,10 @@
import unittest
import math
+import string
import sys
from test import test_support
# Skip this test if the _testcapi module isn't available.
-test_support.import_module('_testcapi')
+_testcapi = test_support.import_module('_testcapi')
from _testcapi import getargs_keywords
import warnings
@@ -799,12 +800,6 @@ class Unicode_TestCase(unittest.TestCase):
self.assertRaises(TypeError, getargs_u_hash, None)
-def test_main():
- tests = [Signed_TestCase, Unsigned_TestCase, LongLong_TestCase,
- Tuple_TestCase, Keywords_TestCase,
- Bytes_TestCase, String_TestCase, Unicode_TestCase]
- test_support.run_unittest(*tests)
-
class Object_TestCase(unittest.TestCase):
def test_S(self):
from _testcapi import getargs_S
@@ -840,5 +835,142 @@ class Object_TestCase(unittest.TestCase):
self.assertRaises(TypeError, getargs_U, buffer(obj))
+class SkipitemTest(unittest.TestCase):
+
+ def test_skipitem(self):
+ """
+ If this test failed, you probably added a new "format unit"
+ in Python/getargs.c, but neglected to update our poor friend
+ skipitem() in the same file. (If so, shame on you!)
+
+ With a few exceptions**, this function brute-force tests all
+ printable ASCII*** characters (32 to 126 inclusive) as format units,
+ checking to see that PyArg_ParseTupleAndKeywords() return consistent
+ errors both when the unit is attempted to be used and when it is
+ skipped. If the format unit doesn't exist, we'll get one of two
+ specific error messages (one for used, one for skipped); if it does
+ exist we *won't* get that error--we'll get either no error or some
+ other error. If we get the specific "does not exist" error for one
+ test and not for the other, there's a mismatch, and the test fails.
+
+ ** Some format units have special funny semantics and it would
+ be difficult to accommodate them here. Since these are all
+ well-established and properly skipped in skipitem() we can
+ get away with not testing them--this test is really intended
+ to catch *new* format units.
+
+ *** Python C source files must be ASCII. Therefore it's impossible
+ to have non-ASCII format units.
+
+ """
+ empty_tuple = ()
+ tuple_1 = (0,)
+ dict_b = {'b':1}
+ keywords = ["a", "b"]
+
+ for i in range(32, 127):
+ c = chr(i)
+
+ # skip parentheses, the error reporting is inconsistent about them
+ # skip 'e', it's always a two-character code
+ # skip '|', it doesn't represent arguments anyway
+ if c in '()e|':
+ continue
+
+ # test the format unit when not skipped
+ format = c + "i"
+ try:
+ _testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
+ format, keywords)
+ when_not_skipped = False
+ except TypeError as e:
+ s = "argument 1 (impossible<bad format char>)"
+ when_not_skipped = (str(e) == s)
+ except RuntimeError:
+ when_not_skipped = False
+
+ # test the format unit when skipped
+ optional_format = "|" + format
+ try:
+ _testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
+ optional_format, keywords)
+ when_skipped = False
+ except RuntimeError as e:
+ s = "impossible<bad format char>: '{}'".format(format)
+ when_skipped = (str(e) == s)
+
+ message = ("test_skipitem_parity: "
+ "detected mismatch between convertsimple and skipitem "
+ "for format unit '{}' ({}), not skipped {}, skipped {}".format(
+ c, i, when_skipped, when_not_skipped))
+ self.assertIs(when_skipped, when_not_skipped, message)
+
+ def test_skipitem_with_suffix(self):
+ parse = _testcapi.parse_tuple_and_keywords
+ empty_tuple = ()
+ tuple_1 = (0,)
+ dict_b = {'b':1}
+ keywords = ["a", "b"]
+
+ supported = ('s#', 's*', 'z#', 'z*', 'u#', 't#', 'w#', 'w*')
+ for c in string.ascii_letters:
+ for c2 in '#*':
+ f = c + c2
+ optional_format = "|" + f + "i"
+ if f in supported:
+ parse(empty_tuple, dict_b, optional_format, keywords)
+ else:
+ with self.assertRaisesRegexp((RuntimeError, TypeError),
+ 'impossible<bad format char>'):
+ parse(empty_tuple, dict_b, optional_format, keywords)
+
+ for c in map(chr, range(32, 128)):
+ f = 'e' + c
+ optional_format = "|" + f + "i"
+ if c in 'st':
+ parse(empty_tuple, dict_b, optional_format, keywords)
+ else:
+ with self.assertRaisesRegexp(RuntimeError,
+ 'impossible<bad format char>'):
+ parse(empty_tuple, dict_b, optional_format, keywords)
+
+
+class ParseTupleAndKeywords_Test(unittest.TestCase):
+
+ def test_parse_tuple_and_keywords(self):
+ # Test handling errors in the parse_tuple_and_keywords helper itself
+ self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
+ (), {}, 42, [])
+ self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
+ (), {}, '', 42)
+ self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
+ (), {}, '', [''] * 42)
+ self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
+ (), {}, '', [42])
+
+ def test_bad_use(self):
+ # Test handling invalid format and keywords in
+ # PyArg_ParseTupleAndKeywords()
+ self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
+ (1,), {}, '||O', ['a'])
+ self.assertRaises(RuntimeError, _testcapi.parse_tuple_and_keywords,
+ (1,), {}, '|O', ['a', 'b'])
+ self.assertRaises(RuntimeError, _testcapi.parse_tuple_and_keywords,
+ (1,), {}, '|OO', ['a'])
+
+
+class Test_testcapi(unittest.TestCase):
+ locals().update((name, getattr(_testcapi, name))
+ for name in dir(_testcapi)
+ if name.startswith('test_') and name.endswith('_code'))
+
+
+def test_main():
+ tests = [Signed_TestCase, Unsigned_TestCase, LongLong_TestCase,
+ Tuple_TestCase, Keywords_TestCase,
+ Bytes_TestCase, String_TestCase, Unicode_TestCase,
+ SkipitemTest, ParseTupleAndKeywords_Test, Test_testcapi]
+ test_support.run_unittest(*tests)
+
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_glob.py b/lib-python/2.7/test/test_glob.py
index b360d09dfa..84b2e7ddd2 100644
--- a/lib-python/2.7/test/test_glob.py
+++ b/lib-python/2.7/test/test_glob.py
@@ -49,10 +49,10 @@ class GlobTests(unittest.TestCase):
pattern = os.path.join(*parts)
p = os.path.join(self.tempdir, pattern)
res = glob.glob(p)
- self.assertEqual(list(glob.iglob(p)), res)
+ self.assertItemsEqual(glob.iglob(p), res)
ures = [fsdecode(x) for x in res]
- self.assertEqual(glob.glob(fsdecode(p)), ures)
- self.assertEqual(list(glob.iglob(fsdecode(p))), ures)
+ self.assertItemsEqual(glob.glob(fsdecode(p)), ures)
+ self.assertItemsEqual(glob.iglob(fsdecode(p)), ures)
return res
def assertSequencesEqual_noorder(self, l1, l2):
diff --git a/lib-python/2.7/test/test_grammar.py b/lib-python/2.7/test/test_grammar.py
index 5f77c1d018..228586ece0 100644
--- a/lib-python/2.7/test/test_grammar.py
+++ b/lib-python/2.7/test/test_grammar.py
@@ -5,13 +5,14 @@ from test.test_support import run_unittest, check_syntax_error, \
check_py3k_warnings
import unittest
import sys
+import warnings
# testing import *
from sys import *
class TokenTests(unittest.TestCase):
- def testBackslash(self):
+ def test_backslash(self):
# Backslash means line continuation:
x = 1 \
+ 1
@@ -21,29 +22,42 @@ class TokenTests(unittest.TestCase):
x = 0
self.assertEqual(x, 0, 'backslash ending comment')
- def testPlainIntegers(self):
+ def test_plain_integers(self):
+ self.assertEqual(type(000), type(0))
self.assertEqual(0xff, 255)
self.assertEqual(0377, 255)
+ self.assertEqual(0o377, 255)
self.assertEqual(2147483647, 017777777777)
+ self.assertEqual(2147483647, 0o17777777777)
+ self.assertEqual(0b1001, 9)
# "0x" is not a valid literal
self.assertRaises(SyntaxError, eval, "0x")
from sys import maxint
if maxint == 2147483647:
- self.assertEqual(-2147483647-1, -020000000000)
+ self.assertEqual(-2147483647-1, -0o20000000000)
# XXX -2147483648
self.assertTrue(037777777777 > 0)
+ self.assertTrue(0o37777777777 > 0)
self.assertTrue(0xffffffff > 0)
- for s in '2147483648', '040000000000', '0x100000000':
+ self.assertTrue(0b1111111111111111111111111111111 > 0)
+ for s in ('2147483648', '040000000000', '0o40000000000',
+ '0x100000000',
+ '0b10000000000000000000000000000000'):
try:
x = eval(s)
except OverflowError:
self.fail("OverflowError on huge integer literal %r" % s)
elif maxint == 9223372036854775807:
self.assertEqual(-9223372036854775807-1, -01000000000000000000000)
+ self.assertEqual(-9223372036854775807-1, -0o1000000000000000000000)
self.assertTrue(01777777777777777777777 > 0)
+ self.assertTrue(0o1777777777777777777777 > 0)
self.assertTrue(0xffffffffffffffff > 0)
+ self.assertTrue(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
for s in '9223372036854775808', '02000000000000000000000', \
- '0x10000000000000000':
+ '0o2000000000000000000000', \
+ '0x10000000000000000', \
+ '0b100000000000000000000000000000000000000000000000000000000000000':
try:
x = eval(s)
except OverflowError:
@@ -51,7 +65,7 @@ class TokenTests(unittest.TestCase):
else:
self.fail('Weird maxint value %r' % maxint)
- def testLongIntegers(self):
+ def test_long_integers(self):
x = 0L
x = 0l
x = 0xffffffffffffffffL
@@ -61,7 +75,7 @@ class TokenTests(unittest.TestCase):
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
- def testFloats(self):
+ def test_floats(self):
x = 3.14
x = 314.
x = 0.314
@@ -81,7 +95,7 @@ class TokenTests(unittest.TestCase):
self.assertEqual(1 if 0else 0, 0)
self.assertRaises(SyntaxError, eval, "0 if 1Else 0")
- def testStringLiterals(self):
+ def test_string_literals(self):
x = ''; y = ""; self.assertTrue(len(x) == 0 and x == y)
x = '\''; y = "'"; self.assertTrue(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; self.assertTrue(len(x) == 1 and x == y and ord(x) == 34)
@@ -133,11 +147,11 @@ class GrammarTests(unittest.TestCase):
# expr_input: testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
- def testEvalInput(self):
+ def test_eval_input(self):
# testlist ENDMARKER
x = eval('1, 0 or 1')
- def testFuncdef(self):
+ def test_funcdef(self):
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
@@ -153,9 +167,10 @@ class GrammarTests(unittest.TestCase):
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
- # Silence Py3k warning
- exec('def f4(two, (compound, (argument, list))): pass')
- exec('def f5((compound, first), two): pass')
+ with check_py3k_warnings(('tuple parameter unpacking has been removed',
+ SyntaxWarning)):
+ exec('def f4(two, (compound, (argument, list))): pass')
+ exec('def f5((compound, first), two): pass')
self.assertEqual(f2.func_code.co_varnames, ('one_argument',))
self.assertEqual(f3.func_code.co_varnames, ('two', 'arguments'))
if sys.platform.startswith('java'):
@@ -174,8 +189,9 @@ class GrammarTests(unittest.TestCase):
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
- # Silence Py3k warning
- exec('def v3(a, (b, c), *rest): return a, b, c, rest')
+ with check_py3k_warnings(('tuple parameter unpacking has been removed',
+ SyntaxWarning)):
+ exec('def v3(a, (b, c), *rest): return a, b, c, rest')
f1()
f2(1)
@@ -212,7 +228,9 @@ class GrammarTests(unittest.TestCase):
d01()
d01(1)
d01(*(1,))
+ d01(*[] or [2])
d01(**{'a':2})
+ d01(**{'a':2} or {})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
@@ -280,9 +298,12 @@ class GrammarTests(unittest.TestCase):
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
- # Silence Py3k warning
- exec('def d31v((x)): pass')
- exec('def d32v((x,)): pass')
+ with check_py3k_warnings(('parenthesized argument names are invalid',
+ SyntaxWarning)):
+ exec('def d31v((x)): pass')
+ with check_py3k_warnings(('tuple parameter unpacking has been removed',
+ SyntaxWarning)):
+ exec('def d32v((x,)): pass')
d31v(1)
d32v((1,))
@@ -293,12 +314,19 @@ class GrammarTests(unittest.TestCase):
{'x':2, 'y':5}))
self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+ self.assertEqual(f(**{'eggs':'scrambled', 'spam':'fried'}),
+ ((), {'eggs':'scrambled', 'spam':'fried'}))
+ self.assertEqual(f(spam='fried', **{'eggs':'scrambled'}),
+ ((), {'eggs':'scrambled', 'spam':'fried'}))
# Check ast errors in *args and *kwargs
check_syntax_error(self, "f(*g(1=2))")
check_syntax_error(self, "f(**g(1=2))")
- def testLambdef(self):
+ # Check trailing commas are permitted in funcdef argument list
+ def f(a,): pass
+
+ def test_lambdef(self):
### lambdef: 'lambda' [varargslist] ':' test
l1 = lambda : 0
self.assertEqual(l1(), 0)
@@ -311,12 +339,18 @@ class GrammarTests(unittest.TestCase):
self.assertEqual(l5(1, 2), 5)
self.assertEqual(l5(1, 2, 3), 6)
check_syntax_error(self, "lambda x: x = 2")
- check_syntax_error(self, "lambda (None,): None")
+ with check_py3k_warnings(('tuple parameter unpacking has been removed',
+ SyntaxWarning)):
+ check_syntax_error(self, "lambda (None,): None")
+
+ # check that trailing commas are permitted
+ l10 = lambda a,: 0
+
### stmt: simple_stmt | compound_stmt
# Tested below
- def testSimpleStmt(self):
+ def test_simple_stmt(self):
### simple_stmt: small_stmt (';' small_stmt)* [';']
x = 1; pass; del x
def foo():
@@ -327,7 +361,7 @@ class GrammarTests(unittest.TestCase):
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
- def testExprStmt(self):
+ def test_expr_stmt(self):
# (exprlist '=')* exprlist
1
1, 2, 3
@@ -340,7 +374,7 @@ class GrammarTests(unittest.TestCase):
check_syntax_error(self, "x + 1 = 1")
check_syntax_error(self, "a + 1 = b + 2")
- def testPrintStmt(self):
+ def test_print_stmt(self):
# 'print' (test ',')* [test]
import StringIO
@@ -410,7 +444,7 @@ hello world
check_syntax_error(self, 'print ,')
check_syntax_error(self, 'print >> x,')
- def testDelStmt(self):
+ def test_del_stmt(self):
# 'del' exprlist
abc = [1,2,3]
x, y, z = abc
@@ -419,18 +453,18 @@ hello world
del abc
del x, y, (z, xyz)
- def testPassStmt(self):
+ def test_pass_stmt(self):
# 'pass'
pass
# flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
- def testBreakStmt(self):
+ def test_break_stmt(self):
# 'break'
while 1: break
- def testContinueStmt(self):
+ def test_continue_stmt(self):
# 'continue'
i = 1
while i: i = 0; continue
@@ -482,7 +516,7 @@ hello world
self.fail("continue then break in try/except in loop broken!")
test_inner()
- def testReturn(self):
+ def test_return(self):
# 'return' [testlist]
def g1(): return
def g2(): return 1
@@ -490,17 +524,150 @@ hello world
x = g2()
check_syntax_error(self, "class foo:return 1")
- def testYield(self):
- check_syntax_error(self, "class foo:yield 1")
+ def test_break_in_finally(self):
+ count = 0
+ while count < 2:
+ count += 1
+ try:
+ pass
+ finally:
+ break
+ self.assertEqual(count, 1)
- def testRaise(self):
+ count = 0
+ while count < 2:
+ count += 1
+ try:
+ continue
+ finally:
+ break
+ self.assertEqual(count, 1)
+
+ count = 0
+ while count < 2:
+ count += 1
+ try:
+ 1.0/0.0
+ finally:
+ break
+ self.assertEqual(count, 1)
+
+ for count in [0, 1]:
+ self.assertEqual(count, 0)
+ try:
+ pass
+ finally:
+ break
+ self.assertEqual(count, 0)
+
+ for count in [0, 1]:
+ self.assertEqual(count, 0)
+ try:
+ continue
+ finally:
+ break
+ self.assertEqual(count, 0)
+
+ for count in [0, 1]:
+ self.assertEqual(count, 0)
+ try:
+ 1.0/0.0
+ finally:
+ break
+ self.assertEqual(count, 0)
+
+ def test_return_in_finally(self):
+ def g1():
+ try:
+ pass
+ finally:
+ return 1
+ self.assertEqual(g1(), 1)
+
+ def g2():
+ try:
+ return 2
+ finally:
+ return 3
+ self.assertEqual(g2(), 3)
+
+ def g3():
+ try:
+ 1.0/0.0
+ finally:
+ return 4
+ self.assertEqual(g3(), 4)
+
+ def test_yield(self):
+ # Allowed as standalone statement
+ def g(): yield 1
+ # Allowed as RHS of assignment
+ def g(): x = yield 1
+ # Ordinary yield accepts implicit tuples
+ def g(): yield 1, 1
+ def g(): x = yield 1, 1
+ # Requires parentheses as subexpression
+ def g(): 1, (yield 1)
+ check_syntax_error(self, "def g(): 1, yield 1")
+ # Requires parentheses as call argument
+ def g(): f((yield 1))
+ def g(): f((yield 1), 1)
+ check_syntax_error(self, "def g(): f(yield 1)")
+ check_syntax_error(self, "def g(): f(yield 1, 1)")
+ # Not allowed at top level
+ check_syntax_error(self, "yield")
+ # Not allowed at class scope
+ check_syntax_error(self, "class foo:yield 1")
+ # Check annotation refleak on SyntaxError
+ check_syntax_error(self, "def g(a:(yield)): pass")
+
+ def test_yield_in_comprehensions(self):
+ # Check yield in comprehensions
+ def g(): [x for x in [(yield 1)]]
+
+ def check(code, warntext):
+ with check_py3k_warnings((warntext, DeprecationWarning)):
+ compile(code, '<test string>', 'exec')
+ if sys.py3kwarning:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error', category=DeprecationWarning)
+ with self.assertRaises(SyntaxError) as cm:
+ compile(code, '<test string>', 'exec')
+ self.assertIn(warntext, str(cm.exception))
+
+ check("def g(): [(yield x) for x in ()]",
+ "'yield' inside list comprehension")
+ check("def g(): [x for x in () if not (yield x)]",
+ "'yield' inside list comprehension")
+ check("def g(): [y for x in () for y in [(yield x)]]",
+ "'yield' inside list comprehension")
+ check("def g(): {(yield x) for x in ()}",
+ "'yield' inside set comprehension")
+ check("def g(): {(yield x): x for x in ()}",
+ "'yield' inside dict comprehension")
+ check("def g(): {x: (yield x) for x in ()}",
+ "'yield' inside dict comprehension")
+ check("def g(): ((yield x) for x in ())",
+ "'yield' inside generator expression")
+ with check_py3k_warnings(("'yield' inside list comprehension",
+ DeprecationWarning)):
+ check_syntax_error(self, "class C: [(yield x) for x in ()]")
+ check("class C: ((yield x) for x in ())",
+ "'yield' inside generator expression")
+ with check_py3k_warnings(("'yield' inside list comprehension",
+ DeprecationWarning)):
+ check_syntax_error(self, "[(yield x) for x in ()]")
+ check("((yield x) for x in ())",
+ "'yield' inside generator expression")
+
+ def test_raise(self):
# 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
- def testImport(self):
+ def test_import(self):
# 'import' dotted_as_names
import sys
import time, sys
@@ -513,13 +680,13 @@ hello world
from sys import (path, argv)
from sys import (path, argv,)
- def testGlobal(self):
+ def test_global(self):
# 'global' NAME (',' NAME)*
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
- def testExec(self):
+ def test_exec(self):
# 'exec' expr ['in' expr [',' expr]]
z = None
del z
@@ -551,7 +718,7 @@ hello world
if (g, l) != ({'a':1}, {'b':2}):
self.fail('exec ... in g (%s), l (%s)' %(g,l))
- def testAssert(self):
+ def test_assert(self):
# assertTruestmt: 'assert' test [',' test]
assert 1
assert 1, 1
@@ -590,7 +757,7 @@ hello world
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
- def testIf(self):
+ def test_if(self):
# 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
@@ -603,7 +770,7 @@ hello world
elif 0: pass
else: pass
- def testWhile(self):
+ def test_while(self):
# 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
@@ -618,7 +785,7 @@ hello world
x = 2
self.assertEqual(x, 2)
- def testFor(self):
+ def test_for(self):
# 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
@@ -645,30 +812,30 @@ hello world
result.append(x)
self.assertEqual(result, [1, 2, 3])
- def testTry(self):
+ def test_try(self):
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [('as' | ',') expr]]
try:
- 1/0
+ 1/0.0
except ZeroDivisionError:
pass
else:
pass
- try: 1/0
+ try: 1/0.0
except EOFError: pass
except TypeError as msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
- try: 1/0
+ try: 1/0.0
except (EOFError, TypeError, ZeroDivisionError): pass
- try: 1/0
+ try: 1/0.0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
- def testSuite(self):
+ def test_suite(self):
# simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
@@ -683,7 +850,7 @@ hello world
pass
#
- def testTest(self):
+ def test_test(self):
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
@@ -694,7 +861,7 @@ hello world
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
- def testComparison(self):
+ def test_comparison(self):
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
@@ -710,40 +877,56 @@ hello world
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
- # Silence Py3k warning
- if eval('1 <> 1'): pass
- if eval('1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1'): pass
-
- def testBinaryMaskOps(self):
+ with check_py3k_warnings(('<> not supported in 3.x; use !=',
+ DeprecationWarning)):
+ if eval('1 <> 1'): pass
+ with check_py3k_warnings(('<> not supported in 3.x; use !=',
+ DeprecationWarning)):
+ if eval('1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1'): pass
+ if sys.py3kwarning:
+ with warnings.catch_warnings():
+ warnings.filterwarnings('error', category=DeprecationWarning)
+ with self.assertRaises(DeprecationWarning) as cm:
+ compile('1 <> 1', '<test string>', 'eval')
+ self.assertIn('<> not supported in 3.x; use !=',
+ str(cm.exception))
+
+ def test_binary_mask_ops(self):
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
- def testShiftOps(self):
+ def test_shift_ops(self):
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
- def testAdditiveOps(self):
+ def test_additive_ops(self):
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
- def testMultiplicativeOps(self):
+ def test_multiplicative_ops(self):
x = 1 * 1
- x = 1 / 1
+ with check_py3k_warnings(('classic int division', DeprecationWarning)):
+ x = 1 / 1
+ x = 1 / 1.0
x = 1 % 1
- x = 1 / 1 * 1 % 1
+ with check_py3k_warnings(('classic int division', DeprecationWarning)):
+ x = 1 / 1 * 1 % 1
+ x = 1 / 1.0 * 1 % 1
- def testUnaryOps(self):
+ def test_unary_ops(self):
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
- x = -1*1/1 + 1*1 - ---1*1
+ with check_py3k_warnings(('classic int division', DeprecationWarning)):
+ x = -1*1/1 + 1*1 - ---1*1
+ x = -1*1/1.0 + 1*1 - ---1*1
- def testSelectors(self):
+ def test_selectors(self):
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
@@ -770,10 +953,10 @@ hello world
d[1,2] = 3
d[1,2,3] = 4
L = list(d)
- L.sort()
+ L.sort(key=lambda x: (type(x).__name__, x))
self.assertEqual(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
- def testAtoms(self):
+ def test_atoms(self):
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictorsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
@@ -800,10 +983,10 @@ hello world
x = {'one', 'two', 'three'}
x = {2, 3, 4,}
- # Silence Py3k warning
- x = eval('`x`')
- x = eval('`1 or 2 or 3`')
- self.assertEqual(eval('`1,2`'), '(1, 2)')
+ with check_py3k_warnings(('backquote not supported', SyntaxWarning)):
+ x = eval('`x`')
+ x = eval('`1 or 2 or 3`')
+ self.assertEqual(eval('`1,2`'), '(1, 2)')
x = x
x = 'x'
@@ -813,7 +996,7 @@ hello world
### testlist: test (',' test)* [',']
# These have been exercised enough above
- def testClassdef(self):
+ def test_classdef(self):
# 'class' NAME ['(' [testlist] ')'] ':' suite
class B: pass
class B2(): pass
@@ -824,6 +1007,7 @@ hello world
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
+
# decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
# decorators: decorator+
# decorated: decorators (classdef | funcdef)
@@ -835,14 +1019,14 @@ hello world
pass
self.assertEqual(G.decorated, True)
- def testDictcomps(self):
+ def test_dictcomps(self):
# dictorsetmaker: ( (test ':' test (comp_for |
# (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
nums = [1, 2, 3]
self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
- def testListcomps(self):
+ def test_listcomps(self):
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
@@ -865,7 +1049,7 @@ hello world
[[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
def test_in_func(l):
- return [None < x < 3 for x in l if x > 2]
+ return [0 < x < 3 for x in l if x > 2]
self.assertEqual(test_in_func(nums), [False, False, False])
@@ -905,7 +1089,7 @@ hello world
self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
('Macdonalds', 'Cheeseburger')])
- def testGenexps(self):
+ def test_genexps(self):
# generator expression tests
g = ([x for x in range(10)] for x in range(1))
self.assertEqual(g.next(), [x for x in range(10)])
@@ -940,7 +1124,7 @@ hello world
check_syntax_error(self, "foo(x for x in range(10), 100)")
check_syntax_error(self, "foo(100, x for x in range(10))")
- def testComprehensionSpecials(self):
+ def test_comprehension_specials(self):
# test for outmost iterable precomputation
x = 10; g = (i for i in range(x)); x = 5
self.assertEqual(len(list(g)), 10)
@@ -979,11 +1163,11 @@ hello world
with manager() as x, manager():
pass
- def testIfElseExpr(self):
+ def test_if_else_expr(self):
# Test ifelse expressions in various cases
def _checkeval(msg, ret):
"helper to check that evaluation of expressions is done correctly"
- print x
+ print(msg)
return ret
self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
@@ -1002,7 +1186,8 @@ hello world
self.assertEqual((6 + 1 if 1 else 2), 7)
self.assertEqual((6 - 1 if 1 else 2), 5)
self.assertEqual((6 * 2 if 1 else 4), 12)
- self.assertEqual((6 / 2 if 1 else 3), 3)
+ with check_py3k_warnings(('classic int division', DeprecationWarning)):
+ self.assertEqual((6 / 2 if 1 else 3), 3)
self.assertEqual((6 < 4 if 0 else 2), 2)
def test_paren_evaluation(self):
@@ -1015,13 +1200,7 @@ hello world
def test_main():
- with check_py3k_warnings(
- ("backquote not supported", SyntaxWarning),
- ("tuple parameter unpacking has been removed", SyntaxWarning),
- ("parenthesized argument names are invalid", SyntaxWarning),
- ("classic int division", DeprecationWarning),
- (".+ not supported in 3.x", DeprecationWarning)):
- run_unittest(TokenTests, GrammarTests)
+ run_unittest(TokenTests, GrammarTests)
if __name__ == '__main__':
test_main()
diff --git a/lib-python/2.7/test/test_gzip.py b/lib-python/2.7/test/test_gzip.py
index 5025b91d34..cdb1af5c3d 100644
--- a/lib-python/2.7/test/test_gzip.py
+++ b/lib-python/2.7/test/test_gzip.py
@@ -6,6 +6,7 @@ from test import test_support
import os
import io
import struct
+import tempfile
gzip = test_support.import_module('gzip')
data1 = """ int length=DEFAULTALLOC, err = Z_OK;
@@ -331,6 +332,32 @@ class TestGzip(unittest.TestCase):
with gzip.GzipFile(fileobj=f, mode="w") as g:
self.assertEqual(g.name, "")
+ def test_fileobj_from_io_open(self):
+ fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
+ with io.open(fd, "wb") as f:
+ with gzip.GzipFile(fileobj=f, mode="w") as g:
+ self.assertEqual(g.name, "")
+
+ def test_fileobj_mode(self):
+ gzip.GzipFile(self.filename, "wb").close()
+ with open(self.filename, "r+b") as f:
+ with gzip.GzipFile(fileobj=f, mode='r') as g:
+ self.assertEqual(g.mode, gzip.READ)
+ with gzip.GzipFile(fileobj=f, mode='w') as g:
+ self.assertEqual(g.mode, gzip.WRITE)
+ with gzip.GzipFile(fileobj=f, mode='a') as g:
+ self.assertEqual(g.mode, gzip.WRITE)
+ with self.assertRaises(IOError):
+ gzip.GzipFile(fileobj=f, mode='z')
+ for mode in "rb", "r+b":
+ with open(self.filename, mode) as f:
+ with gzip.GzipFile(fileobj=f) as g:
+ self.assertEqual(g.mode, gzip.READ)
+ for mode in "wb", "ab":
+ with open(self.filename, mode) as f:
+ with gzip.GzipFile(fileobj=f) as g:
+ self.assertEqual(g.mode, gzip.WRITE)
+
def test_read_with_extra(self):
# Gzip data with an extra field
gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
@@ -339,6 +366,14 @@ class TestGzip(unittest.TestCase):
with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
self.assertEqual(f.read(), b'Test')
+ def test_fileobj_without_name(self):
+ # Issue #33038: GzipFile should not assume that file objects that have
+ # a .name attribute use a non-None value.
+ with tempfile.SpooledTemporaryFile() as f:
+ with gzip.GzipFile(fileobj=f, mode='wb') as archive:
+ archive.write(b'data')
+ self.assertEqual(archive.name, '')
+
def test_main(verbose=None):
test_support.run_unittest(TestGzip)
diff --git a/lib-python/2.7/test/test_hashlib.py b/lib-python/2.7/test/test_hashlib.py
index 471ebb4dd1..b8d6388fea 100644
--- a/lib-python/2.7/test/test_hashlib.py
+++ b/lib-python/2.7/test/test_hashlib.py
@@ -371,25 +371,25 @@ class HashLibTestCase(unittest.TestCase):
data = smallest_data*200000
expected_hash = hashlib.sha1(data*num_threads).hexdigest()
- def hash_in_chunks(chunk_size, event):
+ def hash_in_chunks(chunk_size):
index = 0
while index < len(data):
hasher.update(data[index:index+chunk_size])
index += chunk_size
- event.set()
- events = []
+ threads = []
for threadnum in xrange(num_threads):
chunk_size = len(data) // (10**threadnum)
assert chunk_size > 0
assert chunk_size % len(smallest_data) == 0
- event = threading.Event()
- events.append(event)
- threading.Thread(target=hash_in_chunks,
- args=(chunk_size, event)).start()
-
- for event in events:
- event.wait()
+ thread = threading.Thread(target=hash_in_chunks,
+ args=(chunk_size,))
+ threads.append(thread)
+
+ for thread in threads:
+ thread.start()
+ for thread in threads:
+ thread.join()
self.assertEqual(expected_hash, hasher.hexdigest())
diff --git a/lib-python/2.7/test/test_httpservers.py b/lib-python/2.7/test/test_httpservers.py
index 11f0d5d614..25236c6ef4 100644
--- a/lib-python/2.7/test/test_httpservers.py
+++ b/lib-python/2.7/test/test_httpservers.py
@@ -55,8 +55,8 @@ class TestServerThread(threading.Thread):
self.test_object = test_object
def run(self):
- self.server = HTTPServer(('', 0), self.request_handler)
- self.test_object.PORT = self.server.socket.getsockname()[1]
+ self.server = HTTPServer(('localhost', 0), self.request_handler)
+ self.test_object.HOST, self.test_object.PORT = self.server.socket.getsockname()
self.test_object.server_started.set()
self.test_object = None
try:
@@ -66,6 +66,7 @@ class TestServerThread(threading.Thread):
def stop(self):
self.server.shutdown()
+ self.join()
class BaseTestCase(unittest.TestCase):
@@ -83,7 +84,7 @@ class BaseTestCase(unittest.TestCase):
test_support.threading_cleanup(*self._threads)
def request(self, uri, method='GET', body=None, headers={}):
- self.connection = httplib.HTTPConnection('localhost', self.PORT)
+ self.connection = httplib.HTTPConnection(self.HOST, self.PORT)
self.connection.request(method, uri, body, headers)
return self.connection.getresponse()
@@ -186,7 +187,7 @@ class BaseHTTPServerTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
- self.con = httplib.HTTPConnection('localhost', self.PORT)
+ self.con = httplib.HTTPConnection(self.HOST, self.PORT)
self.con.connect()
def test_command(self):
diff --git a/lib-python/2.7/test/test_imaplib.py b/lib-python/2.7/test/test_imaplib.py
index 405b7ea8dd..acaad63b6a 100644
--- a/lib-python/2.7/test/test_imaplib.py
+++ b/lib-python/2.7/test/test_imaplib.py
@@ -166,14 +166,18 @@ class BaseThreadedNetworkedTests(unittest.TestCase):
def test_linetoolong(self):
+ maxline = 10
+
class TooLongHandler(SimpleIMAPHandler):
def handle(self):
# Send a very long response line
- self.wfile.write('* OK ' + imaplib._MAXLINE*'x' + '\r\n')
+ self.wfile.write('* OK ' + maxline * 'x' + '\r\n')
- with self.reaped_server(TooLongHandler) as server:
- self.assertRaises(imaplib.IMAP4.error,
- self.imap_class, *server.server_address)
+ with self.reaped_server(TooLongHandler) as server, \
+ support.swap_attr(imaplib, '_MAXLINE', maxline):
+ with self.assertRaisesRegexp(imaplib.IMAP4.error,
+ 'got more than 10 bytes'):
+ self.imap_class(*server.server_address)
class ThreadedNetworkedTests(BaseThreadedNetworkedTests):
@@ -187,9 +191,6 @@ class ThreadedNetworkedTestsSSL(BaseThreadedNetworkedTests):
server_class = SecureTCPServer
imap_class = IMAP4_SSL
- def test_linetoolong(self):
- raise unittest.SkipTest("test is not reliable on 2.7; see issue 20118")
-
class RemoteIMAPTest(unittest.TestCase):
host = 'cyrus.andrew.cmu.edu'
diff --git a/lib-python/2.7/test/test_import.py b/lib-python/2.7/test/test_import.py
index 767e5f06e1..b0cc589279 100644
--- a/lib-python/2.7/test/test_import.py
+++ b/lib-python/2.7/test/test_import.py
@@ -410,20 +410,19 @@ class ImportTests(unittest.TestCase):
def test_replace_parent_in_sys_modules(self):
dir_name = os.path.abspath(TESTFN)
os.mkdir(dir_name)
- try:
- pkg_dir = os.path.join(dir_name, 'sa')
- os.mkdir(pkg_dir)
- with open(os.path.join(pkg_dir, '__init__.py'), 'w') as init_file:
- init_file.write("import v1")
- with open(os.path.join(pkg_dir, 'v1.py'), 'w') as v1_file:
- v1_file.write("import sys;"
- "sys.modules['sa'] = sys.modules[__name__];"
- "import sa")
- sys.path.insert(0, dir_name)
- # a segfault means the test failed!
- import sa
- finally:
- rmtree(dir_name)
+ self.addCleanup(rmtree, dir_name)
+ pkg_dir = os.path.join(dir_name, 'sa')
+ os.mkdir(pkg_dir)
+ with open(os.path.join(pkg_dir, '__init__.py'), 'w') as init_file:
+ init_file.write("import v1")
+ with open(os.path.join(pkg_dir, 'v1.py'), 'w') as v1_file:
+ v1_file.write("import sys;"
+ "sys.modules['sa'] = sys.modules[__name__];"
+ "import sa")
+ sys.path.insert(0, dir_name)
+ self.addCleanup(sys.path.pop, 0)
+ # a segfault means the test failed!
+ import sa
def test_fromlist_type(self):
with self.assertRaises(TypeError) as cm:
@@ -560,7 +559,7 @@ class PathsTests(unittest.TestCase):
try:
os.listdir(unc)
except OSError as e:
- if e.errno in (errno.EPERM, errno.EACCES):
+ if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
diff --git a/lib-python/2.7/test/test_import_magic.py b/lib-python/2.7/test/test_import_magic.py
new file mode 100644
index 0000000000..68ada0f64a
--- /dev/null
+++ b/lib-python/2.7/test/test_import_magic.py
@@ -0,0 +1,60 @@
+import imp
+import sys
+
+import unittest
+
+# Note:
+# In Python 3.x, this test case is in Lib/test/test_importlib/test_util.py
+
+class MagicNumberTests(unittest.TestCase):
+ """
+ Test release compatibility issues relating to precompiled bytecode
+ """
+ @unittest.skipUnless(
+ sys.version_info.releaselevel in ('candidate', 'final'),
+ 'only applies to candidate or final python release levels'
+ )
+ def test_magic_number(self):
+ """
+ Each python minor release should generally have a MAGIC_NUMBER
+ that does not change once the release reaches candidate status.
+
+ Once a release reaches candidate status, the value of the constant
+ EXPECTED_MAGIC_NUMBER in this test should be changed.
+ This test will then check that the actual MAGIC_NUMBER matches
+ the expected value for the release.
+
+ In exceptional cases, it may be required to change the MAGIC_NUMBER
+ for a maintenance release. In this case the change should be
+ discussed in python-dev. If a change is required, community
+ stakeholders such as OS package maintainers must be notified
+ in advance. Such exceptional releases will then require an
+ adjustment to this test case.
+ """
+ EXPECTED_MAGIC_NUMBER = 62211
+ raw_magic = imp.get_magic()
+ actual = (ord(raw_magic[1]) << 8) + ord(raw_magic[0])
+
+ msg = (
+ "To avoid breaking backwards compatibility with cached bytecode "
+ "files that can't be automatically regenerated by the current "
+ "user, candidate and final releases require the current "
+ "importlib.util.MAGIC_NUMBER to match the expected "
+ "magic number in this test. Set the expected "
+ "magic number in this test to the current MAGIC_NUMBER to "
+ "continue with the release.\n\n"
+ "Changing the MAGIC_NUMBER for a maintenance release "
+ "requires discussion in python-dev and notification of "
+ "community stakeholders."
+ )
+ # PyPy uses cPython magic + 7 (see pypy/module/imp/importing.py)
+ EXPECTED_MAGIC_NUMBER += 7
+ self.assertEqual(EXPECTED_MAGIC_NUMBER, actual)#, msg)
+
+
+def test_main():
+ from test.support import run_unittest
+ run_unittest(MagicNumberTests)
+
+if __name__ == '__main__':
+ test_main()
diff --git a/lib-python/2.7/test/test_inspect.py b/lib-python/2.7/test/test_inspect.py
index fc91b4c51d..daed32cbd8 100644
--- a/lib-python/2.7/test/test_inspect.py
+++ b/lib-python/2.7/test/test_inspect.py
@@ -4,11 +4,13 @@ import types
import unittest
import inspect
import linecache
+import datetime
+import textwrap
from UserList import UserList
from UserDict import UserDict
-from test.test_support import run_unittest, check_py3k_warnings, have_unicode
-from test.test_support import check_impl_detail
+from test.support import run_unittest, check_py3k_warnings, have_unicode
+from test.support import check_impl_detail
with check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
@@ -105,7 +107,7 @@ class TestPredicates(IsTestBase):
# with an app-level slot.
self.istest(inspect.ismemberdescriptor, 'ExampleClassWithSlot.myslot')
else:
- self.assertFalse(inspect.ismemberdescriptor(type(lambda: None).func_globals))
+ self.assertFalse(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assertTrue(inspect.isroutine(mod.spam))
@@ -216,7 +218,7 @@ class GetSourceBase(unittest.TestCase):
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
- return "\n".join(lines[top-1:bottom]) + "\n"
+ return "\n".join(lines[top-1:bottom]) + ("\n" if bottom else "")
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
@@ -338,6 +340,16 @@ class TestRetrievingSourceCode(GetSourceBase):
finally:
linecache.getlines = getlines
+class TestGettingSourceOfToplevelFrames(GetSourceBase):
+ fodderFile = mod
+
+ def test_range_toplevel_frame(self):
+ self.maxDiff = None
+ self.assertSourceEqual(mod.currentframe, 1, None)
+
+ def test_range_traceback_toplevel_frame(self):
+ self.assertSourceEqual(mod.tb, 1, None)
+
class TestDecorators(GetSourceBase):
fodderFile = mod2
@@ -510,6 +522,20 @@ class TestClassesAndFunctions(unittest.TestCase):
'g', 'h', (3, (4, (5,))),
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
+ with check_py3k_warnings(("tuple parameter unpacking has been removed",
+ SyntaxWarning),
+ quiet=True):
+ exec(textwrap.dedent('''
+ def spam_deref(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h):
+ def eggs():
+ return a + b + c + d + e + f + g + h
+ return eggs
+ '''))
+ self.assertArgSpecEquals(spam_deref,
+ ['a', 'b', 'c', 'd', ['e', ['f']]],
+ 'g', 'h', (3, (4, (5,))),
+ '(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
+
def test_getargspec_method(self):
class A(object):
def m(self):
@@ -523,9 +549,15 @@ class TestClassesAndFunctions(unittest.TestCase):
exec 'def sublistOfOne((foo,)): return 1'
self.assertArgSpecEquals(sublistOfOne, [['foo']])
+ exec 'def sublistOfOne((foo,)): return (lambda: foo)'
+ self.assertArgSpecEquals(sublistOfOne, [['foo']])
+
exec 'def fakeSublistOfOne((foo)): return 1'
self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
+ exec 'def sublistOfOne((foo)): return (lambda: foo)'
+ self.assertArgSpecEquals(sublistOfOne, ['foo'])
+
def _classify_test(self, newstyle):
"""Helper for testing that classify_class_attrs finds a bunch of
@@ -829,6 +861,23 @@ class TestGetcallargsFunctions(unittest.TestCase):
self.assertEqualException(f3, '1, 2')
self.assertEqualException(f3, '1, 2, a=1, b=2')
+
+class TestGetcallargsFunctionsCellVars(TestGetcallargsFunctions):
+
+ def makeCallable(self, signature):
+ """Create a function that returns its locals(), excluding the
+ autogenerated '.1', '.2', etc. tuple param names (if any)."""
+ with check_py3k_warnings(
+ ("tuple parameter unpacking has been removed", SyntaxWarning),
+ quiet=True):
+ code = """lambda %s: (
+ (lambda: a+b+c+d+d+e+f+g+h), # make parameters cell vars
+ dict(i for i in locals().items()
+ if not is_tuplename(i[0]))
+ )[1]"""
+ return eval(code % signature, {'is_tuplename' : self.is_tuplename})
+
+
class TestGetcallargsMethods(TestGetcallargsFunctions):
def setUp(self):
@@ -866,8 +915,9 @@ def test_main():
run_unittest(
TestDecorators, TestRetrievingSourceCode, TestOneliners, TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates,
- TestGetcallargsFunctions, TestGetcallargsMethods,
- TestGetcallargsUnboundMethods)
+ TestGetcallargsFunctions, TestGetcallargsFunctionsCellVars,
+ TestGetcallargsMethods, TestGetcallargsUnboundMethods,
+ TestGettingSourceOfToplevelFrames)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_io.py b/lib-python/2.7/test/test_io.py
index 03bb56e1bb..ab4e001805 100644
--- a/lib-python/2.7/test/test_io.py
+++ b/lib-python/2.7/test/test_io.py
@@ -396,6 +396,22 @@ class IOTest(unittest.TestCase):
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
+ def test_readline_nonsizeable(self):
+ # Issue #30061
+ # Crash when readline() returns an object without __len__
+ class R(self.IOBase):
+ def readline(self):
+ return None
+ self.assertRaises((TypeError, StopIteration), next, R())
+
+ def test_next_nonsizeable(self):
+ # Issue #30061
+ # Crash when next() returns an object without __len__
+ class R(self.IOBase):
+ def next(self):
+ return None
+ self.assertRaises(TypeError, R().readlines, 1)
+
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
@@ -674,6 +690,16 @@ class IOTest(unittest.TestCase):
self.assertEqual(stream.readinto(buffer), 5)
self.assertEqual(buffer.tobytes(), b"12345")
+ def test_close_assert(self):
+ class R(self.IOBase):
+ def __setattr__(self, name, value):
+ pass
+ def flush(self):
+ raise OSError()
+ f = R()
+ # This would cause an assertion failure.
+ self.assertRaises(OSError, f.close)
+
class CIOTest(IOTest):
@@ -1075,6 +1101,7 @@ class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends into gc.garbage instead
+ self.addCleanup(support.unlink, support.TESTFN)
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
@@ -1277,6 +1304,7 @@ class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
def test_truncate(self):
# Truncate implicitly flushes the buffer.
+ self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
@@ -1383,6 +1411,7 @@ class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends into gc.garbage instead
+ self.addCleanup(support.unlink, support.TESTFN)
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
@@ -2653,6 +2682,22 @@ class TextIOWrapperTest(unittest.TestCase):
t = self.TextIOWrapper(NonbytesStream('a'))
self.assertEqual(t.read(), u'a')
+ def test_illegal_encoder(self):
+ # bpo-31271: A TypeError should be raised in case the return value of
+ # encoder's encode() is invalid.
+ class BadEncoder:
+ def encode(self, dummy):
+ return u'spam'
+ def get_bad_encoder(dummy):
+ return BadEncoder()
+ rot13 = codecs.lookup("rot13")
+ with support.swap_attr(rot13, '_is_text_encoding', True), \
+ support.swap_attr(rot13, 'incrementalencoder', get_bad_encoder):
+ t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
+ with self.assertRaises(TypeError):
+ t.write('bar')
+ t.flush()
+
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
@@ -2689,6 +2734,37 @@ class TextIOWrapperTest(unittest.TestCase):
#t = _make_illegal_wrapper()
#self.assertRaises(TypeError, t.read)
+ # Issue 31243: calling read() while the return value of decoder's
+ # getstate() is invalid should neither crash the interpreter nor
+ # raise a SystemError.
+ def _make_very_illegal_wrapper(getstate_ret_val):
+ class BadDecoder:
+ def getstate(self):
+ return getstate_ret_val
+ def _get_bad_decoder(dummy):
+ return BadDecoder()
+ quopri = codecs.lookup("quopri_codec")
+ with support.swap_attr(quopri, 'incrementaldecoder',
+ _get_bad_decoder):
+ return _make_illegal_wrapper()
+ t = _make_very_illegal_wrapper(42)
+ with self.maybeRaises(TypeError):
+ t.read(42)
+ t = _make_very_illegal_wrapper(())
+ with self.maybeRaises(TypeError):
+ t.read(42)
+
+ def test_issue25862(self):
+ # Assertion failures occurred in tell() after read() and write().
+ t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
+ t.read(1)
+ t.read()
+ t.tell()
+ t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
+ t.read(1)
+ t.write('x')
+ t.tell()
+
class CTextIOWrapperTest(TextIOWrapperTest):
@@ -2734,6 +2810,11 @@ class CTextIOWrapperTest(TextIOWrapperTest):
t2.buddy = t1
support.gc_collect()
+ def test_del__CHUNK_SIZE_SystemError(self):
+ t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
+ with self.assertRaises(AttributeError):
+ del t._CHUNK_SIZE
+
maybeRaises = unittest.TestCase.assertRaises
@@ -2849,6 +2930,16 @@ class IncrementalNewlineDecoderTest(unittest.TestCase):
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
+ def test_translate(self):
+ # issue 35062
+ for translate in (-2, -1, 1, 2):
+ decoder = codecs.getincrementaldecoder("utf-8")()
+ decoder = self.IncrementalNewlineDecoder(decoder, translate)
+ self.check_newline_decoding_utf8(decoder)
+ decoder = codecs.getincrementaldecoder("utf-8")()
+ decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
+ self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
+
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
@@ -2936,6 +3027,7 @@ class MiscIOTest(unittest.TestCase):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
+ self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
@@ -3121,7 +3213,6 @@ class SignalsTest(unittest.TestCase):
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
- signal.alarm(1)
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
@@ -3129,10 +3220,13 @@ class SignalsTest(unittest.TestCase):
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
try:
+ signal.alarm(1)
with self.assertRaises(ZeroDivisionError):
wio.write(item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
+ signal.alarm(0)
t.join()
+
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
@@ -3181,6 +3275,7 @@ class SignalsTest(unittest.TestCase):
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
+ signal.alarm(0)
wio.close()
os.close(r)
@@ -3209,6 +3304,7 @@ class SignalsTest(unittest.TestCase):
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
+ signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
@@ -3270,6 +3366,7 @@ class SignalsTest(unittest.TestCase):
self.assertIsNone(error[0])
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
+ signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
diff --git a/lib-python/2.7/test/test_itertools.py b/lib-python/2.7/test/test_itertools.py
index 0863522ff3..12c29c66c5 100644
--- a/lib-python/2.7/test/test_itertools.py
+++ b/lib-python/2.7/test/test_itertools.py
@@ -10,6 +10,10 @@ import random
import copy
import pickle
from functools import reduce
+try:
+ import threading
+except ImportError:
+ threading = None
maxsize = test_support.MAX_Py_ssize_t
minsize = -maxsize-1
@@ -802,6 +806,7 @@ class TestBasicOps(unittest.TestCase):
(10, 20, 3),
(10, 3, 20),
(10, 20),
+ (10, 10),
(10, 3),
(20,)
]:
@@ -826,6 +831,10 @@ class TestBasicOps(unittest.TestCase):
self.assertEqual(list(islice(it, 3)), range(3))
self.assertEqual(list(it), range(3, 10))
+ it = iter(range(10))
+ self.assertEqual(list(islice(it, 3, 3)), [])
+ self.assertEqual(list(it), range(3, 10))
+
# Test invalid arguments
self.assertRaises(TypeError, islice, xrange(10))
self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
@@ -988,6 +997,43 @@ class TestBasicOps(unittest.TestCase):
del forward, backward
raise
+ def test_tee_reenter(self):
+ class I:
+ first = True
+ def __iter__(self):
+ return self
+ def next(self):
+ first = self.first
+ self.first = False
+ if first:
+ return next(b)
+
+ a, b = tee(I())
+ with self.assertRaisesRegexp(RuntimeError, "tee"):
+ next(a)
+
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ def test_tee_concurrent(self):
+ start = threading.Event()
+ finish = threading.Event()
+ class I:
+ def __iter__(self):
+ return self
+ def next(self):
+ start.set()
+ finish.wait()
+
+ a, b = tee(I())
+ thread = threading.Thread(target=next, args=[a])
+ thread.start()
+ try:
+ start.wait()
+ with self.assertRaisesRegexp(RuntimeError, "tee"):
+ next(b)
+ finally:
+ finish.set()
+ thread.join()
+
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
@@ -1093,6 +1139,48 @@ class TestExamples(unittest.TestCase):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
+class TestPurePythonRoughEquivalents(unittest.TestCase):
+
+ @staticmethod
+ def islice(iterable, *args):
+ s = slice(*args)
+ start, stop, step = s.start or 0, s.stop or sys.maxint, s.step or 1
+ it = iter(xrange(start, stop, step))
+ try:
+ nexti = next(it)
+ except StopIteration:
+ # Consume *iterable* up to the *start* position.
+ for i, element in izip(xrange(start), iterable):
+ pass
+ return
+ try:
+ for i, element in enumerate(iterable):
+ if i == nexti:
+ yield element
+ nexti = next(it)
+ except StopIteration:
+ # Consume to *stop*.
+ for i, element in izip(xrange(i + 1, stop), iterable):
+ pass
+
+ def test_islice_recipe(self):
+ self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
+ self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
+ self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
+ self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
+ # Test items consumed.
+ it = iter(xrange(10))
+ self.assertEqual(list(self.islice(it, 3)), range(3))
+ self.assertEqual(list(it), range(3, 10))
+ it = iter(xrange(10))
+ self.assertEqual(list(self.islice(it, 3, 3)), [])
+ self.assertEqual(list(it), range(3, 10))
+ # Test that slice finishes in predictable state.
+ c = count()
+ self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
+ self.assertEqual(next(c), 3)
+
+
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
@@ -1475,6 +1563,39 @@ class RegressionTests(unittest.TestCase):
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
+ def test_long_chain_of_empty_iterables(self):
+ # Make sure itertools.chain doesn't run into recursion limits when
+ # dealing with long chains of empty iterables. Even with a high
+ # number this would probably only fail in Py_DEBUG mode.
+ it = chain.from_iterable(() for unused in xrange(10000000))
+ with self.assertRaises(StopIteration):
+ next(it)
+
+ def test_issue30347_1(self):
+ def f(n):
+ if n == 5:
+ list(b)
+ return n != 6
+ for (k, b) in groupby(range(10), f):
+ list(b) # shouldn't crash
+
+ def test_issue30347_2(self):
+ class K(object):
+ i = 0
+ def __init__(self, v):
+ pass
+ def __eq__(self, other):
+ K.i += 1
+ if K.i == 1:
+ next(g, None)
+ return True
+ def __hash__(self):
+ return 1
+ g = next(groupby(range(10), K))[1]
+ for j in range(2):
+ next(g, None) # shouldn't crash
+
+
class SubclassWithKwargsTest(unittest.TestCase):
def test_keywords_in_subclass(self):
# count is not subclassable...
@@ -1554,6 +1675,17 @@ Samuele
... "Return function(0), function(1), ..."
... return imap(function, count(start))
+>>> import collections
+>>> def consume(iterator, n=None):
+... "Advance the iterator n-steps ahead. If n is None, consume entirely."
+... # Use functions that consume iterators at C speed.
+... if n is None:
+... # feed the entire iterator into a zero-length deque
+... collections.deque(iterator, maxlen=0)
+... else:
+... # advance to the empty slice starting at position n
+... next(islice(iterator, n, n), None)
+
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
@@ -1655,6 +1787,14 @@ perform as purported.
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
+>>> it = iter(xrange(10))
+>>> consume(it, 3)
+>>> next(it)
+3
+>>> consume(it)
+>>> next(it, 'Done')
+'Done'
+
>>> nth('abcde', 3)
'd'
@@ -1730,7 +1870,8 @@ __test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
- SubclassWithKwargsTest, TestExamples)
+ SubclassWithKwargsTest, TestExamples,
+ TestPurePythonRoughEquivalents)
test_support.run_unittest(*test_classes)
# verify reference counting
diff --git a/lib-python/2.7/test/test_kqueue.py b/lib-python/2.7/test/test_kqueue.py
index 7f7d60d60c..0efb3b1350 100644
--- a/lib-python/2.7/test/test_kqueue.py
+++ b/lib-python/2.7/test/test_kqueue.py
@@ -36,9 +36,12 @@ class TestKQueue(unittest.TestCase):
self.assertEqual(cmp(ev, other), -1)
self.assertTrue(ev < other)
self.assertTrue(other >= ev)
- self.assertRaises(TypeError, cmp, ev, None)
- self.assertRaises(TypeError, cmp, ev, 1)
- self.assertRaises(TypeError, cmp, ev, "ev")
+ self.assertNotEqual(cmp(ev, None), 0)
+ self.assertNotEqual(cmp(ev, 1), 0)
+ self.assertNotEqual(cmp(ev, "ev"), 0)
+ self.assertEqual(cmp(ev, None), -cmp(None, ev))
+ self.assertEqual(cmp(ev, 1), -cmp(1, ev))
+ self.assertEqual(cmp(ev, "ev"), -cmp("ev", ev))
ev = select.kevent(fd, select.KQ_FILTER_WRITE)
self.assertEqual(ev.ident, fd)
@@ -205,6 +208,30 @@ class TestKQueue(unittest.TestCase):
b.close()
kq.close()
+ def test_issue30058(self):
+ # changelist must be an iterable
+ kq = select.kqueue()
+ a, b = socket.socketpair()
+ ev = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+
+ kq.control([ev], 0)
+ # not a list
+ kq.control((ev,), 0)
+ # __len__ is not consistent with __iter__
+ class BadList:
+ def __len__(self):
+ return 0
+ def __iter__(self):
+ for i in range(100):
+ yield ev
+ kq.control(BadList(), 0)
+ # doesn't have __len__
+ kq.control(iter([ev]), 0)
+
+ a.close()
+ b.close()
+ kq.close()
+
def test_control_raises_oserror(self):
kq = select.kqueue()
event = select.kevent(123456, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
diff --git a/lib-python/2.7/test/test_linecache.py b/lib-python/2.7/test/test_linecache.py
index 39df8b2ec2..b094cf11ea 100644
--- a/lib-python/2.7/test/test_linecache.py
+++ b/lib-python/2.7/test/test_linecache.py
@@ -3,7 +3,7 @@
import linecache
import unittest
import os.path
-from test import test_support as support
+from test import support
FILENAME = linecache.__file__
@@ -11,7 +11,7 @@ INVALID_NAME = '!@$)(!@#_1'
EMPTY = ''
TESTS = 'inspect_fodder inspect_fodder2 mapping_tests'
TESTS = TESTS.split()
-TEST_PATH = os.path.dirname(support.__file__)
+TEST_PATH = support.TEST_HOME_DIR
MODULES = "linecache abc".split()
MODULE_PATH = os.path.dirname(FILENAME)
diff --git a/lib-python/2.7/test/test_locale.py b/lib-python/2.7/test/test_locale.py
index 563ddb144f..6070882a37 100644
--- a/lib-python/2.7/test/test_locale.py
+++ b/lib-python/2.7/test/test_locale.py
@@ -425,7 +425,7 @@ class NormalizeTest(unittest.TestCase):
def test_valencia_modifier(self):
self.check('ca_ES.UTF-8@valencia', 'ca_ES.UTF-8@valencia')
- self.check('ca_ES@valencia', 'ca_ES.ISO8859-15@valencia')
+ self.check('ca_ES@valencia', 'ca_ES.UTF-8@valencia')
self.check('ca@valencia', 'ca_ES.ISO8859-1@valencia')
def test_devanagari_modifier(self):
diff --git a/lib-python/2.7/test/test_mailbox.py b/lib-python/2.7/test/test_mailbox.py
index 2261bb854c..f8db5d5723 100644
--- a/lib-python/2.7/test/test_mailbox.py
+++ b/lib-python/2.7/test/test_mailbox.py
@@ -654,7 +654,7 @@ class TestMaildir(TestMailbox, unittest.TestCase):
hostname = hostname.replace(':', r'\072')
pid = os.getpid()
pattern = re.compile(r"(?P<time>\d+)\.M(?P<M>\d{1,6})P(?P<P>\d+)"
- r"Q(?P<Q>\d+)\.(?P<host>[^:/]+)")
+ r"Q(?P<Q>\d+)\.(?P<host>[^:/]*)")
previous_groups = None
for x in xrange(repetitions):
tmp_file = self._box._create_tmp()
diff --git a/lib-python/2.7/test/test_marshal.py b/lib-python/2.7/test/test_marshal.py
index 8bf0e06a69..bea18cbf9c 100644
--- a/lib-python/2.7/test/test_marshal.py
+++ b/lib-python/2.7/test/test_marshal.py
@@ -169,8 +169,22 @@ class BugsTestCase(unittest.TestCase):
pass
def test_loads_recursion(self):
- s = 'c' + ('X' * 4*4) + '{' * 2**20
- self.assertRaises(ValueError, marshal.loads, s)
+ def run_tests(N, check):
+ # (((...None...),),)
+ check(b'(\x01\x00\x00\x00' * N + b'N')
+ # [[[...None...]]]
+ check(b'[\x01\x00\x00\x00' * N + b'N')
+ # {None: {None: {None: ...None...}}}
+ check(b'{N' * N + b'N' + b'0' * N)
+ # frozenset([frozenset([frozenset([...None...])])])
+ check(b'>\x01\x00\x00\x00' * N + b'N')
+ # Check that the generated marshal data is valid and marshal.loads()
+ # works for moderately deep nesting
+ run_tests(100, marshal.loads)
+ # Very deeply nested structure shouldn't blow the stack
+ def check(s):
+ self.assertRaises(ValueError, marshal.loads, s)
+ run_tests(2**20, check)
@test_support.impl_detail('specific recursion check')
def test_recursion_limit(self):
diff --git a/lib-python/2.7/test/test_memoryio.py b/lib-python/2.7/test/test_memoryio.py
index 459f51fe79..0e34e5fd8a 100644
--- a/lib-python/2.7/test/test_memoryio.py
+++ b/lib-python/2.7/test/test_memoryio.py
@@ -5,6 +5,7 @@ BytesIO -- for bytes
from __future__ import unicode_literals
from __future__ import print_function
+from __future__ import absolute_import
import unittest
from test import test_support as support
diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py
index 1fd0b0a316..9f9b4b4869 100644
--- a/lib-python/2.7/test/test_memoryview.py
+++ b/lib-python/2.7/test/test_memoryview.py
@@ -12,6 +12,7 @@ from test import test_support
import io
import copy
import pickle
+import warnings
class AbstractMemoryTests:
@@ -366,15 +367,20 @@ class BytesMemorySliceSliceTest(unittest.TestCase,
class OtherTest(unittest.TestCase):
def test_copy(self):
m = memoryview(b'abc')
- with self.assertRaises(TypeError):
+ with self.assertRaises(TypeError), warnings.catch_warnings():
+ warnings.filterwarnings('ignore', ".*memoryview", DeprecationWarning)
copy.copy(m)
- # See issue #22995
- ## def test_pickle(self):
- ## m = memoryview(b'abc')
- ## for proto in range(pickle.HIGHEST_PROTOCOL + 1):
- ## with self.assertRaises(TypeError):
- ## pickle.dumps(m, proto)
+ @test_support.cpython_only
+ def test_pickle(self):
+ m = memoryview(b'abc')
+ for proto in range(2):
+ with self.assertRaises(TypeError):
+ pickle.dumps(m, proto)
+ with test_support.check_py3k_warnings(
+ (".*memoryview", DeprecationWarning)):
+ pickle.dumps(m, 2)
+
def test_main():
diff --git a/lib-python/2.7/test/test_minidom.py b/lib-python/2.7/test/test_minidom.py
index b6d88d2581..2eb642395b 100644
--- a/lib-python/2.7/test/test_minidom.py
+++ b/lib-python/2.7/test/test_minidom.py
@@ -3,7 +3,7 @@
import copy
import pickle
from StringIO import StringIO
-from test.test_support import verbose, run_unittest, findfile
+from test import support
import unittest
import xml.dom
@@ -14,7 +14,7 @@ from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom.minidom import getDOMImplementation
-tstfile = findfile("test.xml", subdir="xmltestdata")
+tstfile = support.findfile("test.xml", subdir="xmltestdata")
sample = ("<?xml version='1.0' encoding='us-ascii'?>\n"
"<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
" 'http://xml.python.org/system' [\n"
@@ -711,6 +711,57 @@ class MinidomTest(unittest.TestCase):
def testClonePIDeep(self):
self.check_clone_pi(1, "testClonePIDeep")
+ def check_clone_node_entity(self, clone_document):
+ # bpo-35052: Test user data handler in cloneNode() on a document with
+ # an entity
+ document = xml.dom.minidom.parseString("""
+ <?xml version="1.0" ?>
+ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd"
+ [ <!ENTITY smile ":-)"> ]
+ >
+ <doc>Don't let entities make you frown &smile;</doc>
+ """.strip())
+
+ class Handler:
+ def handle(self, operation, key, data, src, dst):
+ self.operation = operation
+ self.key = key
+ self.data = data
+ self.src = src
+ self.dst = dst
+
+ handler = Handler()
+ doctype = document.doctype
+ entity = doctype.entities['smile']
+ entity.setUserData("key", "data", handler)
+
+ if clone_document:
+ # clone Document
+ clone = document.cloneNode(deep=True)
+
+ self.assertEqual(clone.documentElement.firstChild.wholeText,
+ "Don't let entities make you frown :-)")
+ operation = xml.dom.UserDataHandler.NODE_IMPORTED
+ dst = clone.doctype.entities['smile']
+ else:
+ # clone DocumentType
+ with support.swap_attr(doctype, 'ownerDocument', None):
+ clone = doctype.cloneNode(deep=True)
+
+ operation = xml.dom.UserDataHandler.NODE_CLONED
+ dst = clone.entities['smile']
+
+ self.assertEqual(handler.operation, operation)
+ self.assertEqual(handler.key, "key")
+ self.assertEqual(handler.data, "data")
+ self.assertIs(handler.src, entity)
+ self.assertIs(handler.dst, dst)
+
+ def testCloneNodeEntity(self):
+ self.check_clone_node_entity(False)
+ self.check_clone_node_entity(True)
+
def testNormalize(self):
doc = parseString("<doc/>")
root = doc.documentElement
@@ -1446,7 +1497,7 @@ class MinidomTest(unittest.TestCase):
def test_main():
- run_unittest(MinidomTest)
+ support.run_unittest(MinidomTest)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_mmap.py b/lib-python/2.7/test/test_mmap.py
index 16e7dc7d9d..52187672a6 100644
--- a/lib-python/2.7/test/test_mmap.py
+++ b/lib-python/2.7/test/test_mmap.py
@@ -679,6 +679,13 @@ class MmapTests(unittest.TestCase):
self.assertRaises(ValueError, m.write_byte, 'b')
self.assertRaises(ValueError, m.write, 'abc')
+ def test_concat_repeat_exception(self):
+ m = mmap.mmap(-1, 16)
+ with self.assertRaises(TypeError):
+ m + m
+ with self.assertRaises(TypeError):
+ m * 2
+
class LargeMmapTests(unittest.TestCase):
diff --git a/lib-python/2.7/test/test_msilib.py b/lib-python/2.7/test/test_msilib.py
index 5d5b0c48fe..a6a455dd75 100644
--- a/lib-python/2.7/test/test_msilib.py
+++ b/lib-python/2.7/test/test_msilib.py
@@ -1,8 +1,57 @@
""" Test suite for the code in msilib """
import unittest
-import os
-from test_support import run_unittest, import_module
+from test_support import TESTFN, import_module, run_unittest, unlink
msilib = import_module('msilib')
+import msilib.schema
+
+
+def init_database():
+ path = TESTFN + '.msi'
+ db = msilib.init_database(
+ path,
+ msilib.schema,
+ 'Python Tests',
+ 'product_code',
+ '1.0',
+ 'PSF',
+ )
+ return db, path
+
+
+class MsiDatabaseTestCase(unittest.TestCase):
+
+ def test_summaryinfo_getproperty_issue1104(self):
+ db, db_path = init_database()
+ try:
+ sum_info = db.GetSummaryInformation(99)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"Installation Database")
+
+ sum_info.SetProperty(msilib.PID_TITLE, "a" * 999)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"a" * 999)
+
+ sum_info.SetProperty(msilib.PID_TITLE, "a" * 1000)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"a" * 1000)
+
+ sum_info.SetProperty(msilib.PID_TITLE, "a" * 1001)
+ title = sum_info.GetProperty(msilib.PID_TITLE)
+ self.assertEqual(title, b"a" * 1001)
+ finally:
+ db = None
+ sum_info = None
+ unlink(db_path)
+
+ def test_directory_start_component_keyfile(self):
+ db, db_path = init_database()
+ self.addCleanup(msilib._directories.clear)
+ feature = msilib.Feature(db, 0, 'Feature', 'A feature', 'Python')
+ cab = msilib.CAB('CAB')
+ dir = msilib.Directory(db, cab, None, TESTFN, 'TARGETDIR',
+ 'SourceDir', 0)
+ dir.start_component(None, feature, None, 'keyfile')
+
class Test_make_id(unittest.TestCase):
#http://msdn.microsoft.com/en-us/library/aa369212(v=vs.85).aspx
@@ -35,12 +84,13 @@ class Test_make_id(unittest.TestCase):
def test_invalid_any_char(self):
self.assertEqual(
msilib.make_id(".s\x82ort"), "_.s_ort")
- self.assertEqual (
+ self.assertEqual(
msilib.make_id(".s\x82o?*+rt"), "_.s_o___rt")
def test_main():
run_unittest(__name__)
+
if __name__ == '__main__':
test_main()
diff --git a/lib-python/2.7/test/test_multiprocessing.py b/lib-python/2.7/test/test_multiprocessing.py
index ba9caec0ad..9b21ac6797 100644
--- a/lib-python/2.7/test/test_multiprocessing.py
+++ b/lib-python/2.7/test/test_multiprocessing.py
@@ -19,17 +19,18 @@ import socket
import random
import logging
import errno
+import weakref
import test.script_helper
-from test import test_support
+from test import support
from StringIO import StringIO
-_multiprocessing = test_support.import_module('_multiprocessing')
+_multiprocessing = support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
-test_support.import_module('multiprocessing.synchronize')
+support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
@@ -179,6 +180,12 @@ def get_value(self):
# Testcases
#
+class DummyCallable(object):
+ def __call__(self, q, c):
+ assert isinstance(c, DummyCallable)
+ q.put(5)
+
+
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@@ -339,8 +346,8 @@ class _TestProcess(BaseTestCase):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
- testfn = test_support.TESTFN
- self.addCleanup(test_support.unlink, testfn)
+ testfn = support.TESTFN
+ self.addCleanup(support.unlink, testfn)
for reason, code in (([1, 2, 3], 1), ('ignore this', 1)):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
@@ -359,6 +366,19 @@ class _TestProcess(BaseTestCase):
p.join(5)
self.assertEqual(p.exitcode, reason)
+ def test_lose_target_ref(self):
+ c = DummyCallable()
+ wr = weakref.ref(c)
+ q = self.Queue()
+ p = self.Process(target=c, args=(q, c))
+ del c
+ p.start()
+ p.join()
+ support.gc_collect()
+ self.assertIs(wr(), None)
+ self.assertEqual(q.get(), 5)
+
+
#
#
#
@@ -626,7 +646,7 @@ class _TestQueue(BaseTestCase):
p.join()
def test_no_import_lock_contention(self):
- with test_support.temp_cwd():
+ with support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
@@ -638,13 +658,29 @@ class _TestQueue(BaseTestCase):
q.close()
""")
- with test_support.DirsOnSysPath(os.getcwd()):
+ with support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except Queue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
+ def test_queue_feeder_donot_stop_onexc(self):
+ # bpo-30414: verify feeder handles exceptions correctly
+ if self.TYPE != 'processes':
+ self.skipTest('test not appropriate for {}'.format(self.TYPE))
+
+ class NotSerializable(object):
+ def __reduce__(self):
+ raise AttributeError
+ with test.support.captured_stderr():
+ q = self.Queue()
+ q.put(NotSerializable())
+ q.put(True)
+ # bpo-30595: use a timeout of 1 second for slow buildbots
+ self.assertTrue(q.get(timeout=1.0))
+
+
#
#
#
@@ -844,7 +880,13 @@ class _TestCondition(BaseTestCase):
cond.release()
# check they have all woken
- time.sleep(DELTA)
+ for i in range(10):
+ try:
+ if get_value(woken) == 6:
+ break
+ except NotImplementedError:
+ break
+ time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
@@ -1099,6 +1141,16 @@ class _TestContainers(BaseTestCase):
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
+ def test_list_iter(self):
+ a = self.list(range(10))
+ it = iter(a)
+ self.assertEqual(list(it), range(10))
+ self.assertEqual(list(it), []) # exhausted
+ # list modified during iteration
+ it = iter(a)
+ a[0] = 100
+ self.assertEqual(next(it), 100)
+
def test_dict(self):
d = self.dict()
indices = range(65, 70)
@@ -1109,6 +1161,19 @@ class _TestContainers(BaseTestCase):
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
+ def test_dict_iter(self):
+ d = self.dict()
+ indices = range(65, 70)
+ for i in indices:
+ d[i] = chr(i)
+ it = iter(d)
+ self.assertEqual(list(it), indices)
+ self.assertEqual(list(it), []) # exhausted
+ # dictionary changed size during iteration
+ it = iter(d)
+ d.clear()
+ self.assertRaises(RuntimeError, next, it)
+
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
@@ -1128,6 +1193,19 @@ def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
+def identity(x):
+ return x
+
+class CountedObject(object):
+ n_instances = 0
+
+ def __new__(cls):
+ cls.n_instances += 1
+ return object.__new__(cls)
+
+ def __del__(self):
+ type(self).n_instances -= 1
+
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
@@ -1211,10 +1289,10 @@ class _TestPool(BaseTestCase):
self.assertRaises(SayWhenError, it.next)
def test_imap_unordered(self):
- it = self.pool.imap_unordered(sqr, range(1000))
- self.assertEqual(sorted(it), map(sqr, range(1000)))
+ it = self.pool.imap_unordered(sqr, range(100))
+ self.assertEqual(sorted(it), map(sqr, range(100)))
- it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
+ it = self.pool.imap_unordered(sqr, range(1000), chunksize=100)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_imap_unordered_handle_iterable_exception(self):
@@ -1273,6 +1351,22 @@ class _TestPool(BaseTestCase):
p.close()
p.join()
+ def test_release_task_refs(self):
+ # Issue #29861: task arguments and results should not be kept
+ # alive after we are done with them.
+ objs = list(CountedObject() for i in range(10))
+ refs = list(weakref.ref(o) for o in objs)
+ self.pool.map(identity, objs)
+
+ del objs
+ support.gc_collect()
+ time.sleep(DELTA) # let threaded cleanup code run
+ self.assertEqual(set(wr() for wr in refs), {None})
+ # With a process pool, copies of the objects are returned, check
+ # they were released too.
+ self.assertEqual(CountedObject.n_instances, 0)
+
+
def unpickleable_result():
class C:
pass
@@ -1455,10 +1549,10 @@ class _TestRemoteManager(BaseTestCase):
#'hall\xc3\xa5 v\xc3\xa4rlden'] # UTF-8
]
result = values[:]
- if test_support.have_unicode:
+ if support.have_unicode:
#result[-1] = u'hall\xe5 v\xe4rlden'
- uvalue = test_support.u(r'\u043f\u0440\u0438\u0432\u0456\u0442 '
- r'\u0441\u0432\u0456\u0442')
+ uvalue = support.u(r'\u043f\u0440\u0438\u0432\u0456\u0442 '
+ r'\u0441\u0432\u0456\u0442')
values.append(uvalue)
result.append(uvalue)
@@ -1476,7 +1570,7 @@ class _TestRemoteManager(BaseTestCase):
authkey = os.urandom(32)
manager = QueueManager(
- address=(test.test_support.HOST, 0), authkey=authkey, serializer=SERIALIZER
+ address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
@@ -1513,7 +1607,7 @@ class _TestManagerRestart(BaseTestCase):
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
- address=(test.test_support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
+ address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
@@ -1522,13 +1616,14 @@ class _TestManagerRestart(BaseTestCase):
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
- p.daemon = True
p.start()
+ p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
- test_support.gc_collect()
+ support.gc_collect()
manager.shutdown()
+
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
@@ -1724,13 +1819,14 @@ class _TestConnection(BaseTestCase):
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
- with open(test_support.TESTFN, "wb") as f:
+ self.addCleanup(support.unlink, support.TESTFN)
+ with open(support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
- with open(test_support.TESTFN, "rb") as f:
+ with open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@@ -1749,7 +1845,8 @@ class _TestConnection(BaseTestCase):
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
- with open(test_support.TESTFN, "wb") as f:
+ self.addCleanup(support.unlink, support.TESTFN)
+ with open(support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
@@ -1762,7 +1859,7 @@ class _TestConnection(BaseTestCase):
finally:
os.close(newfd)
p.join()
- with open(test_support.TESTFN, "rb") as f:
+ with open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
@@ -1969,7 +2066,7 @@ class _TestHeap(BaseTestCase):
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
- if test_support.check_impl_detail(cpython=True):
+ if support.check_impl_detail(cpython=True):
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
@@ -2035,7 +2132,7 @@ class _TestSharedCTypes(BaseTestCase):
def test_synchronize(self):
self.test_sharedctypes(lock=True)
- @unittest.skipUnless(test_support.check_impl_detail(pypy=False), "pypy ctypes differences")
+ @unittest.skipUnless(support.check_impl_detail(pypy=False), "pypy ctypes differences")
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
@@ -2052,6 +2149,14 @@ class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
+ def setUp(self):
+ self.registry_backup = util._finalizer_registry.copy()
+ util._finalizer_registry.clear()
+
+ def tearDown(self):
+ self.assertFalse(util._finalizer_registry)
+ util._finalizer_registry.update(self.registry_backup)
+
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
@@ -2060,7 +2165,7 @@ class _TestFinalize(BaseTestCase):
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
- test_support.gc_collect()
+ support.gc_collect()
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
@@ -2102,6 +2207,62 @@ class _TestFinalize(BaseTestCase):
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
+ def test_thread_safety(self):
+ # bpo-24484: _run_finalizers() should be thread-safe
+ def cb():
+ pass
+
+ class Foo(object):
+ def __init__(self):
+ self.ref = self # create reference cycle
+ # insert finalizer at random key
+ util.Finalize(self, cb, exitpriority=random.randint(1, 100))
+
+ finish = False
+ exc = []
+
+ def run_finalizers():
+ while not finish:
+ time.sleep(random.random() * 1e-1)
+ try:
+ # A GC run will eventually happen during this,
+ # collecting stale Foo's and mutating the registry
+ util._run_finalizers()
+ except Exception as e:
+ exc.append(e)
+
+ def make_finalizers():
+ d = {}
+ while not finish:
+ try:
+ # Old Foo's get gradually replaced and later
+ # collected by the GC (because of the cyclic ref)
+ d[random.getrandbits(5)] = {Foo() for i in range(10)}
+ except Exception as e:
+ exc.append(e)
+ d.clear()
+
+ old_interval = sys.getcheckinterval()
+ if support.check_impl_detail(cpython=True):
+ old_threshold = gc.get_threshold()
+ try:
+ sys.setcheckinterval(10)
+ if support.check_impl_detail(cpython=True):
+ gc.set_threshold(5, 5, 5)
+ threads = [threading.Thread(target=run_finalizers),
+ threading.Thread(target=make_finalizers)]
+ with support.start_threads(threads):
+ time.sleep(4.0) # Wait a bit to trigger race condition
+ finish = True
+ if exc:
+ raise exc[0]
+ finally:
+ sys.setcheckinterval(old_interval)
+ if support.check_impl_detail(cpython=True):
+ gc.set_threshold(*old_threshold)
+ gc.collect() # Collect remaining Foo's
+
+
#
# Test that from ... import * works for each module
#
@@ -2519,7 +2680,7 @@ class TestFlags(unittest.TestCase):
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
- @test_support.requires_unicode # XXX json needs unicode support
+ @support.requires_unicode # XXX json needs unicode support
def test_flags(self):
import json, subprocess
# start child process using unusual flags
@@ -2565,6 +2726,9 @@ class TestForkAwareThreadLock(unittest.TestCase):
class TestIgnoreEINTR(unittest.TestCase):
+ # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
+ CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
+
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
@@ -2573,7 +2737,7 @@ class TestIgnoreEINTR(unittest.TestCase):
conn.send('ready')
x = conn.recv()
conn.send(x)
- conn.send_bytes(b'x'*(1024*1024)) # sending 1 MB should block
+ conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
@@ -2592,7 +2756,7 @@ class TestIgnoreEINTR(unittest.TestCase):
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
- self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
+ self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
@@ -2649,7 +2813,7 @@ def test_main(run=None):
check_enough_semaphores()
if run is None:
- from test.test_support import run_unittest as run
+ from test.support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
@@ -2674,7 +2838,7 @@ def test_main(run=None):
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
- with test_support.check_py3k_warnings(
+ with support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
diff --git a/lib-python/2.7/test/test_netrc.py b/lib-python/2.7/test/test_netrc.py
index 4156c535ef..4d49a55cb6 100644
--- a/lib-python/2.7/test/test_netrc.py
+++ b/lib-python/2.7/test/test_netrc.py
@@ -5,25 +5,29 @@ temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
- def make_nrc(self, test_data):
+ def make_nrc(self, test_data, cleanup=True):
test_data = textwrap.dedent(test_data)
mode = 'w'
if sys.platform != 'cygwin':
mode += 't'
with open(temp_filename, mode) as fp:
fp.write(test_data)
- self.addCleanup(os.unlink, temp_filename)
+ if cleanup:
+ self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
def test_default(self):
nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
- """)
+ """, cleanup=False)
self.assertEqual(nrc.hosts['host1.domain.com'],
('log1', 'acct1', 'pass1'))
self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))
+ nrc2 = self.make_nrc(nrc.__repr__(), cleanup=True)
+ self.assertEqual(nrc.hosts, nrc2.hosts)
+
def test_macros(self):
nrc = self.make_nrc("""\
macdef macro1
diff --git a/lib-python/2.7/test/test_ordered_dict.py b/lib-python/2.7/test/test_ordered_dict.py
index a5d7f82870..8b01a148f3 100644
--- a/lib-python/2.7/test/test_ordered_dict.py
+++ b/lib-python/2.7/test/test_ordered_dict.py
@@ -244,6 +244,19 @@ class TestOrderedDict(unittest.TestCase):
self.assertEqual(repr(od),
"OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")
+ def test_repr_recursive_values(self):
+ od = OrderedDict()
+ od[42] = od.viewvalues()
+ r = repr(od)
+ # Cannot perform a stronger test, as the contents of the repr
+ # are implementation-dependent. All we can say is that we
+ # want a str result, not an exception of any sort.
+ self.assertIsInstance(r, str)
+ od[42] = od.viewitems()
+ r = repr(od)
+ # Again.
+ self.assertIsInstance(r, str)
+
def test_setdefault(self):
pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
shuffle(pairs)
diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py
index 6a063405b2..f7b270ba63 100644
--- a/lib-python/2.7/test/test_os.py
+++ b/lib-python/2.7/test/test_os.py
@@ -604,11 +604,32 @@ class URandomFDTests(unittest.TestCase):
assert_python_ok('-c', code)
-class ExecvpeTests(unittest.TestCase):
+class ExecTests(unittest.TestCase):
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
+ def test_execve_invalid_env(self):
+ args = [sys.executable, '-c', 'pass']
+
+ # null character in the environment variable name
+ newenv = os.environ.copy()
+ newenv["FRUIT\0VEGETABLE"] = "cabbage"
+ with self.assertRaises(TypeError):
+ os.execve(args[0], args, newenv)
+
+ # null character in the environment variable value
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
+ with self.assertRaises(TypeError):
+ os.execve(args[0], args, newenv)
+
+ # equal character in the environment variable name
+ newenv = os.environ.copy()
+ newenv["FRUIT=ORANGE"] = "lemon"
+ with self.assertRaises(ValueError):
+ os.execve(args[0], args, newenv)
+
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
@@ -719,30 +740,36 @@ class PosixUidGidTests(unittest.TestCase):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
+ self.assertRaises(TypeError, os.setuid, 'not an int')
self.assertRaises(OverflowError, os.setuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
+ self.assertRaises(TypeError, os.setgid, 'not an int')
self.assertRaises(OverflowError, os.setgid, 1<<32)
@unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
+ self.assertRaises(TypeError, os.seteuid, 'not an int')
self.assertRaises(OverflowError, os.seteuid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
+ self.assertRaises(TypeError, os.setegid, 'not an int')
self.assertRaises(OverflowError, os.setegid, 1<<32)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
+ self.assertRaises(TypeError, os.setreuid, 'not an int', 0)
+ self.assertRaises(TypeError, os.setreuid, 0, 'not an int')
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
@@ -758,6 +785,8 @@ class PosixUidGidTests(unittest.TestCase):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
+ self.assertRaises(TypeError, os.setregid, 'not an int', 0)
+ self.assertRaises(TypeError, os.setregid, 0, 'not an int')
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
@@ -885,6 +914,112 @@ class Win32KillTests(unittest.TestCase):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
+@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
+class Win32ListdirTests(unittest.TestCase):
+ """Test listdir on Windows."""
+
+ def setUp(self):
+ self.created_paths = []
+ for i in range(2):
+ dir_name = 'SUB%d' % i
+ dir_path = os.path.join(support.TESTFN, dir_name)
+ file_name = 'FILE%d' % i
+ file_path = os.path.join(support.TESTFN, file_name)
+ os.makedirs(dir_path)
+ with open(file_path, 'w') as f:
+ f.write("I'm %s and proud of it. Blame test_os.\n" % file_path)
+ self.created_paths.extend([dir_name, file_name])
+ self.created_paths.sort()
+
+ def tearDown(self):
+ shutil.rmtree(support.TESTFN)
+
+ def test_listdir_no_extended_path(self):
+ """Test when the path is not an "extended" path."""
+ # unicode
+ fs_encoding = sys.getfilesystemencoding()
+ self.assertEqual(
+ sorted(os.listdir(support.TESTFN.decode(fs_encoding))),
+ [path.decode(fs_encoding) for path in self.created_paths])
+
+ # bytes
+ self.assertEqual(
+ sorted(os.listdir(os.fsencode(support.TESTFN))),
+ self.created_paths)
+
+ def test_listdir_extended_path(self):
+ """Test when the path starts with '\\\\?\\'."""
+ # See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
+ # unicode
+ fs_encoding = sys.getfilesystemencoding()
+ path = u'\\\\?\\' + os.path.abspath(support.TESTFN.decode(fs_encoding))
+ self.assertEqual(
+ sorted(os.listdir(path)),
+ [path.decode(fs_encoding) for path in self.created_paths])
+
+ # bytes
+ path = b'\\\\?\\' + os.path.abspath(support.TESTFN)
+ self.assertEqual(
+ sorted(os.listdir(path)),
+ self.created_paths)
+
+
+class SpawnTests(unittest.TestCase):
+ def _test_invalid_env(self, spawn):
+ args = [sys.executable, '-c', 'pass']
+
+ # null character in the environment variable name
+ newenv = os.environ.copy()
+ newenv["FRUIT\0VEGETABLE"] = "cabbage"
+ try:
+ exitcode = spawn(os.P_WAIT, args[0], args, newenv)
+ except TypeError:
+ pass
+ else:
+ self.assertEqual(exitcode, 127)
+
+ # null character in the environment variable value
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
+ try:
+ exitcode = spawn(os.P_WAIT, args[0], args, newenv)
+ except TypeError:
+ pass
+ else:
+ self.assertEqual(exitcode, 127)
+
+ # equal character in the environment variable name
+ newenv = os.environ.copy()
+ newenv["FRUIT=ORANGE"] = "lemon"
+ try:
+ exitcode = spawn(os.P_WAIT, args[0], args, newenv)
+ except ValueError:
+ pass
+ else:
+ self.assertEqual(exitcode, 127)
+
+ # equal character in the environment variable value
+ filename = test_support.TESTFN
+ self.addCleanup(test_support.unlink, filename)
+ with open(filename, "w") as fp:
+ fp.write('import sys, os\n'
+ 'if os.getenv("FRUIT") != "orange=lemon":\n'
+ ' raise AssertionError')
+ args = [sys.executable, filename]
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "orange=lemon"
+ exitcode = spawn(os.P_WAIT, args[0], args, newenv)
+ self.assertEqual(exitcode, 0)
+
+ @unittest.skipUnless(hasattr(os, 'spawnve'), 'test needs os.spawnve()')
+ def test_spawnve_invalid_env(self):
+ self._test_invalid_env(os.spawnve)
+
+ @unittest.skipUnless(hasattr(os, 'spawnvpe'), 'test needs os.spawnvpe()')
+ def test_spawnvpe_invalid_env(self):
+ self._test_invalid_env(os.spawnvpe)
+
+
def test_main():
test_support.run_unittest(
FileTests,
@@ -896,11 +1031,12 @@ def test_main():
DevNullTests,
URandomTests,
URandomFDTests,
- ExecvpeTests,
+ ExecTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
- Win32KillTests
+ Win32KillTests,
+ SpawnTests,
)
if __name__ == "__main__":
diff --git a/lib-python/2.7/test/test_parser.py b/lib-python/2.7/test/test_parser.py
index 65a762c871..73974a96f0 100644
--- a/lib-python/2.7/test/test_parser.py
+++ b/lib-python/2.7/test/test_parser.py
@@ -1,4 +1,6 @@
+import copy
import parser
+import pickle
import unittest
import sys
import struct
@@ -261,21 +263,19 @@ class RoundtripLegalSyntaxTestCase(unittest.TestCase):
# An absolutely minimal test of position information. Better
# tests would be a big project.
code = "def f(x):\n return x + 1"
- st1 = parser.suite(code)
- st2 = st1.totuple(line_info=1, col_info=1)
+ st = parser.suite(code)
def walk(tree):
node_type = tree[0]
next = tree[1]
- if isinstance(next, tuple):
+ if isinstance(next, (tuple, list)):
for elt in tree[1:]:
for x in walk(elt):
yield x
else:
yield tree
- terminals = list(walk(st2))
- self.assertEqual([
+ expected = [
(1, 'def', 1, 0),
(1, 'f', 1, 4),
(7, '(', 1, 5),
@@ -291,8 +291,25 @@ class RoundtripLegalSyntaxTestCase(unittest.TestCase):
(4, '', 2, 16),
(6, '', 2, -1),
(4, '', 2, -1),
- (0, '', 2, -1)],
- terminals)
+ (0, '', 2, -1),
+ ]
+
+ self.assertEqual(list(walk(st.totuple(line_info=True, col_info=True))),
+ expected)
+ self.assertEqual(list(walk(st.totuple())),
+ [(t, n) for t, n, l, c in expected])
+ self.assertEqual(list(walk(st.totuple(line_info=True))),
+ [(t, n, l) for t, n, l, c in expected])
+ self.assertEqual(list(walk(st.totuple(col_info=True))),
+ [(t, n, c) for t, n, l, c in expected])
+ self.assertEqual(list(walk(st.tolist(line_info=True, col_info=True))),
+ [list(x) for x in expected])
+ self.assertEqual(list(walk(parser.st2tuple(st, line_info=True,
+ col_info=True))),
+ expected)
+ self.assertEqual(list(walk(parser.st2list(st, line_info=True,
+ col_info=True))),
+ [list(x) for x in expected])
#
@@ -314,6 +331,52 @@ class IllegalSyntaxTestCase(unittest.TestCase):
# not even remotely valid:
self.check_bad_tree((1, 2, 3), "<junk>")
+ def test_illegal_terminal(self):
+ tree = \
+ (257,
+ (267,
+ (268,
+ (269,
+ (274,
+ (1,))),
+ (4, ''))),
+ (4, ''),
+ (0, ''))
+ self.check_bad_tree(tree, "too small items in terminal node")
+ tree = \
+ (257,
+ (267,
+ (268,
+ (269,
+ (274,
+ (1, u'pass'))),
+ (4, ''))),
+ (4, ''),
+ (0, ''))
+ self.check_bad_tree(tree, "non-string second item in terminal node")
+ tree = \
+ (257,
+ (267,
+ (268,
+ (269,
+ (274,
+ (1, 'pass', '0', 0))),
+ (4, ''))),
+ (4, ''),
+ (0, ''))
+ self.check_bad_tree(tree, "non-integer third item in terminal node")
+ tree = \
+ (257,
+ (267,
+ (268,
+ (269,
+ (274,
+ (1, 'pass', 0, 0))),
+ (4, ''))),
+ (4, ''),
+ (0, ''))
+ self.check_bad_tree(tree, "too many items in terminal node")
+
def test_illegal_yield_1(self):
# Illegal yield statement: def f(): return 1; yield 1
tree = \
@@ -541,6 +604,18 @@ class IllegalSyntaxTestCase(unittest.TestCase):
(4, ''), (0, ''))
self.check_bad_tree(tree, "from import a")
+ def test_illegal_encoding(self):
+ # Illegal encoding declaration
+ tree = \
+ (339,
+ (257, (0, '')))
+ self.check_bad_tree(tree, "missed encoding")
+ tree = \
+ (339,
+ (257, (0, '')),
+ u'iso-8859-1')
+ self.check_bad_tree(tree, "non-string encoding")
+
class CompileTestCase(unittest.TestCase):
@@ -602,6 +677,21 @@ class ParserStackLimitTestCase(unittest.TestCase):
class STObjectTestCase(unittest.TestCase):
"""Test operations on ST objects themselves"""
+ def test_copy_pickle(self):
+ sts = [
+ parser.expr('2 + 3'),
+ parser.suite('x = 2; y = x + 3'),
+ parser.expr('list(x**3 for x in range(20))')
+ ]
+ for st in sts:
+ st_copy = copy.copy(st)
+ self.assertEqual(st_copy.totuple(), st.totuple())
+ st_copy = copy.deepcopy(st)
+ self.assertEqual(st_copy.totuple(), st.totuple())
+ for proto in range(pickle.HIGHEST_PROTOCOL+1):
+ st_copy = pickle.loads(pickle.dumps(st, proto))
+ self.assertEqual(st_copy.totuple(), st.totuple())
+
check_sizeof = support.check_sizeof
@support.cpython_only
diff --git a/lib-python/2.7/test/test_pkg.py b/lib-python/2.7/test/test_pkg.py
index 5f1659b0f6..ff05dce683 100644
--- a/lib-python/2.7/test/test_pkg.py
+++ b/lib-python/2.7/test/test_pkg.py
@@ -134,7 +134,7 @@ class Test(unittest.TestCase):
s = """
from t2 import *
- self.assertTrue(dir(), ['self', 'sub'])
+ self.assertEqual(dir(), ['self', 'sub'])
"""
self.run_code(s)
diff --git a/lib-python/2.7/test/test_platform.py b/lib-python/2.7/test/test_platform.py
index f754550f95..542763d46d 100644
--- a/lib-python/2.7/test/test_platform.py
+++ b/lib-python/2.7/test/test_platform.py
@@ -4,7 +4,7 @@ import unittest
import platform
import subprocess
-from test import test_support
+from test import support
class PlatformTest(unittest.TestCase):
def test_architecture(self):
@@ -18,7 +18,7 @@ class PlatformTest(unittest.TestCase):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
- link = os.path.abspath(test_support.TESTFN)
+ link = os.path.abspath(support.TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
@@ -163,7 +163,7 @@ class PlatformTest(unittest.TestCase):
# using it, per
# http://blogs.msdn.com/david.wang/archive/2006/03/26/HOWTO-Detect-Process-Bitness.aspx
try:
- with test_support.EnvironmentVarGuard() as environ:
+ with support.EnvironmentVarGuard() as environ:
if 'PROCESSOR_ARCHITEW6432' in environ:
del environ['PROCESSOR_ARCHITEW6432']
environ['PROCESSOR_ARCHITECTURE'] = 'foo'
@@ -247,7 +247,6 @@ class PlatformTest(unittest.TestCase):
res = platform.dist()
def test_libc_ver(self):
- import os
if os.path.isdir(sys.executable) and \
os.path.exists(sys.executable+'.exe'):
# Cygwin horror
@@ -256,6 +255,13 @@ class PlatformTest(unittest.TestCase):
executable = sys.executable
res = platform.libc_ver(executable)
+ self.addCleanup(support.unlink, support.TESTFN)
+ with open(support.TESTFN, 'wb') as f:
+ f.write(b'x'*(16384-10))
+ f.write(b'GLIBC_1.23.4\0GLIBC_1.9\0GLIBC_1.21\0')
+ self.assertEqual(platform.libc_ver(support.TESTFN),
+ ('glibc', '1.23.4'))
+
def test_parse_release_file(self):
for input, output in (
@@ -275,7 +281,7 @@ class PlatformTest(unittest.TestCase):
def test_main():
- test_support.run_unittest(
+ support.run_unittest(
PlatformTest
)
diff --git a/lib-python/2.7/test/test_poll.py b/lib-python/2.7/test/test_poll.py
index 1e195ed624..7ad693d319 100644
--- a/lib-python/2.7/test/test_poll.py
+++ b/lib-python/2.7/test/test_poll.py
@@ -205,6 +205,28 @@ class PollTests(unittest.TestCase):
os.write(w, b'spam')
t.join()
+ @unittest.skipUnless(threading, 'Threading required for this test.')
+ @reap_threads
+ def test_poll_blocks_with_negative_ms(self):
+ for timeout_ms in [None, -1000, -1, -1.0]:
+ # Create two file descriptors. This will be used to unlock
+ # the blocking call to poll.poll inside the thread
+ r, w = os.pipe()
+ pollster = select.poll()
+ pollster.register(r, select.POLLIN)
+
+ poll_thread = threading.Thread(target=pollster.poll, args=(timeout_ms,))
+ poll_thread.start()
+ poll_thread.join(timeout=0.1)
+ self.assertTrue(poll_thread.is_alive())
+
+ # Write to the pipe so pollster.poll unblocks and the thread ends.
+ os.write(w, b'spam')
+ poll_thread.join()
+ self.assertFalse(poll_thread.is_alive())
+ os.close(r)
+ os.close(w)
+
def test_main():
run_unittest(PollTests)
diff --git a/lib-python/2.7/test/test_poplib.py b/lib-python/2.7/test/test_poplib.py
index 23d688724b..d2143759ba 100644
--- a/lib-python/2.7/test/test_poplib.py
+++ b/lib-python/2.7/test/test_poplib.py
@@ -211,6 +211,16 @@ class TestPOP3Class(TestCase):
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
+ def test_apop_REDOS(self):
+ # Replace welcome with very long evil welcome.
+ # NB The upper bound on welcome length is currently 2048.
+ # At this length, evil input makes each apop call take
+ # on the order of milliseconds instead of microseconds.
+ evil_welcome = b'+OK' + (b'<' * 1000000)
+ with test_support.swap_attr(self.client, 'welcome', evil_welcome):
+ # The evil welcome is invalid, so apop should throw.
+ self.assertRaises(poplib.error_proto, self.client.apop, 'a', 'kb')
+
def test_top(self):
expected = ('+OK 116 bytes',
['From: postmaster@python.org', 'Content-Type: text/plain',
diff --git a/lib-python/2.7/test/test_posix.py b/lib-python/2.7/test/test_posix.py
index e0c84f2d6c..ae636e591a 100644
--- a/lib-python/2.7/test/test_posix.py
+++ b/lib-python/2.7/test/test_posix.py
@@ -199,6 +199,7 @@ class PosixTester(unittest.TestCase):
def test_fdopen_directory(self):
try:
fd = os.open('.', os.O_RDONLY)
+ self.addCleanup(os.close, fd)
except OSError as e:
self.assertEqual(e.errno, errno.EACCES)
self.skipTest("system cannot open directories")
@@ -287,6 +288,10 @@ class PosixTester(unittest.TestCase):
self.assertRaises(TypeError, posix.minor)
self.assertRaises((ValueError, OverflowError), posix.minor, -1)
+ if sys.platform.startswith('freebsd') and dev >= 0x100000000:
+ self.skipTest("bpo-31044: on FreeBSD CURRENT, minor() truncates "
+ "64-bit dev to 32-bit")
+
self.assertEqual(posix.makedev(major, minor), dev)
self.assertEqual(posix.makedev(int(major), int(minor)), dev)
self.assertEqual(posix.makedev(long(major), long(minor)), dev)
@@ -504,6 +509,15 @@ class PosixTester(unittest.TestCase):
finally:
posix.lchflags(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
+ @unittest.skipUnless(hasattr(os, "putenv"), "requires os.putenv()")
+ def test_putenv(self):
+ with self.assertRaises(TypeError):
+ os.putenv('FRUIT\0VEGETABLE', 'cabbage')
+ with self.assertRaises(TypeError):
+ os.putenv('FRUIT', 'orange\0VEGETABLE=cabbage')
+ with self.assertRaises(ValueError):
+ os.putenv('FRUIT=ORANGE', 'lemon')
+
@unittest.skipUnless(hasattr(posix, 'getcwd'),
'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
@@ -542,7 +556,11 @@ class PosixTester(unittest.TestCase):
)
if quirky_platform:
expected_errno = errno.ERANGE
- self.assertEqual(e.errno, expected_errno)
+ if 'darwin' in sys.platform:
+ # macOS 10.15 may return errno.ENOENT instead
+ self.assertIn(e.errno, (errno.ENOENT, errno.ENAMETOOLONG))
+ else:
+ self.assertEqual(e.errno, expected_errno)
finally:
os.chdir('..')
os.rmdir(dirname)
diff --git a/lib-python/2.7/test/test_posixpath.py b/lib-python/2.7/test/test_posixpath.py
index deaa577283..18ea2e42ea 100644
--- a/lib-python/2.7/test/test_posixpath.py
+++ b/lib-python/2.7/test/test_posixpath.py
@@ -262,34 +262,56 @@ class PosixPathTest(unittest.TestCase):
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
- with test_support.EnvironmentVarGuard() as env:
+
+ def test_expanduser_home_envvar(self):
+ with support.EnvironmentVarGuard() as env:
+ env['HOME'] = '/home/victor'
+ self.assertEqual(posixpath.expanduser("~"), "/home/victor")
+
+ # expanduser() strips trailing slash
+ env['HOME'] = '/home/victor/'
+ self.assertEqual(posixpath.expanduser("~"), "/home/victor")
+
for home in '/', '', '//', '///':
env['HOME'] = home
self.assertEqual(posixpath.expanduser("~"), "/")
self.assertEqual(posixpath.expanduser("~/"), "/")
self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
- try:
- import pwd
- except ImportError:
- pass
- else:
- self.assertIsInstance(posixpath.expanduser("~/"), basestring)
- # if home directory == root directory, this test makes no sense
- if posixpath.expanduser("~") != '/':
- self.assertEqual(
- posixpath.expanduser("~") + "/",
- posixpath.expanduser("~/")
- )
- self.assertIsInstance(posixpath.expanduser("~root/"), basestring)
- self.assertIsInstance(posixpath.expanduser("~foo/"), basestring)
- with test_support.EnvironmentVarGuard() as env:
- # expanduser should fall back to using the password database
- del env['HOME']
- home = pwd.getpwuid(os.getuid()).pw_dir
- # $HOME can end with a trailing /, so strip it (see #17809)
- home = home.rstrip("/") or '/'
- self.assertEqual(posixpath.expanduser("~"), home)
+ def test_expanduser_pwd(self):
+ pwd = support.import_module('pwd')
+
+ self.assertIsInstance(posixpath.expanduser("~/"), str)
+
+ # if home directory == root directory, this test makes no sense
+ if posixpath.expanduser("~") != '/':
+ self.assertEqual(
+ posixpath.expanduser("~") + "/",
+ posixpath.expanduser("~/")
+ )
+ self.assertIsInstance(posixpath.expanduser("~root/"), str)
+ self.assertIsInstance(posixpath.expanduser("~foo/"), str)
+
+ with support.EnvironmentVarGuard() as env:
+ # expanduser should fall back to using the password database
+ del env['HOME']
+
+ home = pwd.getpwuid(os.getuid()).pw_dir
+ # $HOME can end with a trailing /, so strip it (see #17809)
+ home = home.rstrip("/") or '/'
+ self.assertEqual(posixpath.expanduser("~"), home)
+
+ # bpo-10496: If the HOME environment variable is not set and the
+ # user (current identifier or name in the path) doesn't exist in
+ # the password database (pwd.getuid() or pwd.getpwnam() fail),
+ # expanduser() must return the path unchanged.
+ def raise_keyerror(*args):
+ raise KeyError
+
+ with support.swap_attr(pwd, 'getpwuid', raise_keyerror), \
+ support.swap_attr(pwd, 'getpwnam', raise_keyerror):
+ for path in ('~', '~/.local', '~vstinner/'):
+ self.assertEqual(posixpath.expanduser(path), path)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
@@ -474,12 +496,10 @@ class PosixPathTest(unittest.TestCase):
finally:
os.getcwd = real_getcwd
- @test_support.requires_unicode
+ @unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
def test_expandvars_nonascii_word(self):
encoding = sys.getfilesystemencoding()
- # Non-ASCII word characters
- letters = test_support.u(r'\xe6\u0130\u0141\u03c6\u041a\u05d0\u062a\u0e01')
- uwnonascii = letters.encode(encoding, 'ignore').decode(encoding)[:3]
+ uwnonascii = test_support.FS_NONASCII
swnonascii = uwnonascii.encode(encoding)
if not swnonascii:
self.skipTest('Needs non-ASCII word characters')
diff --git a/lib-python/2.7/test/test_pty.py b/lib-python/2.7/test/test_pty.py
index bec38c4545..0eb31fdaf7 100644
--- a/lib-python/2.7/test/test_pty.py
+++ b/lib-python/2.7/test/test_pty.py
@@ -11,6 +11,7 @@ import sys
import select
import signal
import socket
+import io # readline
import unittest
TEST_STRING_1 = "I wish to buy a fish license.\n"
@@ -24,6 +25,16 @@ else:
pass
+# Note that os.read() is nondeterministic so we need to be very careful
+# to make the test suite deterministic. A normal call to os.read() may
+# give us less than expected.
+#
+# Beware, on my Linux system, if I put 'foo\n' into a terminal fd, I get
+# back 'foo\r\n' at the other end. The behavior depends on the termios
+# setting. The newline translation may be OS-specific. To make the
+# test suite deterministic and OS-independent, the functions _readline
+# and normalize_output can be used.
+
def normalize_output(data):
# Some operating systems do conversions on newline. We could possibly
# fix that by doing the appropriate termios.tcsetattr()s. I couldn't
@@ -45,6 +56,12 @@ def normalize_output(data):
return data
+def _readline(fd):
+ """Read one line. May block forever if no newline is read."""
+ reader = io.FileIO(fd, mode='rb', closefd=False)
+ return reader.readline()
+
+
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
@@ -53,14 +70,11 @@ class PtyTest(unittest.TestCase):
def setUp(self):
# isatty() and close() can hang on some platforms. Set an alarm
# before running the test to make sure we don't hang forever.
- self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
+ old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
+ self.addCleanup(signal.signal, signal.SIGALRM, old_alarm)
+ self.addCleanup(signal.alarm, 0)
signal.alarm(10)
- def tearDown(self):
- # remove alarm, restore old alarm handler
- signal.alarm(0)
- signal.signal(signal.SIGALRM, self.old_alarm)
-
def handle_sig(self, sig, frame):
self.fail("isatty hung")
@@ -97,14 +111,14 @@ class PtyTest(unittest.TestCase):
debug("Writing to slave_fd")
os.write(slave_fd, TEST_STRING_1)
- s1 = os.read(master_fd, 1024)
+ s1 = _readline(master_fd)
self.assertEqual('I wish to buy a fish license.\n',
normalize_output(s1))
debug("Writing chunked output")
os.write(slave_fd, TEST_STRING_2[:5])
os.write(slave_fd, TEST_STRING_2[5:])
- s2 = os.read(master_fd, 1024)
+ s2 = _readline(master_fd)
self.assertEqual('For my pet fish, Eric.\n', normalize_output(s2))
os.close(slave_fd)
diff --git a/lib-python/2.7/test/test_py_compile.py b/lib-python/2.7/test/test_py_compile.py
index 5ec523abe2..95863088b4 100644
--- a/lib-python/2.7/test/test_py_compile.py
+++ b/lib-python/2.7/test/test_py_compile.py
@@ -10,7 +10,7 @@ from test import test_support as support
class PyCompileTests(unittest.TestCase):
def setUp(self):
- self.directory = tempfile.mkdtemp()
+ self.directory = tempfile.mkdtemp(dir=os.getcwd())
self.source_path = os.path.join(self.directory, '_test.py')
self.pyc_path = self.source_path + 'c'
self.cwd_drive = os.path.splitdrive(os.getcwd())[0]
diff --git a/lib-python/2.7/test/test_pydoc.py b/lib-python/2.7/test/test_pydoc.py
index 0e9f5f3e46..ec83523637 100644
--- a/lib-python/2.7/test/test_pydoc.py
+++ b/lib-python/2.7/test/test_pydoc.py
@@ -257,7 +257,7 @@ def get_pydoc_html(module):
def get_pydoc_link(module):
"Returns a documentation web link of a module"
dirname = os.path.dirname
- basedir = dirname(dirname(__file__))
+ basedir = dirname(dirname(os.path.realpath(__file__)))
doc = pydoc.TextDoc()
loc = doc.getdocloc(module, basedir=basedir)
return loc
diff --git a/lib-python/2.7/test/test_random.py b/lib-python/2.7/test/test_random.py
index e4876fd090..8a6d17290d 100644
--- a/lib-python/2.7/test/test_random.py
+++ b/lib-python/2.7/test/test_random.py
@@ -307,10 +307,27 @@ class SystemRandom_TestBasicOps(TestBasicOps):
class MersenneTwister_TestBasicOps(TestBasicOps):
gen = random.Random()
+ @test_support.cpython_only
+ def test_bug_31478(self):
+ # _random.Random.seed() should ignore the __abs__() method of a
+ # long/int subclass argument.
+ class BadInt(int):
+ def __abs__(self):
+ 1/0.0
+ class BadLong(long):
+ def __abs__(self):
+ 1/0.0
+ self.gen.seed(42)
+ expected_value = self.gen.random()
+ for seed_arg in [42L, BadInt(42), BadLong(42)]:
+ self.gen.seed(seed_arg)
+ self.assertEqual(self.gen.random(), expected_value)
+
def test_setstate_first_arg(self):
self.assertRaises(ValueError, self.gen.setstate, (1, None, None))
def test_setstate_middle_arg(self):
+ start_state = self.gen.getstate()
# Wrong type, s/b tuple
self.assertRaises(TypeError, self.gen.setstate, (2, None, None))
# Wrong length, s/b 625
@@ -324,6 +341,10 @@ class MersenneTwister_TestBasicOps(TestBasicOps):
self.gen.setstate((2, (1,)*624+(625,), None))
with self.assertRaises((ValueError, OverflowError)):
self.gen.setstate((2, (1,)*624+(-1,), None))
+ # Failed calls to setstate() should not have changed the state.
+ bits100 = self.gen.getrandbits(100)
+ self.gen.setstate(start_state)
+ self.assertEqual(self.gen.getrandbits(100), bits100)
def test_referenceImplementation(self):
# Compare the python implementation with results from the original
diff --git a/lib-python/2.7/test/test_re.py b/lib-python/2.7/test/test_re.py
index 5725a99ad6..ae314841c6 100644
--- a/lib-python/2.7/test/test_re.py
+++ b/lib-python/2.7/test/test_re.py
@@ -3,7 +3,7 @@ from test.test_support import (
verbose, run_unittest, import_module,
precisionbigmemtest, _2G, cpython_only,
captured_stdout, have_unicode, requires_unicode, u,
- check_warnings)
+ check_warnings, check_py3k_warnings)
import locale
import re
from re import Scanner
@@ -58,7 +58,7 @@ class ReTests(unittest.TestCase):
s = r"\1\1"
self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
- self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
+ self.assertEqual(re.sub('(.)', s.replace('\\', r'\\'), 'x'), s)
self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
@@ -66,11 +66,13 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
- self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
- '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
- self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
- self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
- (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
+ self.assertEqual(re.sub('a', r'\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b')
+ self.assertEqual(re.sub('a', '\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b')
+ self.assertEqual(re.sub('a', '\t\n\v\r\f\a\b', 'a'),
+ (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)+chr(8)))
+ for c in 'cdehijklmopqsuwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
+ with check_py3k_warnings():
+ self.assertEqual(re.sub('a', '\\' + c, 'a'), '\\' + c)
self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
@@ -223,11 +225,11 @@ class ReTests(unittest.TestCase):
def test_re_split(self):
self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
- self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
- self.assertEqual(re.split("(:*)", ":a:b::c"),
+ self.assertEqual(re.split(":+", ":a:b::c"), ['', 'a', 'b', 'c'])
+ self.assertEqual(re.split("(:+)", ":a:b::c"),
['', ':', 'a', ':', 'b', '::', 'c'])
- self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
- self.assertEqual(re.split("(:)*", ":a:b::c"),
+ self.assertEqual(re.split("(?::+)", ":a:b::c"), ['', 'a', 'b', 'c'])
+ self.assertEqual(re.split("(:)+", ":a:b::c"),
['', ':', 'a', ':', 'b', ':', 'c'])
self.assertEqual(re.split("([b:]+)", ":a:b::c"),
['', ':', 'a', ':b::', 'c'])
@@ -237,13 +239,34 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
['', 'a', '', '', 'c'])
+ for sep, expected in [
+ (':*', ['', 'a', 'b', 'c']),
+ ('(?::*)', ['', 'a', 'b', 'c']),
+ ('(:*)', ['', ':', 'a', ':', 'b', '::', 'c']),
+ ('(:)*', ['', ':', 'a', ':', 'b', ':', 'c']),
+ ]:
+ with check_py3k_warnings(('', FutureWarning)):
+ self.assertEqual(re.split(sep, ':a:b::c'), expected)
+
+ for sep, expected in [
+ ('', [':a:b::c']),
+ (r'\b', [':a:b::c']),
+ (r'(?=:)', [':a:b::c']),
+ (r'(?<=:)', [':a:b::c']),
+ ]:
+ with check_py3k_warnings():
+ self.assertEqual(re.split(sep, ':a:b::c'), expected)
+
def test_qualified_re_split(self):
self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
self.assertEqual(re.split("(:)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
- self.assertEqual(re.split("(:*)", ":a:b::c", 2),
+ self.assertEqual(re.split("(:+)", ":a:b::c", 2),
['', ':', 'a', ':', 'b::c'])
+ with check_py3k_warnings(('', FutureWarning)):
+ self.assertEqual(re.split("(:*)", ":a:b::c", maxsplit=2),
+ ['', ':', 'a', ':', 'b::c'])
def test_re_findall(self):
self.assertEqual(re.findall(":+", "abc"), [])
@@ -404,6 +427,29 @@ class ReTests(unittest.TestCase):
self.assertEqual(re.search(r"\d\D\w\W\s\S",
"1aa! a", re.UNICODE).group(0), "1aa! a")
+ def test_other_escapes(self):
+ self.assertRaises(re.error, re.compile, "\\")
+ self.assertEqual(re.match(r"\(", '(').group(), '(')
+ self.assertIsNone(re.match(r"\(", ')'))
+ self.assertEqual(re.match(r"\\", '\\').group(), '\\')
+ self.assertEqual(re.match(r"[\]]", ']').group(), ']')
+ self.assertIsNone(re.match(r"[\]]", '['))
+ self.assertEqual(re.match(r"[a\-c]", '-').group(), '-')
+ self.assertIsNone(re.match(r"[a\-c]", 'b'))
+ self.assertEqual(re.match(r"[\^a]+", 'a^').group(), 'a^')
+ self.assertIsNone(re.match(r"[\^a]+", 'b'))
+ re.purge() # for warnings
+ for c in 'ceghijklmopquyzCEFGHIJKLMNOPQRTUVXY':
+ warn = FutureWarning if c in 'Uu' else DeprecationWarning
+ with check_py3k_warnings(('', warn)):
+ self.assertEqual(re.match('\\%c$' % c, c).group(), c)
+ self.assertIsNone(re.match('\\%c' % c, 'a'))
+ for c in 'ceghijklmopquyzABCEFGHIJKLMNOPQRTUVXYZ':
+ warn = FutureWarning if c in 'Uu' else DeprecationWarning
+ with check_py3k_warnings(('', warn)):
+ self.assertEqual(re.match('[\\%c]$' % c, c).group(), c)
+ self.assertIsNone(re.match('[\\%c]' % c, 'a'))
+
def test_string_boundaries(self):
# See http://bugs.python.org/issue10713
self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
@@ -931,6 +977,19 @@ class ReTests(unittest.TestCase):
self.assertTrue(re.match('(?ixu) ' + upper_char, lower_char))
self.assertTrue(re.match('(?ixu) ' + lower_char, upper_char))
+ # Incompatibilities
+ re.purge()
+ with check_py3k_warnings():
+ re.compile('', re.LOCALE|re.UNICODE)
+ with check_py3k_warnings():
+ re.compile('(?L)', re.UNICODE)
+ with check_py3k_warnings():
+ re.compile('(?u)', re.LOCALE)
+ with check_py3k_warnings():
+ re.compile('(?Lu)')
+ with check_py3k_warnings():
+ re.compile('(?uL)')
+
def test_dollar_matches_twice(self):
"$ matches the end of string, and just before the terminating \n"
pattern = re.compile('$')
@@ -967,8 +1026,9 @@ class ReTests(unittest.TestCase):
def test_bug_13899(self):
# Issue #13899: re pattern r"[\A]" should work like "A" but matches
# nothing. Ditto B and Z.
- self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
- ['A', 'B', '\b', 'C', 'Z'])
+ with check_py3k_warnings():
+ self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
+ ['A', 'B', '\b', 'C', 'Z'])
@precisionbigmemtest(size=_2G, memuse=1)
def test_large_search(self, size):
@@ -1261,7 +1321,11 @@ def run_re_tests():
def test_main():
run_unittest(ReTests)
- run_re_tests()
+ deprecations = [
+ ('bad escape', DeprecationWarning),
+ ]
+ with check_py3k_warnings(*deprecations):
+ run_re_tests()
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_regrtest.py b/lib-python/2.7/test/test_regrtest.py
new file mode 100644
index 0000000000..872ba64611
--- /dev/null
+++ b/lib-python/2.7/test/test_regrtest.py
@@ -0,0 +1,833 @@
+"""
+Tests of regrtest.py.
+
+Note: test_regrtest cannot be run twice in parallel.
+"""
+from __future__ import print_function
+
+import collections
+import errno
+import os.path
+import platform
+import re
+import subprocess
+import sys
+import sysconfig
+import tempfile
+import textwrap
+import unittest
+from test import support
+# Use utils alias to use the same code for TestUtils in master and 2.7 branches
+import regrtest as utils
+
+
+Py_DEBUG = hasattr(sys, 'getobjects')
+ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
+ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
+
+TEST_INTERRUPTED = textwrap.dedent("""
+ from signal import SIGINT
+ try:
+ from _testcapi import raise_signal
+ raise_signal(SIGINT)
+ except ImportError:
+ import os
+ os.kill(os.getpid(), SIGINT)
+ """)
+
+
+SubprocessRun = collections.namedtuple('SubprocessRun',
+ 'returncode stdout stderr')
+
+
+class BaseTestCase(unittest.TestCase):
+ TEST_UNIQUE_ID = 1
+ TESTNAME_PREFIX = 'test_regrtest_'
+ TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'
+
+ def setUp(self):
+ self.testdir = os.path.realpath(os.path.dirname(__file__))
+
+ self.tmptestdir = tempfile.mkdtemp()
+ self.addCleanup(support.rmtree, self.tmptestdir)
+
+ def create_test(self, name=None, code=None):
+ if not name:
+ name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
+ BaseTestCase.TEST_UNIQUE_ID += 1
+
+ if code is None:
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_empty_test(self):
+ pass
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+
+ # test_regrtest cannot be run twice in parallel because
+ # of setUp() and create_test()
+ name = self.TESTNAME_PREFIX + name
+ path = os.path.join(self.tmptestdir, name + '.py')
+
+ self.addCleanup(support.unlink, path)
+        # Use O_EXCL to ensure that we do not overwrite existing tests
+ try:
+ fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
+ except OSError as exc:
+ if (exc.errno in (errno.EACCES, errno.EPERM)
+ and not sysconfig.is_python_build()):
+ self.skipTest("cannot write %s: %s" % (path, exc))
+ else:
+ raise
+ else:
+ with os.fdopen(fd, 'w') as fp:
+ fp.write(code)
+ return name
+
+ def regex_search(self, regex, output):
+ match = re.search(regex, output, re.MULTILINE)
+ if not match:
+ self.fail("%r not found in %r" % (regex, output))
+ return match
+
+ def check_line(self, output, regex):
+ regex = re.compile(r'^' + regex, re.MULTILINE)
+ self.assertRegexpMatches(output, regex)
+
+ def parse_executed_tests(self, output):
+ regex = (r'^[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
+ % self.TESTNAME_REGEX)
+ parser = re.finditer(regex, output, re.MULTILINE)
+ return list(match.group(1) for match in parser)
+
+ def check_executed_tests(self, output, tests, skipped=(), failed=(),
+ env_changed=(), omitted=(),
+ rerun=(), no_test_ran=(),
+ randomize=False, interrupted=False,
+ fail_env_changed=False):
+ if isinstance(tests, str):
+ tests = [tests]
+ if isinstance(skipped, str):
+ skipped = [skipped]
+ if isinstance(failed, str):
+ failed = [failed]
+ if isinstance(env_changed, str):
+ env_changed = [env_changed]
+ if isinstance(omitted, str):
+ omitted = [omitted]
+ if isinstance(rerun, str):
+ rerun = [rerun]
+ if isinstance(no_test_ran, str):
+ no_test_ran = [no_test_ran]
+
+ executed = self.parse_executed_tests(output)
+ if randomize:
+ self.assertEqual(set(executed), set(tests), output)
+ else:
+ self.assertEqual(executed, tests, (executed, tests, output))
+
+ def plural(count):
+ return 's' if count != 1 else ''
+
+ def list_regex(line_format, tests):
+ count = len(tests)
+ names = ' '.join(sorted(tests))
+ regex = line_format % (count, plural(count))
+ regex = r'%s:\n %s$' % (regex, names)
+ return regex
+
+ if skipped:
+ regex = list_regex('%s test%s skipped', skipped)
+ self.check_line(output, regex)
+
+ if failed:
+ regex = list_regex('%s test%s failed', failed)
+ self.check_line(output, regex)
+
+ if env_changed:
+ regex = list_regex('%s test%s altered the execution environment',
+ env_changed)
+ self.check_line(output, regex)
+
+ if omitted:
+ regex = list_regex('%s test%s omitted', omitted)
+ self.check_line(output, regex)
+
+ if rerun:
+ regex = list_regex('%s re-run test%s', rerun)
+ self.check_line(output, regex)
+ self.check_line(output, "Re-running failed tests in verbose mode")
+ for name in rerun:
+ regex = "Re-running test %r in verbose mode" % name
+ self.check_line(output, regex)
+
+ good = (len(tests) - len(skipped) - len(failed)
+ - len(omitted) - len(env_changed) - len(no_test_ran))
+ if good:
+ regex = r'%s test%s OK\.$' % (good, plural(good))
+ if not skipped and not failed and good > 1:
+ regex = 'All %s' % regex
+ self.check_line(output, regex)
+
+ if interrupted:
+ self.check_line(output, 'Test suite interrupted by signal SIGINT.')
+
+ result = []
+ if failed:
+ result.append('FAILURE')
+ elif fail_env_changed and env_changed:
+ result.append('ENV CHANGED')
+ if interrupted:
+ result.append('INTERRUPTED')
+ if not any((good, result, failed, interrupted, skipped,
+ env_changed, fail_env_changed)):
+ result.append("NO TEST RUN")
+ elif not result:
+ result.append('SUCCESS')
+ result = ', '.join(result)
+ if rerun:
+ self.check_line(output, 'Tests result: %s' % result)
+ result = 'FAILURE then %s' % result
+
+ self.check_line(output, 'Tests result: %s' % result)
+
+ def parse_random_seed(self, output):
+ match = self.regex_search(r'Using random seed ([0-9]+)', output)
+ randseed = int(match.group(1))
+ self.assertTrue(0 <= randseed <= 10000000, randseed)
+ return randseed
+
+ def run_command(self, args, input=None, exitcode=0, **kw):
+ if not input:
+ input = ''
+ if 'stderr' not in kw:
+ kw['stderr'] = subprocess.PIPE
+ proc = subprocess.Popen(args,
+ universal_newlines=True,
+ stdout=subprocess.PIPE,
+ **kw)
+ stdout, stderr = proc.communicate(input=input)
+ if proc.returncode != exitcode:
+ msg = ("Command %s failed with exit code %s\n"
+ "\n"
+ "stdout:\n"
+ "---\n"
+ "%s\n"
+ "---\n"
+ % (str(args), proc.returncode, stdout))
+ if proc.stderr:
+ msg += ("\n"
+ "stderr:\n"
+ "---\n"
+ "%s"
+ "---\n"
+ % stderr)
+ self.fail(msg)
+ return SubprocessRun(proc.returncode, stdout, stderr)
+
+ def run_python(self, args, **kw):
+ args = [sys.executable] + list(args)
+ proc = self.run_command(args, **kw)
+ return proc.stdout
+
+
+class ProgramsTestCase(BaseTestCase):
+ """
+ Test various ways to run the Python test suite. Use options close
+ to options used on the buildbot.
+ """
+
+ NTEST = 4
+
+ def setUp(self):
+ super(ProgramsTestCase, self).setUp()
+
+ # Create NTEST tests doing nothing
+ self.tests = [self.create_test() for index in range(self.NTEST)]
+
+ self.python_args = ['-Wd', '-3', '-E', '-bb', '-tt']
+ self.regrtest_args = ['-uall', '-rwW',
+ '--testdir=%s' % self.tmptestdir]
+
+ def check_output(self, output):
+ self.parse_random_seed(output)
+ self.check_executed_tests(output, self.tests, randomize=True)
+
+ def run_tests(self, args):
+ output = self.run_python(args)
+ self.check_output(output)
+
+ def test_script_regrtest(self):
+ # Lib/test/regrtest.py
+ script = os.path.join(self.testdir, 'regrtest.py')
+
+ args = self.python_args + [script] + self.regrtest_args + self.tests
+ self.run_tests(args)
+
+ def test_module_test(self):
+ # -m test
+ args = self.python_args + ['-m', 'test'] + self.regrtest_args + self.tests
+ self.run_tests(args)
+
+ def test_module_regrtest(self):
+ # -m test.regrtest
+ args = self.python_args + ['-m', 'test.regrtest'] + self.regrtest_args + self.tests
+ self.run_tests(args)
+
+ def test_module_autotest(self):
+ # -m test.autotest
+ args = self.python_args + ['-m', 'test.autotest'] + self.regrtest_args + self.tests
+ self.run_tests(args)
+
+ def test_module_from_test_autotest(self):
+ # from test import autotest
+ code = 'from test import autotest'
+ args = self.python_args + ['-c', code] + self.regrtest_args + self.tests
+ self.run_tests(args)
+
+ def test_script_autotest(self):
+ # Lib/test/autotest.py
+ script = os.path.join(self.testdir, 'autotest.py')
+ args = self.python_args + [script] + self.regrtest_args + self.tests
+ self.run_tests(args)
+
+ def run_batch(self, *args):
+ proc = self.run_command(args)
+ self.check_output(proc.stdout)
+
+ def need_pcbuild(self):
+ exe = os.path.normpath(os.path.abspath(sys.executable))
+ parts = exe.split(os.path.sep)
+ if len(parts) < 3:
+ # it's not a python build, python is likely to be installed
+ return
+
+ build_dir = parts[-3]
+ if build_dir.lower() != 'pcbuild':
+ self.skipTest("Tools/buildbot/test.bat requires PCbuild build, "
+ "found %s" % build_dir)
+
+ @unittest.skipUnless(sysconfig.is_python_build(),
+ 'test.bat script is not installed')
+ @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
+ def test_tools_buildbot_test(self):
+ self.need_pcbuild()
+
+ # Tools\buildbot\test.bat
+ script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
+ test_args = ['--testdir=%s' % self.tmptestdir]
+ if platform.architecture()[0] == '64bit':
+ test_args.append('-x64') # 64-bit build
+ if not Py_DEBUG:
+ test_args.append('+d') # Release build, use python.exe
+
+ args = [script] + test_args + self.tests
+ self.run_batch(*args)
+
+ @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
+ def test_pcbuild_rt(self):
+ self.need_pcbuild()
+
+ # PCbuild\rt.bat
+ script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
+ rt_args = ["-q"] # Quick, don't run tests twice
+ if platform.architecture()[0] == '64bit':
+ rt_args.append('-x64') # 64-bit build
+ if Py_DEBUG:
+ rt_args.append('-d') # Debug build, use python_d.exe
+ args = [script] + rt_args + self.regrtest_args + self.tests
+ self.run_batch(*args)
+
+
+class ArgsTestCase(BaseTestCase):
+ """
+ Test arguments of the Python test suite.
+ """
+
+ def run_tests(self, *testargs, **kw):
+ cmdargs = ('-m', 'test', '--testdir=%s' % self.tmptestdir) + testargs
+ return self.run_python(cmdargs, **kw)
+
+ def test_failing_test(self):
+ # test a failing test
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class FailingTest(unittest.TestCase):
+ def test_failing(self):
+ self.fail("bug")
+
+ def test_main():
+ support.run_unittest(FailingTest)
+ """)
+ test_ok = self.create_test('ok')
+ test_failing = self.create_test('failing', code=code)
+ tests = [test_ok, test_failing]
+
+ output = self.run_tests(*tests, exitcode=2)
+ self.check_executed_tests(output, tests, failed=test_failing)
+
+ def test_resources(self):
+ # test -u command line option
+ tests = {}
+ for resource in ('audio', 'network'):
+ code = textwrap.dedent("""
+ from test import support; support.requires(%r)
+ import unittest
+ class PassingTest(unittest.TestCase):
+ def test_pass(self):
+ pass
+
+ def test_main():
+ support.run_unittest(PassingTest)
+ """ % resource)
+
+ tests[resource] = self.create_test(resource, code)
+ test_names = sorted(tests.values())
+
+ # -u all: 2 resources enabled
+ output = self.run_tests('-u', 'all', *test_names)
+ self.check_executed_tests(output, test_names)
+
+ # -u audio: 1 resource enabled
+ output = self.run_tests('-uaudio', *test_names)
+ self.check_executed_tests(output, test_names,
+ skipped=tests['network'])
+
+ # no option: 0 resources enabled
+ output = self.run_tests(*test_names)
+ self.check_executed_tests(output, test_names,
+ skipped=test_names)
+
+ def test_random(self):
+ # test -r and --randseed command line option
+ code = textwrap.dedent("""
+ import random
+ print("TESTRANDOM: %s" % random.randint(1, 1000))
+ """)
+ test = self.create_test('random', code)
+
+ # first run to get the output with the random seed
+ output = self.run_tests('-r', '-v', test)
+ randseed = self.parse_random_seed(output)
+ match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
+ test_random = int(match.group(1))
+
+ # try to reproduce with the random seed
+ output = self.run_tests('-r', '-v', '--randseed=%s' % randseed, test)
+ randseed2 = self.parse_random_seed(output)
+ self.assertEqual(randseed2, randseed)
+
+ match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
+ test_random2 = int(match.group(1))
+ self.assertEqual(test_random2, test_random)
+
+ def test_fromfile(self):
+ # test --fromfile
+ tests = [self.create_test() for index in range(5)]
+
+ # Write the list of files using a format similar to regrtest output:
+ # [1/2] test_1
+ # [2/2] test_2
+ filename = support.TESTFN
+ self.addCleanup(support.unlink, filename)
+
+ # test format 'test_opcodes'
+ with open(filename, "w") as fp:
+ for name in tests:
+ print(name, file=fp)
+
+ output = self.run_tests('--fromfile', filename)
+ self.check_executed_tests(output, tests)
+
+ def test_interrupted(self):
+ code = TEST_INTERRUPTED
+ test = self.create_test('sigint', code=code)
+ output = self.run_tests(test, exitcode=130)
+ self.check_executed_tests(output, test, omitted=test,
+ interrupted=True)
+
+ def test_slowest(self):
+ # test --slow
+ tests = [self.create_test() for index in range(3)]
+ output = self.run_tests("--slowest", *tests)
+ self.check_executed_tests(output, tests)
+ regex = ('10 slowest tests:\n'
+ '(?:- %s: .*\n){%s}'
+ % (self.TESTNAME_REGEX, len(tests)))
+ self.check_line(output, regex)
+
+ def test_slow_interrupted(self):
+ # Issue #25373: test --slowest with an interrupted test
+ code = TEST_INTERRUPTED
+ test = self.create_test("sigint", code=code)
+
+ try:
+ import threading
+ tests = (False, True)
+ except ImportError:
+ tests = (False,)
+ for multiprocessing in tests:
+ if multiprocessing:
+ args = ("--slowest", "-j2", test)
+ else:
+ args = ("--slowest", test)
+ output = self.run_tests(*args, exitcode=130)
+ self.check_executed_tests(output, test,
+ omitted=test, interrupted=True)
+
+ regex = ('10 slowest tests:\n')
+ self.check_line(output, regex)
+
+ def test_coverage(self):
+ # test --coverage
+ test = self.create_test('coverage')
+ output = self.run_tests("--coverage", test)
+ self.check_executed_tests(output, [test])
+ regex = (r'lines +cov% +module +\(path\)\n'
+ r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
+ self.check_line(output, regex)
+
+ def test_forever(self):
+ # test --forever
+ code = textwrap.dedent("""
+ import __builtin__
+ import unittest
+ from test import support
+
+ class ForeverTester(unittest.TestCase):
+ def test_run(self):
+ # Store the state in the __builtin__ module, because the test
+ # module is reload at each run
+ if 'RUN' in __builtin__.__dict__:
+ __builtin__.__dict__['RUN'] += 1
+ if __builtin__.__dict__['RUN'] >= 3:
+ self.fail("fail at the 3rd runs")
+ else:
+ __builtin__.__dict__['RUN'] = 1
+
+ def test_main():
+ support.run_unittest(ForeverTester)
+ """)
+ test = self.create_test('forever', code=code)
+ output = self.run_tests('--forever', test, exitcode=2)
+ self.check_executed_tests(output, [test]*3, failed=test)
+
+ def check_leak(self, code, what):
+ test = self.create_test('huntrleaks', code=code)
+
+ filename = 'reflog.txt'
+ self.addCleanup(support.unlink, filename)
+ output = self.run_tests('--huntrleaks', '3:3:', test,
+ exitcode=2,
+ stderr=subprocess.STDOUT)
+ self.check_executed_tests(output, [test], failed=test)
+
+ line = 'beginning 6 repetitions\n123456\n......\n'
+ self.check_line(output, re.escape(line))
+
+ line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
+ self.assertIn(line2, output)
+
+ with open(filename) as fp:
+ reflog = fp.read()
+ self.assertIn(line2, reflog)
+
+ @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+ @support.requires_type_collecting
+ def test_huntrleaks(self):
+ # test --huntrleaks
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ GLOBAL_LIST = []
+
+ class RefLeakTest(unittest.TestCase):
+ def test_leak(self):
+ GLOBAL_LIST.append(object())
+
+ def test_main():
+ support.run_unittest(RefLeakTest)
+ """)
+ self.check_leak(code, 'references')
+
+ @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+ def test_huntrleaks_fd_leak(self):
+ # test --huntrleaks for file descriptor leak
+ code = textwrap.dedent("""
+ import os
+ import unittest
+ from test import support
+
+ class FDLeakTest(unittest.TestCase):
+ def test_leak(self):
+ fd = os.open(__file__, os.O_RDONLY)
+ # bug: never close the file descriptor
+
+ def test_main():
+ support.run_unittest(FDLeakTest)
+ """)
+ self.check_leak(code, 'file descriptors')
+
+ def test_list_tests(self):
+ # test --list-tests
+ tests = [self.create_test() for i in range(5)]
+ output = self.run_tests('--list-tests', *tests)
+ self.assertEqual(output.rstrip().splitlines(),
+ tests)
+
+ def test_list_cases(self):
+ # test --list-cases
+ code = textwrap.dedent("""
+ import unittest
+
+ class Tests(unittest.TestCase):
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+ """)
+ testname = self.create_test(code=code)
+
+ # Test --list-cases
+ all_methods = ['%s.Tests.test_method1' % testname,
+ '%s.Tests.test_method2' % testname]
+ output = self.run_tests('--list-cases', testname)
+ self.assertEqual(output.splitlines(), all_methods)
+
+ # Test --list-cases with --match
+ all_methods = ['%s.Tests.test_method1' % testname]
+ output = self.run_tests('--list-cases',
+ '-m', 'test_method1',
+ testname)
+ self.assertEqual(output.splitlines(), all_methods)
+
+ @unittest.skipIf(sys.platform.startswith('aix'),
+ "support._crash_python() doesn't work on AIX")
+ def test_crashed(self):
+ # Any code which causes a crash
+ code = 'import test.support; test.support._crash_python()'
+ crash_test = self.create_test(name="crash", code=code)
+ ok_test = self.create_test(name="ok")
+
+ tests = [crash_test, ok_test]
+ output = self.run_tests("-j2", *tests, exitcode=2)
+ self.check_executed_tests(output, tests, failed=crash_test,
+ randomize=True)
+
+ def parse_methods(self, output):
+ regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
+ return [match.group(1) for match in regex.finditer(output)]
+
+ def test_matchfile(self):
+ # Any code which causes a crash
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+ def test_method3(self):
+ pass
+ def test_method4(self):
+ pass
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ all_methods = ['test_method1', 'test_method2',
+ 'test_method3', 'test_method4']
+ testname = self.create_test(code=code)
+
+ # by default, all methods should be run
+ output = self.run_tests("-v", testname)
+ methods = self.parse_methods(output)
+ self.assertEqual(methods, all_methods)
+
+ # only run a subset
+ filename = support.TESTFN
+ self.addCleanup(support.unlink, filename)
+
+ subset = [
+ # only match the method name
+ 'test_method1',
+ # match the full identifier
+ '%s.Tests.test_method3' % testname]
+ with open(filename, "w") as fp:
+ for name in subset:
+ print(name, file=fp)
+
+ output = self.run_tests("-v", "--matchfile", filename, testname)
+ methods = self.parse_methods(output)
+ subset = ['test_method1', 'test_method3']
+ self.assertEqual(methods, subset)
+
+ # bpo-34021: The test fails randomly for an unknown reason
+ # on "x86 Windows XP VS9.0 2.7" buildbot worker.
+ @unittest.skipIf(sys.platform == "win32", "test fails randomly on Windows")
+ def test_env_changed(self):
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_env_changed(self):
+ open("env_changed", "w").close()
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ testname = self.create_test(code=code)
+
+ # don't fail by default
+ output = self.run_tests(testname)
+ self.check_executed_tests(output, [testname], env_changed=testname)
+
+ # fail with --fail-env-changed
+ output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+ self.check_executed_tests(output, [testname], env_changed=testname,
+ fail_env_changed=True)
+
+ def test_rerun_fail(self):
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_bug(self):
+ # test always fail
+ self.fail("bug")
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-w", testname, exitcode=2)
+ self.check_executed_tests(output, [testname],
+ failed=testname, rerun=testname)
+
+ def test_no_tests_ran(self):
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_bug(self):
+ pass
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests("-m", "nosuchtest", testname, exitcode=0)
+ self.check_executed_tests(output, [testname], no_test_ran=testname)
+
+ def test_no_tests_ran_skip(self):
+ code = textwrap.dedent("""
+ import unittest
+
+ class Tests(unittest.TestCase):
+ def test_skipped(self):
+ self.skipTest("because")
+ """)
+ testname = self.create_test(code=code)
+
+ output = self.run_tests(testname, exitcode=0)
+ self.check_executed_tests(output, [testname])
+
+ def test_no_tests_ran_multiple_tests_nonexistent(self):
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_bug(self):
+ pass
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ testname = self.create_test(code=code)
+ testname2 = self.create_test(code=code)
+
+ output = self.run_tests("-m", "nosuchtest",
+ testname, testname2,
+ exitcode=0)
+ self.check_executed_tests(output, [testname, testname2],
+ no_test_ran=[testname, testname2])
+
+ def test_no_test_ran_some_test_exist_some_not(self):
+ code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_bug(self):
+ pass
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ testname = self.create_test(code=code)
+ other_code = textwrap.dedent("""
+ import unittest
+ from test import support
+
+ class Tests(unittest.TestCase):
+ def test_other_bug(self):
+ pass
+
+ def test_main():
+ support.run_unittest(Tests)
+ """)
+ testname2 = self.create_test(code=other_code)
+
+ output = self.run_tests("-m", "nosuchtest", "-m", "test_other_bug",
+ testname, testname2,
+ exitcode=0)
+ self.check_executed_tests(output, [testname, testname2],
+ no_test_ran=[testname])
+
+
+class TestUtils(unittest.TestCase):
+ def test_format_duration(self):
+ self.assertEqual(utils.format_duration(0),
+ '0 ms')
+ self.assertEqual(utils.format_duration(1e-9),
+ '1 ms')
+ self.assertEqual(utils.format_duration(10e-3),
+ '10 ms')
+ self.assertEqual(utils.format_duration(1.5),
+ '1 sec 500 ms')
+ self.assertEqual(utils.format_duration(1),
+ '1 sec')
+ self.assertEqual(utils.format_duration(2 * 60),
+ '2 min')
+ self.assertEqual(utils.format_duration(2 * 60 + 1),
+ '2 min 1 sec')
+ self.assertEqual(utils.format_duration(3 * 3600),
+ '3 hour')
+ self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1),
+ '3 hour 2 min')
+ self.assertEqual(utils.format_duration(3 * 3600 + 1),
+ '3 hour 1 sec')
+
+
+def test_main():
+ support.run_unittest(ProgramsTestCase, ArgsTestCase, TestUtils)
+
+
+if __name__ == "__main__":
+ test_main()
diff --git a/lib-python/2.7/test/test_robotparser.py b/lib-python/2.7/test/test_robotparser.py
index e4b01f1e30..ba7ccf8b58 100644
--- a/lib-python/2.7/test/test_robotparser.py
+++ b/lib-python/2.7/test/test_robotparser.py
@@ -1,296 +1,260 @@
-import unittest, StringIO, robotparser
-from test import test_support
-from urllib2 import urlopen, HTTPError
-
-HAVE_HTTPS = True
+import os
+import robotparser
+import unittest
+from test import support
+from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+import StringIO
try:
- from urllib2 import HTTPSHandler
+ import threading
except ImportError:
- HAVE_HTTPS = False
-
-class RobotTestCase(unittest.TestCase):
- def __init__(self, index, parser, url, good, agent):
- unittest.TestCase.__init__(self)
- if good:
- self.str = "RobotTest(%d, good, %s)" % (index, url)
- else:
- self.str = "RobotTest(%d, bad, %s)" % (index, url)
- self.parser = parser
- self.url = url
- self.good = good
- self.agent = agent
-
- def runTest(self):
- if isinstance(self.url, tuple):
- agent, url = self.url
- else:
- url = self.url
- agent = self.agent
- if self.good:
- self.assertTrue(self.parser.can_fetch(agent, url))
- else:
- self.assertFalse(self.parser.can_fetch(agent, url))
+ threading = None
- def __str__(self):
- return self.str
-tests = unittest.TestSuite()
+class BaseRobotTest:
+ robots_txt = ''
+ agent = 'test_robotparser'
+ good = []
+ bad = []
-def RobotTest(index, robots_txt, good_urls, bad_urls,
- agent="test_robotparser"):
+ def setUp(self):
+ lines = StringIO.StringIO(self.robots_txt).readlines()
+ self.parser = robotparser.RobotFileParser()
+ self.parser.parse(lines)
- lines = StringIO.StringIO(robots_txt).readlines()
- parser = robotparser.RobotFileParser()
- parser.parse(lines)
- for url in good_urls:
- tests.addTest(RobotTestCase(index, parser, url, 1, agent))
- for url in bad_urls:
- tests.addTest(RobotTestCase(index, parser, url, 0, agent))
+ def get_agent_and_url(self, url):
+ if isinstance(url, tuple):
+ agent, url = url
+ return agent, url
+ return self.agent, url
-# Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002)
-
-# 1.
-doc = """
-User-agent: *
-Disallow: /cyberworld/map/ # This is an infinite virtual URL space
-Disallow: /tmp/ # these will soon disappear
-Disallow: /foo.html
-"""
-
-good = ['/','/test.html']
-bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html']
+ def test_good_urls(self):
+ for url in self.good:
+ agent, url = self.get_agent_and_url(url)
+ self.assertTrue(self.parser.can_fetch(agent, url))
-RobotTest(1, doc, good, bad)
+ def test_bad_urls(self):
+ for url in self.bad:
+ agent, url = self.get_agent_and_url(url)
+ self.assertFalse(self.parser.can_fetch(agent, url))
-# 2.
-doc = """
-# robots.txt for http://www.example.com/
+class UserAgentWildcardTest(BaseRobotTest, unittest.TestCase):
+ robots_txt = """\
User-agent: *
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
+Disallow: /tmp/ # these will soon disappear
+Disallow: /foo.html
+ """
+ good = ['/', '/test.html']
+ bad = ['/cyberworld/map/index.html', '/tmp/xxx', '/foo.html']
-# Cybermapper knows where to go.
-User-agent: cybermapper
-Disallow:
-
-"""
-
-good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')]
-bad = ['/cyberworld/map/index.html']
-
-RobotTest(2, doc, good, bad)
-# 3.
-doc = """
+class RejectAllRobotsTest(BaseRobotTest, unittest.TestCase):
+ robots_txt = """\
# go away
User-agent: *
Disallow: /
-"""
-
-good = []
-bad = ['/cyberworld/map/index.html','/','/tmp/']
+ """
+ good = []
+ bad = ['/cyberworld/map/index.html', '/', '/tmp/']
-RobotTest(3, doc, good, bad)
-# Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002)
-
-# 4.
-doc = """
-User-agent: figtree
-Disallow: /tmp
-Disallow: /a%3cd.html
-Disallow: /a%2fb.html
-Disallow: /%7ejoe/index.html
-"""
-
-good = [] # XFAIL '/a/b.html'
-bad = ['/tmp','/tmp.html','/tmp/a.html',
- '/a%3cd.html','/a%3Cd.html','/a%2fb.html',
- '/~joe/index.html'
- ]
-
-RobotTest(4, doc, good, bad, 'figtree')
-RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04')
-
-# 6.
-doc = """
-User-agent: *
-Disallow: /tmp/
-Disallow: /a%3Cd.html
-Disallow: /a/b.html
-Disallow: /%7ejoe/index.html
-"""
-
-good = ['/tmp',] # XFAIL: '/a%2fb.html'
-bad = ['/tmp/','/tmp/a.html',
- '/a%3cd.html','/a%3Cd.html',"/a/b.html",
- '/%7Ejoe/index.html']
-
-RobotTest(6, doc, good, bad)
-
-# From bug report #523041
+class UserAgentOrderingTest(BaseRobotTest, unittest.TestCase):
+ # the order of User-agent should be correct. note
+ # that this file is incorrect because "Googlebot" is a
+ # substring of "Googlebot-Mobile"
+ robots_txt = """\
+User-agent: Googlebot
+Disallow: /
-# 7.
-doc = """
-User-Agent: *
-Disallow: /.
-"""
+User-agent: Googlebot-Mobile
+Allow: /
+ """
+ agent = 'Googlebot'
+ bad = ['/something.jpg']
-good = ['/foo.html']
-bad = [] # Bug report says "/" should be denied, but that is not in the RFC
-RobotTest(7, doc, good, bad)
+class UserAgentGoogleMobileTest(UserAgentOrderingTest):
+ agent = 'Googlebot-Mobile'
-# From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364
-# 8.
-doc = """
+class GoogleURLOrderingTest(BaseRobotTest, unittest.TestCase):
+ # Google also got the order wrong. You need
+ # to specify the URLs from more specific to more general
+ robots_txt = """\
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
-"""
+ """
+ agent = 'googlebot'
+ good = ['/folder1/myfile.html']
+ bad = ['/folder1/anotherfile.html']
-good = ['/folder1/myfile.html']
-bad = ['/folder1/anotherfile.html']
-RobotTest(8, doc, good, bad, agent="Googlebot")
+class DisallowQueryStringTest(BaseRobotTest, unittest.TestCase):
+ # see issue #6325 for details
+ robots_txt = """\
+User-agent: *
+Disallow: /some/path?name=value
+ """
+ good = ['/some/path']
+ bad = ['/some/path?name=value']
-# 9. This file is incorrect because "Googlebot" is a substring of
-# "Googlebot-Mobile", so test 10 works just like test 9.
-doc = """
-User-agent: Googlebot
-Disallow: /
-User-agent: Googlebot-Mobile
-Allow: /
-"""
-
-good = []
-bad = ['/something.jpg']
+class UseFirstUserAgentWildcardTest(BaseRobotTest, unittest.TestCase):
+ # obey first * entry (#4108)
+ robots_txt = """\
+User-agent: *
+Disallow: /some/path
-RobotTest(9, doc, good, bad, agent="Googlebot")
+User-agent: *
+Disallow: /another/path
+ """
+ good = ['/another/path']
+ bad = ['/some/path']
-good = []
-bad = ['/something.jpg']
-RobotTest(10, doc, good, bad, agent="Googlebot-Mobile")
+class EmptyQueryStringTest(BaseRobotTest, unittest.TestCase):
+ # normalize the URL first (#17403)
+ robots_txt = """\
+User-agent: *
+Allow: /some/path?
+Disallow: /another/path?
+ """
+ good = ['/some/path?']
+ bad = ['/another/path?']
-# 11. Get the order correct.
-doc = """
-User-agent: Googlebot-Mobile
-Allow: /
-User-agent: Googlebot
-Disallow: /
-"""
+class DefaultEntryTest(BaseRobotTest, unittest.TestCase):
+ robots_txt = """\
+User-agent: *
+Crawl-delay: 1
+Request-rate: 3/15
+Disallow: /cyberworld/map/
+ """
+ good = ['/', '/test.html']
+ bad = ['/cyberworld/map/index.html']
-good = []
-bad = ['/something.jpg']
-RobotTest(11, doc, good, bad, agent="Googlebot")
+class StringFormattingTest(BaseRobotTest, unittest.TestCase):
+ robots_txt = """\
+User-agent: *
+Crawl-delay: 1
+Request-rate: 3/15
+Disallow: /cyberworld/map/ # This is an infinite virtual URL space
-good = ['/something.jpg']
-bad = []
+# Cybermapper knows where to go.
+User-agent: cybermapper
+Disallow: /some/path
+ """
-RobotTest(12, doc, good, bad, agent="Googlebot-Mobile")
+ expected_output = """\
+User-agent: cybermapper
+Disallow: /some/path
+User-agent: *
+Disallow: /cyberworld/map/
-# 13. Google also got the order wrong in #8. You need to specify the
-# URLs from more specific to more general.
-doc = """
-User-agent: Googlebot
-Allow: /folder1/myfile.html
-Disallow: /folder1/
"""
-good = ['/folder1/myfile.html']
-bad = ['/folder1/anotherfile.html']
+ def test_string_formatting(self):
+ self.assertEqual(str(self.parser), self.expected_output)
-RobotTest(13, doc, good, bad, agent="googlebot")
+class RobotHandler(BaseHTTPRequestHandler):
-# 14. For issue #6325 (query string support)
-doc = """
-User-agent: *
-Disallow: /some/path?name=value
-"""
+ def do_GET(self):
+ self.send_error(403, "Forbidden access")
-good = ['/some/path']
-bad = ['/some/path?name=value']
+ def log_message(self, format, *args):
+ pass
-RobotTest(14, doc, good, bad)
-# 15. For issue #4108 (obey first * entry)
-doc = """
-User-agent: *
-Disallow: /some/path
+@unittest.skipUnless(threading, 'threading required for this test')
+class PasswordProtectedSiteTestCase(unittest.TestCase):
-User-agent: *
-Disallow: /another/path
-"""
-
-good = ['/another/path']
-bad = ['/some/path']
-
-RobotTest(15, doc, good, bad)
+ def setUp(self):
+ self.server = HTTPServer((support.HOST, 0), RobotHandler)
-# 16. Empty query (issue #17403). Normalizing the url first.
-doc = """
-User-agent: *
-Allow: /some/path?
-Disallow: /another/path?
-"""
+ self.t = threading.Thread(
+ name='HTTPServer serving',
+ target=self.server.serve_forever,
+ # Short poll interval to make the test finish quickly.
+ # Time between requests is short enough that we won't wake
+ # up spuriously too many times.
+ kwargs={'poll_interval':0.01})
+ self.t.daemon = True # In case this function raises.
+ self.t.start()
-good = ['/some/path?']
-bad = ['/another/path?']
+ def tearDown(self):
+ self.server.shutdown()
+ self.t.join()
+ self.server.server_close()
-RobotTest(16, doc, good, bad)
+ @support.reap_threads
+ def testPasswordProtectedSite(self):
+ addr = self.server.server_address
+ url = 'http://' + support.HOST + ':' + str(addr[1])
+ robots_url = url + "/robots.txt"
+ parser = robotparser.RobotFileParser()
+ parser.set_url(url)
+ parser.read()
+ self.assertFalse(parser.can_fetch("*", robots_url))
class NetworkTestCase(unittest.TestCase):
- def testPasswordProtectedSite(self):
- test_support.requires('network')
- with test_support.transient_internet('mueblesmoraleda.com'):
- url = 'http://mueblesmoraleda.com'
- robots_url = url + "/robots.txt"
- # First check the URL is usable for our purposes, since the
- # test site is a bit flaky.
- try:
- urlopen(robots_url)
- except HTTPError as e:
- if e.code not in {401, 403}:
- self.skipTest(
- "%r should return a 401 or 403 HTTP error, not %r"
- % (robots_url, e.code))
- else:
- self.skipTest(
- "%r should return a 401 or 403 HTTP error, not succeed"
- % (robots_url))
- parser = robotparser.RobotFileParser()
- parser.set_url(url)
- try:
- parser.read()
- except IOError:
- self.skipTest('%s is unavailable' % url)
- self.assertEqual(parser.can_fetch("*", robots_url), False)
-
- @unittest.skipUnless(HAVE_HTTPS, 'need SSL support to download license')
- @test_support.system_must_validate_cert
- def testPythonOrg(self):
- test_support.requires('network')
- with test_support.transient_internet('www.python.org'):
- parser = robotparser.RobotFileParser(
- "https://www.python.org/robots.txt")
- parser.read()
- self.assertTrue(
- parser.can_fetch("*", "https://www.python.org/robots.txt"))
+ base_url = 'http://www.pythontest.net/'
+ robots_txt = '{}elsewhere/robots.txt'.format(base_url)
+
+ @classmethod
+ def setUpClass(cls):
+ support.requires('network')
+ with support.transient_internet(cls.base_url):
+ cls.parser = robotparser.RobotFileParser(cls.robots_txt)
+ cls.parser.read()
+
+ def url(self, path):
+ return '{}{}{}'.format(
+ self.base_url, path, '/' if not os.path.splitext(path)[1] else ''
+ )
+
+ def test_basic(self):
+ self.assertFalse(self.parser.disallow_all)
+ self.assertFalse(self.parser.allow_all)
+ self.assertGreater(self.parser.mtime(), 0)
+
+ def test_can_fetch(self):
+ self.assertTrue(self.parser.can_fetch('*', self.url('elsewhere')))
+ self.assertFalse(self.parser.can_fetch('Nutch', self.base_url))
+ self.assertFalse(self.parser.can_fetch('Nutch', self.url('brian')))
+ self.assertFalse(self.parser.can_fetch('Nutch', self.url('webstats')))
+ self.assertFalse(self.parser.can_fetch('*', self.url('webstats')))
+ self.assertTrue(self.parser.can_fetch('*', self.base_url))
+
+ def test_read_404(self):
+ parser = robotparser.RobotFileParser(self.url('i-robot.txt'))
+ parser.read()
+ self.assertTrue(parser.allow_all)
+ self.assertFalse(parser.disallow_all)
+ self.assertEqual(parser.mtime(), 0)
def test_main():
- test_support.run_unittest(tests)
- test_support.run_unittest(NetworkTestCase)
-
-if __name__=='__main__':
- test_support.verbose = 1
+ support.run_unittest(
+ UserAgentWildcardTest,
+ RejectAllRobotsTest,
+ UserAgentOrderingTest,
+ UserAgentGoogleMobileTest,
+ GoogleURLOrderingTest,
+ DisallowQueryStringTest,
+ UseFirstUserAgentWildcardTest,
+ EmptyQueryStringTest,
+ DefaultEntryTest,
+ StringFormattingTest,
+ PasswordProtectedSiteTestCase,
+ NetworkTestCase)
+
+
+if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_sax.py b/lib-python/2.7/test/test_sax.py
index a4228dc654..87a8658672 100644
--- a/lib-python/2.7/test/test_sax.py
+++ b/lib-python/2.7/test/test_sax.py
@@ -2,7 +2,8 @@
# $Id$
from xml.sax import make_parser, ContentHandler, \
- SAXException, SAXReaderNotAvailable, SAXParseException
+ SAXException, SAXReaderNotAvailable, SAXParseException, \
+ saxutils
try:
make_parser()
except SAXReaderNotAvailable:
@@ -173,6 +174,21 @@ class ParseTest(unittest.TestCase):
input.setEncoding('iso-8859-1')
self.check_parse(input)
+ def test_parse_close_source(self):
+ builtin_open = open
+ non_local = {'fileobj': None}
+
+ def mock_open(*args):
+ fileobj = builtin_open(*args)
+ non_local['fileobj'] = fileobj
+ return fileobj
+
+ with support.swap_attr(saxutils, 'open', mock_open):
+ make_xml_file(self.data, 'iso-8859-1', None)
+ with self.assertRaises(SAXException):
+ self.check_parse(TESTFN)
+ self.assertTrue(non_local['fileobj'].closed)
+
def check_parseString(self, s):
from xml.sax import parseString
result = StringIO()
diff --git a/lib-python/2.7/test/test_shutil.py b/lib-python/2.7/test/test_shutil.py
index 0869a9e553..ca89fe509b 100644
--- a/lib-python/2.7/test/test_shutil.py
+++ b/lib-python/2.7/test/test_shutil.py
@@ -35,6 +35,7 @@ except ImportError:
try:
import zipfile
+ import zlib
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
@@ -460,7 +461,6 @@ class TestShutil(unittest.TestCase):
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
- @unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to zip
@@ -485,6 +485,7 @@ class TestShutil(unittest.TestCase):
['dist/', 'dist/file1', 'dist/file2',
'dist/sub/', 'dist/sub/file3', 'dist/sub2/',
'outer'])
+ support.unlink(res)
with support.change_cwd(work_dir):
base_name = os.path.abspath(rel_base_name)
@@ -498,7 +499,6 @@ class TestShutil(unittest.TestCase):
['dist/', 'dist/file1', 'dist/file2',
'dist/sub/', 'dist/sub/file3', 'dist/sub2/'])
- @unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
@unittest.skipUnless(find_executable('zip'),
'Need the zip command to run')
@@ -524,7 +524,6 @@ class TestShutil(unittest.TestCase):
names2 = zf.namelist()
self.assertEqual(sorted(names), sorted(names2))
- @unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
@unittest.skipUnless(find_executable('unzip'),
'Need the unzip command to run')
@@ -544,6 +543,8 @@ class TestShutil(unittest.TestCase):
subprocess.check_output(zip_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
details = exc.output
+ if 'unrecognized option: t' in details:
+ self.skipTest("unzip doesn't support -t")
msg = "{}\n\n**Unzip Output**\n{}"
self.fail(msg.format(exc, details))
diff --git a/lib-python/2.7/test/test_signal.py b/lib-python/2.7/test/test_signal.py
index 7483f64123..5f4f579a67 100644
--- a/lib-python/2.7/test/test_signal.py
+++ b/lib-python/2.7/test/test_signal.py
@@ -138,6 +138,8 @@ class InterProcessSignalTests(unittest.TestCase):
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
+ finally:
+ signal.alarm(0)
# Issue 3864. Unknown if this affects earlier versions of freebsd also.
@unittest.skipIf(sys.platform=='freebsd6',
@@ -185,6 +187,9 @@ class InterProcessSignalTests(unittest.TestCase):
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
+ # read the exit status to not leak a zombie process
+ os.waitpid(child, 0)
+
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class BasicSignalTests(unittest.TestCase):
@@ -243,11 +248,15 @@ class WakeupSignalTests(unittest.TestCase):
import select
signal.alarm(1)
- before_time = time.time()
- # We attempt to get a signal during the sleep,
- # before select is called
- time.sleep(self.TIMEOUT_FULL)
- mid_time = time.time()
+ try:
+ before_time = time.time()
+ # We attempt to get a signal during the sleep,
+ # before select is called
+ time.sleep(self.TIMEOUT_FULL)
+ mid_time = time.time()
+ finally:
+ signal.alarm(0)
+
self.assertTrue(mid_time - before_time < self.TIMEOUT_HALF)
select.select([self.read], [], [], self.TIMEOUT_FULL)
after_time = time.time()
@@ -257,11 +266,15 @@ class WakeupSignalTests(unittest.TestCase):
import select
signal.alarm(1)
- before_time = time.time()
- # We attempt to get a signal during the select call
- self.assertRaises(select.error, select.select,
- [self.read], [], [], self.TIMEOUT_FULL)
- after_time = time.time()
+ try:
+ before_time = time.time()
+ # We attempt to get a signal during the select call
+ self.assertRaises(select.error, select.select,
+ [self.read], [], [], self.TIMEOUT_FULL)
+ after_time = time.time()
+ finally:
+ signal.alarm(0)
+
self.assertTrue(after_time - before_time < self.TIMEOUT_HALF)
def setUp(self):
@@ -490,6 +503,16 @@ class ItimerTest(unittest.TestCase):
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
+ def test_setitimer_tiny(self):
+ # bpo-30807: C setitimer() takes a microsecond-resolution interval.
+ # Check that float -> timeval conversion doesn't round
+ # the interval down to zero, which would disable the timer.
+ self.itimer = signal.ITIMER_REAL
+ signal.setitimer(self.itimer, 1e-6)
+ time.sleep(1)
+ self.assertEqual(self.hndl_called, True)
+
+
def test_main():
test_support.run_unittest(BasicSignalTests, InterProcessSignalTests,
WakeupFDTests, WakeupSignalTests,
diff --git a/lib-python/2.7/test/test_site.py b/lib-python/2.7/test/test_site.py
index de3f28bb0d..e5ddaccb47 100644
--- a/lib-python/2.7/test/test_site.py
+++ b/lib-python/2.7/test/test_site.py
@@ -7,7 +7,9 @@ executing have not been removed.
import unittest
from test.test_support import run_unittest, TESTFN, EnvironmentVarGuard
from test.test_support import captured_output
+from test import support
import __builtin__
+import errno
import os
import sys
import re
@@ -24,14 +26,30 @@ if "site" in sys.modules:
else:
raise unittest.SkipTest("importation of site.py suppressed")
-if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
- # need to add user site directory for tests
- try:
- os.makedirs(site.USER_SITE)
- site.addsitedir(site.USER_SITE)
- except OSError as exc:
- raise unittest.SkipTest('unable to create user site directory (%r): %s'
- % (site.USER_SITE, exc))
+
+OLD_SYS_PATH = None
+
+
+def setUpModule():
+ global OLD_SYS_PATH
+ OLD_SYS_PATH = sys.path[:]
+
+ if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
+ # need to add user site directory for tests
+ try:
+ os.makedirs(site.USER_SITE)
+ # modify sys.path: will be restored by tearDownModule()
+ site.addsitedir(site.USER_SITE)
+ except OSError as exc:
+ if exc.errno in (errno.EACCES, errno.EPERM):
+ raise unittest.SkipTest('unable to create user site directory (%r): %s'
+ % (site.USER_SITE, exc))
+ else:
+ raise
+
+
+def tearDownModule():
+ sys.path[:] = OLD_SYS_PATH
class HelperFunctionsTests(unittest.TestCase):
@@ -224,6 +242,7 @@ class HelperFunctionsTests(unittest.TestCase):
# the call sets USER_BASE *and* USER_SITE
self.assertEqual(site.USER_SITE, user_site)
self.assertTrue(user_site.startswith(site.USER_BASE), user_site)
+ self.assertEqual(site.USER_BASE, site.getuserbase())
def test_getsitepackages(self):
site.PREFIXES = ['xoxo']
@@ -252,6 +271,48 @@ class HelperFunctionsTests(unittest.TestCase):
wanted = os.path.join('xoxo', 'lib', 'site-packages')
self.assertEqual(dirs[1], wanted)
+ def test_no_home_directory(self):
+ # bpo-10496: getuserbase() and getusersitepackages() must not fail if
+ # the current user has no home directory (if expanduser() returns the
+ # path unchanged).
+ site.USER_SITE = None
+ site.USER_BASE = None
+ sysconfig._CONFIG_VARS = None
+
+ with EnvironmentVarGuard() as environ, \
+ support.swap_attr(os.path, 'expanduser', lambda path: path):
+
+ del environ['PYTHONUSERBASE']
+ del environ['APPDATA']
+
+ user_base = site.getuserbase()
+ self.assertTrue(user_base.startswith('~' + os.sep),
+ user_base)
+
+ user_site = site.getusersitepackages()
+ self.assertTrue(user_site.startswith(user_base), user_site)
+
+ def fake_isdir(path):
+ fake_isdir.arg = path
+ return False
+ fake_isdir.arg = None
+
+ def must_not_be_called(*args):
+ raise AssertionError
+
+ with support.swap_attr(os.path, 'isdir', fake_isdir), \
+ support.swap_attr(site, 'addsitedir', must_not_be_called), \
+ support.swap_attr(site, 'ENABLE_USER_SITE', True):
+
+ # addusersitepackages() must not add user_site to sys.path
+ # if it is not an existing directory
+ known_paths = set()
+ site.addusersitepackages(known_paths)
+
+ self.assertEqual(fake_isdir.arg, user_site)
+ self.assertFalse(known_paths)
+
+
class PthFile(object):
"""Helper class for handling testing of .pth files"""
diff --git a/lib-python/2.7/test/test_smtplib.py b/lib-python/2.7/test/test_smtplib.py
index 1bb6690188..703b631c17 100644
--- a/lib-python/2.7/test/test_smtplib.py
+++ b/lib-python/2.7/test/test_smtplib.py
@@ -306,12 +306,14 @@ class TooLongLineTests(unittest.TestCase):
self.sock.settimeout(15)
self.port = test_support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
- threading.Thread(target=server, args=servargs).start()
+ self.thread = threading.Thread(target=server, args=servargs)
+ self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
+ self.thread.join()
sys.stdout = self.old_stdout
def testLineTooLong(self):
diff --git a/lib-python/2.7/test/test_socket.py b/lib-python/2.7/test/test_socket.py
index d0a7d686ff..f62ed4dde9 100644
--- a/lib-python/2.7/test/test_socket.py
+++ b/lib-python/2.7/test/test_socket.py
@@ -21,6 +21,9 @@ except ImportError:
_socket = None
+MAIN_TIMEOUT = 60.0
+
+
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True
if that has been possible."""
@@ -35,7 +38,7 @@ def try_address(host, port=0, family=socket.AF_INET):
HOST = test_support.HOST
MSG = b'Michael Gilfix was here\n'
-SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
+SUPPORTS_IPV6 = test_support.IPV6_ENABLED
try:
import thread
@@ -736,6 +739,7 @@ class GeneralModuleTests(unittest.TestCase):
self.assertRaises(socket.timeout, c.sendall,
b"x" * test_support.SOCK_MAX_SIZE)
finally:
+ signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
@@ -949,6 +953,7 @@ class BasicSocketPairTest(SocketPairTest):
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
+ self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
@@ -984,21 +989,27 @@ class NonBlockingTCPTests(ThreadedTCPSocketTest):
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
- try:
- conn, addr = self.serv.accept()
- except socket.error:
- pass
- else:
- self.fail("Error trying to do non-blocking accept.")
- read, write, err = select.select([self.serv], [], [])
- if self.serv in read:
+
+ # connect() didn't start: non-blocking accept() fails
+ with self.assertRaises(socket.error):
conn, addr = self.serv.accept()
- conn.close()
- else:
+
+ self.event.set()
+
+ read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
+ if self.serv not in read:
self.fail("Error trying to do accept after select.")
+ # connect() completed: non-blocking accept() doesn't block
+ conn, addr = self.serv.accept()
+ self.addCleanup(conn.close)
+ self.assertIsNone(conn.gettimeout())
+
def _testAccept(self):
- time.sleep(0.1)
+ # don't connect before event is set to check
+ # that non-blocking accept() raises socket.error
+ self.event.wait()
+
self.cli.connect((HOST, self.port))
def testConnect(self):
@@ -1013,25 +1024,32 @@ class NonBlockingTCPTests(ThreadedTCPSocketTest):
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
+ self.addCleanup(conn.close)
conn.setblocking(0)
- try:
- msg = conn.recv(len(MSG))
- except socket.error:
- pass
- else:
- self.fail("Error trying to do non-blocking recv.")
- read, write, err = select.select([conn], [], [])
- if conn in read:
+
+ # the server didn't send data yet: non-blocking recv() fails
+ with self.assertRaises(socket.error):
msg = conn.recv(len(MSG))
- conn.close()
- self.assertEqual(msg, MSG)
- else:
+
+ self.event.set()
+
+ read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
+ if conn not in read:
self.fail("Error during select call to non-blocking socket.")
+ # the server sent data yet: non-blocking recv() doesn't block
+ msg = conn.recv(len(MSG))
+ self.assertEqual(msg, MSG)
+
def _testRecv(self):
self.cli.connect((HOST, self.port))
- time.sleep(0.1)
- self.cli.send(MSG)
+
+ # don't send anything before event is set to check
+ # that non-blocking recv() raises socket.error
+ self.event.wait()
+
+ # send data: recv() will no longer block
+ self.cli.sendall(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
@@ -1363,6 +1381,10 @@ class NetworkConnectionNoServer(unittest.TestCase):
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
+ if hasattr(errno, 'EADDRNOTAVAIL'):
+ # bpo-31910: socket.create_connection() fails randomly
+ # with EADDRNOTAVAIL on Travis CI
+ expected_errnos.append(errno.EADDRNOTAVAIL)
self.assertIn(cm.exception.errno, expected_errnos)
@@ -1537,8 +1559,8 @@ class TCPTimeoutTest(SocketTCPTest):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
- signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
+ signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
@@ -1715,9 +1737,17 @@ def isTipcAvailable():
"""
if not hasattr(socket, "AF_TIPC"):
return False
- if not os.path.isfile("/proc/modules"):
- return False
- with open("/proc/modules") as f:
+ try:
+ f = open("/proc/modules")
+ except IOError as e:
+ # It's ok if the file does not exist, is a directory or if we
+ # have not the permission to read it. In any other case it's a
+ # real error, so raise it again.
+ if e.errno in (errno.ENOENT, errno.EISDIR, errno.EACCES):
+ return False
+ else:
+ raise
+ with f:
for line in f:
if line.startswith("tipc "):
return True
diff --git a/lib-python/2.7/test/test_socketserver.py b/lib-python/2.7/test/test_socketserver.py
index d645d208dc..847859d06b 100644
--- a/lib-python/2.7/test/test_socketserver.py
+++ b/lib-python/2.7/test/test_socketserver.py
@@ -69,17 +69,32 @@ def simple_subprocess(testcase):
testcase.assertEqual(72 << 8, status)
+def close_server(server):
+ server.server_close()
+
+ if hasattr(server, 'active_children'):
+ # ForkingMixIn: Manually reap all child processes, since server_close()
+ # calls waitpid() in non-blocking mode using the WNOHANG flag.
+ for pid in server.active_children.copy():
+ try:
+ os.waitpid(pid, 0)
+ except ChildProcessError:
+ pass
+ server.active_children.clear()
+
+
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
+ self.addCleanup(signal_alarm, 0)
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
- signal_alarm(0) # Didn't deadlock.
+ self.doCleanups()
reap_children()
for fn in self.test_files:
@@ -118,7 +133,7 @@ class SocketServerTest(unittest.TestCase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
- self.server_close()
+ close_server(self)
raise
class MyHandler(hdlrbase):
@@ -158,7 +173,7 @@ class SocketServerTest(unittest.TestCase):
if verbose: print "waiting for server"
server.shutdown()
t.join()
- server.server_close()
+ close_server(server)
self.assertRaises(socket.error, server.socket.fileno)
if verbose: print "done"
@@ -314,6 +329,7 @@ class SocketServerTest(unittest.TestCase):
s.shutdown()
for t, s in threads:
t.join()
+ close_server(s)
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
@@ -347,7 +363,7 @@ class MiscTestCase(unittest.TestCase):
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
- server.server_close()
+ close_server(server)
def test_main():
diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py
index 37d0eda226..b388318ecd 100644
--- a/lib-python/2.7/test/test_ssl.py
+++ b/lib-python/2.7/test/test_ssl.py
@@ -14,11 +14,12 @@ import gc
import os
import errno
import pprint
-import tempfile
+import shutil
import urllib2
import traceback
import weakref
import platform
+import re
import functools
from contextlib import closing
@@ -72,10 +73,18 @@ NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
+TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
-DHFILE = data_file("dh1024.pem")
+DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())
+# Not defined in all versions of OpenSSL
+OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
+OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
+OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
+OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
+OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
+
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
@@ -151,6 +160,36 @@ def skip_if_broken_ubuntu_ssl(func):
else:
return func
+def skip_if_openssl_cnf_minprotocol_gt_tls1(func):
+ """Skip a test if the OpenSSL config MinProtocol is > TLSv1.
+ OS distros with an /etc/ssl/openssl.cnf and MinProtocol set often do so to
+ require TLSv1.2 or higher (Debian Buster). Some of our tests for older
+ protocol versions will fail under such a config.
+ Alternative workaround: Run this test in a process with
+ OPENSSL_CONF=/dev/null in the environment.
+ """
+ @functools.wraps(func)
+ def f(*args, **kwargs):
+ openssl_cnf = os.environ.get("OPENSSL_CONF", "/etc/ssl/openssl.cnf")
+ try:
+ with open(openssl_cnf, "r") as config:
+ for line in config:
+ match = re.match(r"MinProtocol\s*=\s*(TLSv\d+\S*)", line)
+ if match:
+ tls_ver = match.group(1)
+ if tls_ver > "TLSv1":
+ raise unittest.SkipTest(
+ "%s has MinProtocol = %s which is > TLSv1." %
+ (openssl_cnf, tls_ver))
+ except (EnvironmentError, UnicodeDecodeError) as err:
+ # no config file found, etc.
+ if support.verbose:
+ sys.stdout.write("\n Could not scan %s for MinProtocol: %s\n"
+ % (openssl_cnf, err))
+ return func(*args, **kwargs)
+ return f
+
+
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
@@ -168,6 +207,13 @@ class BasicSocketTests(unittest.TestCase):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
+ ssl.OP_NO_SSLv2
+ ssl.OP_NO_SSLv3
+ ssl.OP_NO_TLSv1
+ ssl.OP_NO_TLSv1_3
+ if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
+ ssl.OP_NO_TLSv1_1
+ ssl.OP_NO_TLSv1_2
def test_random(self):
v = ssl.RAND_status()
@@ -194,9 +240,9 @@ class BasicSocketTests(unittest.TestCase):
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
- self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
- self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
- self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
+ self.assertEqual(p['notAfter'], asn1time('Aug 26 14:23:15 2028 GMT'))
+ self.assertEqual(p['notBefore'], asn1time('Aug 29 14:23:15 2018 GMT'))
+ self.assertEqual(p['serialNumber'], '98A7CF88C74A32ED')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
@@ -220,6 +266,27 @@ class BasicSocketTests(unittest.TestCase):
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
+ def test_parse_cert_CVE_2019_5010(self):
+ p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
+ if support.verbose:
+ sys.stdout.write("\n" + pprint.pformat(p) + "\n")
+ self.assertEqual(
+ p,
+ {
+ 'issuer': (
+ (('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
+ 'notAfter': 'Jun 14 18:00:58 2028 GMT',
+ 'notBefore': 'Jun 18 18:00:58 2018 GMT',
+ 'serialNumber': '02',
+ 'subject': ((('countryName', 'UK'),),
+ (('commonName',
+ 'codenomicon-vm-2.test.lal.cisco.com'),)),
+ 'subjectAltName': (
+ ('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
+ 'version': 3
+ }
+ )
+
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
@@ -334,6 +401,7 @@ class BasicSocketTests(unittest.TestCase):
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
+ self.assertRaises(NotImplementedError, ss.dup)
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
@@ -986,6 +1054,13 @@ class ContextTests(unittest.TestCase):
def test_load_dh_params(self):
+ filename = u'dhpäräm.pem'
+ fs_encoding = sys.getfilesystemencoding()
+ try:
+ filename.encode(fs_encoding)
+ except UnicodeEncodeError:
+ self.skipTest("filename %r cannot be encoded to the filesystem encoding %r" % (filename, fs_encoding))
+
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
@@ -997,6 +1072,10 @@ class ContextTests(unittest.TestCase):
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
+ with support.temp_dir() as d:
+ fname = os.path.join(d, filename)
+ shutil.copy(DHFILE, fname)
+ ctx.load_dh_params(fname)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
@@ -1141,16 +1220,29 @@ class ContextTests(unittest.TestCase):
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
+ def _assert_context_options(self, ctx):
+ self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
+ if OP_NO_COMPRESSION != 0:
+ self.assertEqual(ctx.options & OP_NO_COMPRESSION,
+ OP_NO_COMPRESSION)
+ if OP_SINGLE_DH_USE != 0:
+ self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
+ OP_SINGLE_DH_USE)
+ if OP_SINGLE_ECDH_USE != 0:
+ self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
+ OP_SINGLE_ECDH_USE)
+ if OP_CIPHER_SERVER_PREFERENCE != 0:
+ self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
+ OP_CIPHER_SERVER_PREFERENCE)
+
def test_create_default_context(self):
ctx = ssl.create_default_context()
+
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
- self.assertEqual(
- ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
- getattr(ssl, "OP_NO_COMPRESSION", 0),
- )
+ self._assert_context_options(ctx)
+
with open(SIGNING_CA) as f:
cadata = f.read().decode("ascii")
@@ -1158,40 +1250,24 @@ class ContextTests(unittest.TestCase):
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
- self.assertEqual(
- ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
- getattr(ssl, "OP_NO_COMPRESSION", 0),
- )
+ self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
- self.assertEqual(
- ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
- getattr(ssl, "OP_NO_COMPRESSION", 0),
- )
- self.assertEqual(
- ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
- getattr(ssl, "OP_SINGLE_DH_USE", 0),
- )
- self.assertEqual(
- ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
- getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
- )
+ self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
+ self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
+ self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
@@ -1199,12 +1275,12 @@ class ContextTests(unittest.TestCase):
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
+ self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
- self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
+ self._assert_context_options(ctx)
def test__https_verify_certificates(self):
# Unit test to check the contect factory mapping
@@ -1592,34 +1668,6 @@ class NetworkedTests(unittest.TestCase):
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
- def test_algorithms(self):
- # Issue #8484: all algorithms should be available when verifying a
- # certificate.
- # SHA256 was added in OpenSSL 0.9.8
- if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
- self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
- # sha256.tbs-internet.com needs SNI to use the correct certificate
- if not ssl.HAS_SNI:
- self.skipTest("SNI needed for this test")
- # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
- remote = ("sha256.tbs-internet.com", 443)
- sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
- with support.transient_internet("sha256.tbs-internet.com"):
- ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
- ctx.verify_mode = ssl.CERT_REQUIRED
- ctx.load_verify_locations(sha256_cert)
- s = ctx.wrap_socket(socket.socket(socket.AF_INET),
- server_hostname="sha256.tbs-internet.com")
- try:
- s.connect(remote)
- if support.verbose:
- sys.stdout.write("\nCipher with %r is %r\n" %
- (remote, s.cipher()))
- sys.stdout.write("Certificate is:\n%s\n" %
- pprint.pformat(s.getpeercert()))
- finally:
- s.close()
-
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
@@ -1684,23 +1732,43 @@ else:
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
- except socket.error as e:
- # We treat ConnectionResetError as though it were an
- # SSLError - OpenSSL on Ubuntu abruptly closes the
- # connection when asked to use an unsupported protocol.
- #
- # XXX Various errors can have happened here, for example
- # a mismatching protocol version, an invalid certificate,
- # or a low-level bug. This should be made more discriminating.
- if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
- raise
- self.server.conn_errors.append(e)
- if self.server.chatty:
- handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
- self.running = False
- self.server.stop()
- self.close()
- return False
+ except (ssl.SSLError, socket.error, OSError) as e:
+ if e.errno in (errno.ECONNRESET, errno.EPIPE, errno.ESHUTDOWN):
+ # Mimick Python 3:
+ #
+ # except (ConnectionResetError, BrokenPipeError):
+ #
+ # We treat ConnectionResetError as though it were an
+ # SSLError - OpenSSL on Ubuntu abruptly closes the
+ # connection when asked to use an unsupported protocol.
+ #
+ # BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
+ # tries to send session tickets after handshake.
+ # https://github.com/openssl/openssl/issues/6342
+ self.server.conn_errors.append(str(e))
+ if self.server.chatty:
+ handle_error(
+ "\n server: bad connection attempt from "
+ + repr(self.addr) + ":\n")
+ self.running = False
+ self.close()
+ return False
+ else:
+ # OSError may occur with wrong protocols, e.g. both
+ # sides use PROTOCOL_TLS_SERVER.
+ #
+ # XXX Various errors can have happened here, for example
+ # a mismatching protocol version, an invalid certificate,
+ # or a low-level bug. This should be made more discriminating.
+ if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
+ raise
+ self.server.conn_errors.append(e)
+ if self.server.chatty:
+ handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
+ self.running = False
+ self.server.stop()
+ self.close()
+ return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
@@ -1799,7 +1867,7 @@ else:
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
- else ssl.PROTOCOL_TLSv1)
+ else ssl.PROTOCOL_TLS)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
@@ -1959,6 +2027,8 @@ else:
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
+ # make sure that ConnectionHandler is removed from socket_map
+ asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
@@ -2223,10 +2293,10 @@ else:
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
- "wrongcert.pem")
- server = ThreadedEchoServer(CERTFILE,
+ "keycert.pem")
+ server = ThreadedEchoServer(SIGNED_CERTFILE,
certreqs=ssl.CERT_REQUIRED,
- cacerts=CERTFILE, chatty=False,
+ cacerts=SIGNING_CA, chatty=False,
connectionchatty=False)
with server, \
closing(socket.socket()) as sock, \
@@ -2314,6 +2384,7 @@ else:
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
+ @skip_if_openssl_cnf_minprotocol_gt_tls1
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
@@ -2391,6 +2462,7 @@ else:
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
+ @skip_if_openssl_cnf_minprotocol_gt_tls1
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
@@ -2657,6 +2729,7 @@ else:
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
+ self.assertRaises(NotImplementedError, s.dup)
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
@@ -2753,7 +2826,7 @@ else:
# Block on the accept and wait on the connection to close.
evt.set()
remote[0], peer[0] = server.accept()
- remote[0].recv(1)
+ remote[0].send(remote[0].recv(4))
t = threading.Thread(target=serve)
t.start()
@@ -2761,6 +2834,8 @@ else:
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
+ client.send(b'data')
+ client.recv()
client_addr = client.getsockname()
client.close()
t.join()
@@ -2784,19 +2859,24 @@ else:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
- def test_default_ciphers(self):
- context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
- try:
- # Force a set of weak ciphers on our client context
- context.set_ciphers("DES")
- except ssl.SSLError:
- self.skipTest("no DES cipher available")
- with ThreadedEchoServer(CERTFILE,
- ssl_version=ssl.PROTOCOL_SSLv23,
- chatty=False) as server:
- with closing(context.wrap_socket(socket.socket())) as s:
- with self.assertRaises(ssl.SSLError):
- s.connect((HOST, server.port))
+ def test_no_shared_ciphers(self):
+ server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ server_context.load_cert_chain(SIGNED_CERTFILE)
+ client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ client_context.verify_mode = ssl.CERT_REQUIRED
+ client_context.check_hostname = True
+
+ # OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
+ client_context.options |= ssl.OP_NO_TLSv1_3
+ # Force different suites on client and master
+ client_context.set_ciphers("AES128")
+ server_context.set_ciphers("AES256")
+ with ThreadedEchoServer(context=server_context) as server:
+ s = client_context.wrap_socket(
+ socket.socket(),
+ server_hostname="localhost")
+ with self.assertRaises(ssl.SSLError):
+ s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
@@ -2814,6 +2894,25 @@ else:
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
+ @unittest.skipUnless(ssl.HAS_TLSv1_3,
+ "test requires TLSv1.3 enabled OpenSSL")
+ def test_tls1_3(self):
+ context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+ context.load_cert_chain(CERTFILE)
+ # disable all but TLS 1.3
+ context.options |= (
+ ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
+ )
+ with ThreadedEchoServer(context=context) as server:
+ s = context.wrap_socket(socket.socket())
+ with closing(s):
+ s.connect((HOST, server.port))
+ self.assertIn(s.cipher()[0], [
+ 'TLS_AES_256_GCM_SHA384',
+ 'TLS_CHACHA20_POLY1305_SHA256',
+ 'TLS_AES_128_GCM_SHA256',
+ ])
+
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
diff --git a/lib-python/2.7/test/test_startfile.py b/lib-python/2.7/test/test_startfile.py
index 3c22f3f2a3..61212ad42f 100644
--- a/lib-python/2.7/test/test_startfile.py
+++ b/lib-python/2.7/test/test_startfile.py
@@ -10,8 +10,8 @@
import unittest
from test import test_support
import os
+import sys
from os import path
-from time import sleep
startfile = test_support.get_attribute(os, 'startfile')
@@ -23,20 +23,23 @@ class TestCase(unittest.TestCase):
def test_nonexisting_u(self):
self.assertRaises(OSError, startfile, u"nonexisting.vbs")
+ def check_empty(self, empty):
+ # We need to make sure the child process starts in a directory
+ # we're not about to delete. If we're running under -j, that
+ # means the test harness provided directory isn't a safe option.
+ # See http://bugs.python.org/issue15526 for more details
+ with test_support.change_cwd(path.dirname(sys.executable)):
+ startfile(empty)
+ startfile(empty, "open")
+
def test_empty(self):
empty = path.join(path.dirname(__file__), "empty.vbs")
- startfile(empty)
- startfile(empty, "open")
- # Give the child process some time to exit before we finish.
- # Otherwise the cleanup code will not be able to delete the cwd,
- # because it is still in use.
- sleep(0.1)
-
- def test_empty_u(self):
+ self.check_empty(empty)
+
+ def test_empty_unicode(self):
empty = path.join(path.dirname(__file__), "empty.vbs")
- startfile(unicode(empty, "mbcs"))
- startfile(unicode(empty, "mbcs"), "open")
- sleep(0.1)
+ empty = unicode(empty, "mbcs")
+ self.check_empty(empty)
def test_main():
test_support.run_unittest(TestCase)
diff --git a/lib-python/2.7/test/test_str.py b/lib-python/2.7/test/test_str.py
index c85825315f..2c53bc07d1 100644
--- a/lib-python/2.7/test/test_str.py
+++ b/lib-python/2.7/test/test_str.py
@@ -466,8 +466,116 @@ class StrTest(
self.assertIn('str', exc)
self.assertIn('tuple', exc)
+ def test_issue28598_strsubclass_rhs(self):
+ # A subclass of str with an __rmod__ method should be able to hook
+ # into the % operator
+ class SubclassedStr(str):
+ def __rmod__(self, other):
+ return 'Success, self.__rmod__({!r}) was called'.format(other)
+ self.assertEqual('lhs %% %r' % SubclassedStr('rhs'),
+ "Success, self.__rmod__('lhs %% %r') was called")
+
+
+class CAPITest(unittest.TestCase):
+
+ # Test PyString_FromFormat()
+ @test_support.cpython_only
+ def test_from_format(self):
+ ctypes = test_support.import_module('ctypes')
+ _testcapi = test_support.import_module('_testcapi')
+ from ctypes import pythonapi, py_object
+ from ctypes import (
+ c_int, c_uint,
+ c_long, c_ulong,
+ c_size_t, c_ssize_t,
+ c_char_p)
+
+ PyString_FromFormat = pythonapi.PyString_FromFormat
+ PyString_FromFormat.restype = py_object
+
+ # basic tests
+ self.assertEqual(PyString_FromFormat(b'format'),
+ b'format')
+ self.assertEqual(PyString_FromFormat(b'Hello %s !', b'world'),
+ b'Hello world !')
+
+ # test formatters
+ self.assertEqual(PyString_FromFormat(b'c=%c', c_int(0)),
+ b'c=\0')
+ self.assertEqual(PyString_FromFormat(b'c=%c', c_int(ord('@'))),
+ b'c=@')
+ self.assertEqual(PyString_FromFormat(b'c=%c', c_int(255)),
+ b'c=\xff')
+ self.assertEqual(PyString_FromFormat(b'd=%d ld=%ld zd=%zd',
+ c_int(1), c_long(2),
+ c_size_t(3)),
+ b'd=1 ld=2 zd=3')
+ self.assertEqual(PyString_FromFormat(b'd=%d ld=%ld zd=%zd',
+ c_int(-1), c_long(-2),
+ c_size_t(-3)),
+ b'd=-1 ld=-2 zd=-3')
+ self.assertEqual(PyString_FromFormat(b'u=%u lu=%lu zu=%zu',
+ c_uint(123), c_ulong(456),
+ c_size_t(789)),
+ b'u=123 lu=456 zu=789')
+ self.assertEqual(PyString_FromFormat(b'i=%i', c_int(123)),
+ b'i=123')
+ self.assertEqual(PyString_FromFormat(b'i=%i', c_int(-123)),
+ b'i=-123')
+ self.assertEqual(PyString_FromFormat(b'x=%x', c_int(0xabc)),
+ b'x=abc')
+
+ self.assertEqual(PyString_FromFormat(b's=%s', c_char_p(b'cstr')),
+ b's=cstr')
+
+ # test minimum and maximum integer values
+ size_max = c_size_t(-1).value
+ for formatstr, ctypes_type, value, py_formatter in (
+ (b'%d', c_int, _testcapi.INT_MIN, str),
+ (b'%d', c_int, _testcapi.INT_MAX, str),
+ (b'%ld', c_long, _testcapi.LONG_MIN, str),
+ (b'%ld', c_long, _testcapi.LONG_MAX, str),
+ (b'%lu', c_ulong, _testcapi.ULONG_MAX, str),
+ (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MIN, str),
+ (b'%zd', c_ssize_t, _testcapi.PY_SSIZE_T_MAX, str),
+ (b'%zu', c_size_t, size_max, str),
+ ):
+ self.assertEqual(PyString_FromFormat(formatstr, ctypes_type(value)),
+ py_formatter(value).encode('ascii')),
+
+ # width and precision (width is currently ignored)
+ self.assertEqual(PyString_FromFormat(b'%5s', b'a'),
+ b'a')
+ self.assertEqual(PyString_FromFormat(b'%.3s', b'abcdef'),
+ b'abc')
+
+ # '%%' formatter
+ self.assertEqual(PyString_FromFormat(b'%%'),
+ b'%')
+ self.assertEqual(PyString_FromFormat(b'[%%]'),
+ b'[%]')
+ self.assertEqual(PyString_FromFormat(b'%%%c', c_int(ord('_'))),
+ b'%_')
+ self.assertEqual(PyString_FromFormat(b'%%s'),
+ b'%s')
+
+ # Invalid formats and partial formatting
+ self.assertEqual(PyString_FromFormat(b'%'), b'%')
+ self.assertEqual(PyString_FromFormat(b'x=%i y=%', c_int(2), c_int(3)),
+ b'x=2 y=%')
+
+ self.assertEqual(PyString_FromFormat(b'%c', c_int(-1)), b'\xff')
+ self.assertEqual(PyString_FromFormat(b'%c', c_int(256)), b'\0')
+
+ # Issue #33817: empty strings
+ self.assertEqual(PyString_FromFormat(b''),
+ b'')
+ self.assertEqual(PyString_FromFormat(b'%s', b''),
+ b'')
+
+
def test_main():
- test_support.run_unittest(StrTest)
+ test_support.run_unittest(StrTest, CAPITest)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_strftime.py b/lib-python/2.7/test/test_strftime.py
index b26ebec93d..325024ceae 100644
--- a/lib-python/2.7/test/test_strftime.py
+++ b/lib-python/2.7/test/test_strftime.py
@@ -60,8 +60,10 @@ class StrftimeTest(unittest.TestCase):
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
- import locale
- locale.setlocale(locale.LC_TIME, 'C')
+ from locale import setlocale, LC_TIME
+ saved_locale = setlocale(LC_TIME)
+ setlocale(LC_TIME, 'C')
+ self.addCleanup(setlocale, LC_TIME, saved_locale)
def test_strftime(self):
now = time.time()
diff --git a/lib-python/2.7/test/test_strop.py b/lib-python/2.7/test/test_strop.py
index 81d078ed8d..50b8f6ebc3 100644
--- a/lib-python/2.7/test/test_strop.py
+++ b/lib-python/2.7/test/test_strop.py
@@ -2,11 +2,12 @@ import warnings
warnings.filterwarnings("ignore", "strop functions are obsolete;",
DeprecationWarning,
r'test.test_strop|unittest')
-import strop
import unittest
import sys
from test import test_support
+strop = test_support.import_module("strop")
+
class StropFunctionTestCase(unittest.TestCase):
diff --git a/lib-python/2.7/test/test_strptime.py b/lib-python/2.7/test/test_strptime.py
index 3d24941951..a7af85a6d5 100644
--- a/lib-python/2.7/test/test_strptime.py
+++ b/lib-python/2.7/test/test_strptime.py
@@ -428,7 +428,7 @@ class CalculationTests(unittest.TestCase):
self.assertTrue(result.tm_year == self.time_tuple.tm_year and
result.tm_mon == self.time_tuple.tm_mon and
result.tm_mday == self.time_tuple.tm_mday,
- "Calculation of Gregorian date failed;"
+ "Calculation of Gregorian date failed; "
"%s-%s-%s != %s-%s-%s" %
(result.tm_year, result.tm_mon, result.tm_mday,
self.time_tuple.tm_year, self.time_tuple.tm_mon,
@@ -440,7 +440,7 @@ class CalculationTests(unittest.TestCase):
result = _strptime._strptime_time(time.strftime(format_string, self.time_tuple),
format_string)
self.assertTrue(result.tm_wday == self.time_tuple.tm_wday,
- "Calculation of day of the week failed;"
+ "Calculation of day of the week failed; "
"%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))
def test_week_of_year_and_day_of_week_calculation(self):
@@ -580,7 +580,7 @@ class CacheTests(unittest.TestCase):
finally:
locale.setlocale(locale.LC_TIME, locale_info)
- @support.run_with_tz('STD-1DST')
+ @support.run_with_tz('STD-1DST,M4.1.0,M10.1.0')
def test_TimeRE_recreation_timezone(self):
# The TimeRE instance should be recreated upon changing the timezone.
oldtzname = time.tzname
diff --git a/lib-python/2.7/test/test_subprocess.py b/lib-python/2.7/test/test_subprocess.py
index 65212df1bf..5789c89228 100644
--- a/lib-python/2.7/test/test_subprocess.py
+++ b/lib-python/2.7/test/test_subprocess.py
@@ -2,6 +2,7 @@ import unittest
from test import test_support
import subprocess
import sys
+import platform
import signal
import os
import errno
@@ -9,6 +10,14 @@ import tempfile
import time
import re
import sysconfig
+import textwrap
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+else:
+ import ctypes.util
try:
import resource
@@ -19,6 +28,11 @@ try:
except ImportError:
threading = None
+try:
+ import _testcapi
+except ImportError:
+ _testcapi = None
+
mswindows = (sys.platform == "win32")
#
@@ -43,6 +57,8 @@ class BaseTestCase(unittest.TestCase):
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
+ self.doCleanups()
+ test_support.reap_children()
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
@@ -379,6 +395,46 @@ class ProcessTestCase(BaseTestCase):
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), "orange")
+ def test_invalid_cmd(self):
+ # null character in the command name
+ cmd = sys.executable + '\0'
+ with self.assertRaises(TypeError):
+ subprocess.Popen([cmd, "-c", "pass"])
+
+ # null character in the command argument
+ with self.assertRaises(TypeError):
+ subprocess.Popen([sys.executable, "-c", "pass#\0"])
+
+ def test_invalid_env(self):
+        # null character in the environment variable name
+ newenv = os.environ.copy()
+ newenv["FRUIT\0VEGETABLE"] = "cabbage"
+ with self.assertRaises(TypeError):
+ subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
+
+        # null character in the environment variable value
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
+ with self.assertRaises(TypeError):
+ subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
+
+        # equal character in the environment variable name
+ newenv = os.environ.copy()
+ newenv["FRUIT=ORANGE"] = "lemon"
+ with self.assertRaises(ValueError):
+ subprocess.Popen([sys.executable, "-c", "pass"], env=newenv)
+
+        # equal character in the environment variable value
+ newenv = os.environ.copy()
+ newenv["FRUIT"] = "orange=lemon"
+ p = subprocess.Popen([sys.executable, "-c",
+ 'import sys, os;'
+ 'sys.stdout.write(os.getenv("FRUIT"))'],
+ stdout=subprocess.PIPE,
+ env=newenv)
+ stdout, stderr = p.communicate()
+ self.assertEqual(stdout, "orange=lemon")
+
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
@@ -643,7 +699,7 @@ class ProcessTestCase(BaseTestCase):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
- if c.exception.errno not in (errno.ENOENT, errno.EACCES):
+ if c.exception.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
@@ -789,8 +845,11 @@ class _SuppressCoreFiles(object):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
signal.alarm(1)
- # communicate() will be interrupted by SIGALRM
- process.communicate()
+ try:
+ # communicate() will be interrupted by SIGALRM
+ process.communicate()
+ finally:
+ signal.alarm(0)
@unittest.skipIf(mswindows, "POSIX specific tests")
@@ -1220,6 +1279,29 @@ class POSIXProcessTestCase(BaseTestCase):
self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr))
+ @unittest.skipUnless(_testcapi is not None
+ and hasattr(_testcapi, 'W_STOPCODE'),
+ 'need _testcapi.W_STOPCODE')
+ def test_stopped(self):
+ """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
+ args = [sys.executable, '-c', 'pass']
+ proc = subprocess.Popen(args)
+
+ # Wait until the real process completes to avoid zombie process
+ pid = proc.pid
+ pid, status = os.waitpid(pid, 0)
+ self.assertEqual(status, 0)
+
+ status = _testcapi.W_STOPCODE(3)
+
+ def mock_waitpid(pid, flags):
+ return (pid, status)
+
+ with test_support.swap_attr(os, 'waitpid', mock_waitpid):
+ returncode = proc.wait()
+
+ self.assertEqual(returncode, -3)
+
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
diff --git a/lib-python/2.7/test/test_sundry.py b/lib-python/2.7/test/test_sundry.py
index 8fe89953b7..d4a97c82d9 100644
--- a/lib-python/2.7/test/test_sundry.py
+++ b/lib-python/2.7/test/test_sundry.py
@@ -10,7 +10,6 @@ class TestUntestedModules(unittest.TestCase):
with test_support.check_warnings(quiet=True):
import CGIHTTPServer
import audiodev
- import bdb
import cgitb
import code
import compileall
diff --git a/lib-python/2.7/test/test_support.py b/lib-python/2.7/test/test_support.py
index 258b3390d1..3c894afcef 100644
--- a/lib-python/2.7/test/test_support.py
+++ b/lib-python/2.7/test/test_support.py
@@ -1,1762 +1,3 @@
-"""Supporting definitions for the Python regression tests."""
-
-if __name__ != 'test.test_support':
- raise ImportError('test_support must be imported from the test package')
-
-import contextlib
-import errno
-import functools
-import gc
-import socket
-import stat
import sys
-import os
-import platform
-import shutil
-import warnings
-import unittest
-import importlib
-import UserDict
-import re
-import time
-import struct
-import sysconfig
-try:
- import thread
-except ImportError:
- thread = None
-
-__all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
- "verbose", "use_resources", "max_memuse", "record_original_stdout",
- "get_original_stdout", "unload", "unlink", "rmtree", "forget",
- "is_resource_enabled", "requires", "requires_mac_ver",
- "find_unused_port", "bind_port",
- "fcmp", "have_unicode", "is_jython", "TESTFN", "HOST", "FUZZ",
- "SAVEDCWD", "temp_cwd", "findfile", "sortdict", "check_syntax_error",
- "open_urlresource", "check_warnings", "check_py3k_warnings",
- "CleanImport", "EnvironmentVarGuard", "captured_output",
- "captured_stdout", "TransientResource", "transient_internet",
- "run_with_locale", "set_memlimit", "bigmemtest", "bigaddrspacetest",
- "BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
- "threading_cleanup", "reap_threads", "start_threads", "cpython_only",
- "check_impl_detail", "get_attribute", "py3k_bytes",
- "import_fresh_module", "threading_cleanup", "reap_children",
- "strip_python_stderr", "IPV6_ENABLED", "run_with_tz"]
-
-class Error(Exception):
- """Base class for regression test exceptions."""
-
-class TestFailed(Error):
- """Test failed."""
-
-class ResourceDenied(unittest.SkipTest):
- """Test skipped because it requested a disallowed resource.
-
- This is raised when a test calls requires() for a resource that
- has not been enabled. It is used to distinguish between expected
- and unexpected skips.
- """
-
-@contextlib.contextmanager
-def _ignore_deprecated_imports(ignore=True):
- """Context manager to suppress package and module deprecation
- warnings when importing them.
-
- If ignore is False, this context manager has no effect."""
- if ignore:
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", ".+ (module|package)",
- DeprecationWarning)
- yield
- else:
- yield
-
-
-def import_module(name, deprecated=False):
- """Import and return the module to be tested, raising SkipTest if
- it is not available.
-
- If deprecated is True, any module or package deprecation messages
- will be suppressed."""
- with _ignore_deprecated_imports(deprecated):
- try:
- return importlib.import_module(name)
- except ImportError, msg:
- raise unittest.SkipTest(str(msg))
-
-
-def _save_and_remove_module(name, orig_modules):
- """Helper function to save and remove a module from sys.modules
-
- Raise ImportError if the module can't be imported."""
- # try to import the module and raise an error if it can't be imported
- if name not in sys.modules:
- __import__(name)
- del sys.modules[name]
- for modname in list(sys.modules):
- if modname == name or modname.startswith(name + '.'):
- orig_modules[modname] = sys.modules[modname]
- del sys.modules[modname]
-
-def _save_and_block_module(name, orig_modules):
- """Helper function to save and block a module in sys.modules
-
- Return True if the module was in sys.modules, False otherwise."""
- saved = True
- try:
- orig_modules[name] = sys.modules[name]
- except KeyError:
- saved = False
- sys.modules[name] = None
- return saved
-
-
-def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
- """Imports and returns a module, deliberately bypassing the sys.modules cache
- and importing a fresh copy of the module. Once the import is complete,
- the sys.modules cache is restored to its original state.
-
- Modules named in fresh are also imported anew if needed by the import.
- If one of these modules can't be imported, None is returned.
-
- Importing of modules named in blocked is prevented while the fresh import
- takes place.
-
- If deprecated is True, any module or package deprecation messages
- will be suppressed."""
- # NOTE: test_heapq, test_json, and test_warnings include extra sanity
- # checks to make sure that this utility function is working as expected
- with _ignore_deprecated_imports(deprecated):
- # Keep track of modules saved for later restoration as well
- # as those which just need a blocking entry removed
- orig_modules = {}
- names_to_remove = []
- _save_and_remove_module(name, orig_modules)
- try:
- for fresh_name in fresh:
- _save_and_remove_module(fresh_name, orig_modules)
- for blocked_name in blocked:
- if not _save_and_block_module(blocked_name, orig_modules):
- names_to_remove.append(blocked_name)
- fresh_module = importlib.import_module(name)
- except ImportError:
- fresh_module = None
- finally:
- for orig_name, module in orig_modules.items():
- sys.modules[orig_name] = module
- for name_to_remove in names_to_remove:
- del sys.modules[name_to_remove]
- return fresh_module
-
-
-def get_attribute(obj, name):
- """Get an attribute, raising SkipTest if AttributeError is raised."""
- try:
- attribute = getattr(obj, name)
- except AttributeError:
- raise unittest.SkipTest("module %s has no attribute %s" % (
- obj.__name__, name))
- else:
- return attribute
-
-
-verbose = 1 # Flag set to 0 by regrtest.py
-use_resources = None # Flag set to [] by regrtest.py
-max_memuse = 0 # Disable bigmem tests (they will still be run with
- # small sizes, to make sure they work.)
-real_max_memuse = 0
-
-# _original_stdout is meant to hold stdout at the time regrtest began.
-# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
-# The point is to have some flavor of stdout the user can actually see.
-_original_stdout = None
-def record_original_stdout(stdout):
- global _original_stdout
- _original_stdout = stdout
-
-def get_original_stdout():
- return _original_stdout or sys.stdout
-
-def unload(name):
- try:
- del sys.modules[name]
- except KeyError:
- pass
-
-def _force_run(path, func, *args):
- try:
- return func(*args)
- except EnvironmentError as err:
- if verbose >= 2:
- print('%s: %s' % (err.__class__.__name__, err))
- print('re-run %s%r' % (func.__name__, args))
- os.chmod(path, stat.S_IRWXU)
- return func(*args)
-
-if sys.platform.startswith("win"):
- def _waitfor(func, pathname, waitall=False):
- # Perform the operation
- func(pathname)
- # Now setup the wait loop
- if waitall:
- dirname = pathname
- else:
- dirname, name = os.path.split(pathname)
- dirname = dirname or '.'
- # Check for `pathname` to be removed from the filesystem.
- # The exponential backoff of the timeout amounts to a total
- # of ~1 second after which the deletion is probably an error
- # anyway.
- # Testing on an i7@4.3GHz shows that usually only 1 iteration is
- # required when contention occurs.
- timeout = 0.001
- while timeout < 1.0:
- # Note we are only testing for the existence of the file(s) in
- # the contents of the directory regardless of any security or
- # access rights. If we have made it this far, we have sufficient
- # permissions to do that much using Python's equivalent of the
- # Windows API FindFirstFile.
- # Other Windows APIs can fail or give incorrect results when
- # dealing with files that are pending deletion.
- L = os.listdir(dirname)
- if not (L if waitall else name in L):
- return
- # Increase the timeout and try again
- time.sleep(timeout)
- timeout *= 2
- warnings.warn('tests may fail, delete still pending for ' + pathname,
- RuntimeWarning, stacklevel=4)
-
- def _unlink(filename):
- _waitfor(os.unlink, filename)
-
- def _rmdir(dirname):
- _waitfor(os.rmdir, dirname)
-
- def _rmtree(path):
- def _rmtree_inner(path):
- for name in _force_run(path, os.listdir, path):
- fullname = os.path.join(path, name)
- if os.path.isdir(fullname):
- _waitfor(_rmtree_inner, fullname, waitall=True)
- _force_run(fullname, os.rmdir, fullname)
- else:
- _force_run(fullname, os.unlink, fullname)
- _waitfor(_rmtree_inner, path, waitall=True)
- _waitfor(lambda p: _force_run(p, os.rmdir, p), path)
-else:
- _unlink = os.unlink
- _rmdir = os.rmdir
-
- def _rmtree(path):
- try:
- shutil.rmtree(path)
- return
- except EnvironmentError:
- pass
-
- def _rmtree_inner(path):
- for name in _force_run(path, os.listdir, path):
- fullname = os.path.join(path, name)
- try:
- mode = os.lstat(fullname).st_mode
- except EnvironmentError:
- mode = 0
- if stat.S_ISDIR(mode):
- _rmtree_inner(fullname)
- _force_run(path, os.rmdir, fullname)
- else:
- _force_run(path, os.unlink, fullname)
- _rmtree_inner(path)
- os.rmdir(path)
-
-def unlink(filename):
- try:
- _unlink(filename)
- except OSError:
- pass
-
-def rmdir(dirname):
- try:
- _rmdir(dirname)
- except OSError as error:
- # The directory need not exist.
- if error.errno != errno.ENOENT:
- raise
-
-def rmtree(path):
- try:
- _rmtree(path)
- except OSError, e:
- # Unix returns ENOENT, Windows returns ESRCH.
- if e.errno not in (errno.ENOENT, errno.ESRCH):
- raise
-
-def forget(modname):
- '''"Forget" a module was ever imported by removing it from sys.modules and
- deleting any .pyc and .pyo files.'''
- unload(modname)
- for dirname in sys.path:
- unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
- # Deleting the .pyo file cannot be within the 'try' for the .pyc since
- # the chance exists that there is no .pyc (and thus the 'try' statement
- # is exited) but there is a .pyo file.
- unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
-
-# Check whether a gui is actually available
-def _is_gui_available():
- if hasattr(_is_gui_available, 'result'):
- return _is_gui_available.result
- reason = None
- if sys.platform.startswith('win'):
- # if Python is running as a service (such as the buildbot service),
- # gui interaction may be disallowed
- import ctypes
- import ctypes.wintypes
- UOI_FLAGS = 1
- WSF_VISIBLE = 0x0001
- class USEROBJECTFLAGS(ctypes.Structure):
- _fields_ = [("fInherit", ctypes.wintypes.BOOL),
- ("fReserved", ctypes.wintypes.BOOL),
- ("dwFlags", ctypes.wintypes.DWORD)]
- dll = ctypes.windll.user32
- h = dll.GetProcessWindowStation()
- if not h:
- raise ctypes.WinError()
- uof = USEROBJECTFLAGS()
- needed = ctypes.wintypes.DWORD()
- res = dll.GetUserObjectInformationW(h,
- UOI_FLAGS,
- ctypes.byref(uof),
- ctypes.sizeof(uof),
- ctypes.byref(needed))
- if not res:
- raise ctypes.WinError()
- if not bool(uof.dwFlags & WSF_VISIBLE):
- reason = "gui not available (WSF_VISIBLE flag not set)"
- elif sys.platform == 'darwin':
- # The Aqua Tk implementations on OS X can abort the process if
- # being called in an environment where a window server connection
- # cannot be made, for instance when invoked by a buildbot or ssh
- # process not running under the same user id as the current console
- # user. To avoid that, raise an exception if the window manager
- # connection is not available.
- from ctypes import cdll, c_int, pointer, Structure
- from ctypes.util import find_library
-
- app_services = cdll.LoadLibrary(find_library("ApplicationServices"))
-
- if app_services.CGMainDisplayID() == 0:
- reason = "gui tests cannot run without OS X window manager"
- else:
- class ProcessSerialNumber(Structure):
- _fields_ = [("highLongOfPSN", c_int),
- ("lowLongOfPSN", c_int)]
- psn = ProcessSerialNumber()
- psn_p = pointer(psn)
- if ( (app_services.GetCurrentProcess(psn_p) < 0) or
- (app_services.SetFrontProcess(psn_p) < 0) ):
- reason = "cannot run without OS X gui process"
-
- # check on every platform whether tkinter can actually do anything
- if not reason:
- try:
- from Tkinter import Tk
- root = Tk()
- root.withdraw()
- root.update()
- root.destroy()
- except Exception as e:
- err_string = str(e)
- if len(err_string) > 50:
- err_string = err_string[:50] + ' [...]'
- reason = 'Tk unavailable due to {}: {}'.format(type(e).__name__,
- err_string)
-
- _is_gui_available.reason = reason
- _is_gui_available.result = not reason
-
- return _is_gui_available.result
-
-def is_resource_enabled(resource):
- """Test whether a resource is enabled.
-
- Known resources are set by regrtest.py. If not running under regrtest.py,
- all resources are assumed enabled unless use_resources has been set.
- """
- return use_resources is None or resource in use_resources
-
-def requires(resource, msg=None):
- """Raise ResourceDenied if the specified resource is not available."""
- if not is_resource_enabled(resource):
- if msg is None:
- msg = "Use of the `%s' resource not enabled" % resource
- raise ResourceDenied(msg)
- if resource == 'gui' and not _is_gui_available():
- raise ResourceDenied(_is_gui_available.reason)
-
-def requires_mac_ver(*min_version):
- """Decorator raising SkipTest if the OS is Mac OS X and the OS X
- version if less than min_version.
-
- For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
- is lesser than 10.5.
- """
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kw):
- if sys.platform == 'darwin':
- version_txt = platform.mac_ver()[0]
- try:
- version = tuple(map(int, version_txt.split('.')))
- except ValueError:
- pass
- else:
- if version < min_version:
- min_version_txt = '.'.join(map(str, min_version))
- raise unittest.SkipTest(
- "Mac OS X %s or higher required, not %s"
- % (min_version_txt, version_txt))
- return func(*args, **kw)
- wrapper.min_version = min_version
- return wrapper
- return decorator
-
-
-# Don't use "localhost", since resolving it uses the DNS under recent
-# Windows versions (see issue #18792).
-HOST = "127.0.0.1"
-HOSTv6 = "::1"
-
-
-def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
- """Returns an unused port that should be suitable for binding. This is
- achieved by creating a temporary socket with the same family and type as
- the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
- the specified host address (defaults to 0.0.0.0) with the port set to 0,
- eliciting an unused ephemeral port from the OS. The temporary socket is
- then closed and deleted, and the ephemeral port is returned.
-
- Either this method or bind_port() should be used for any tests where a
- server socket needs to be bound to a particular port for the duration of
- the test. Which one to use depends on whether the calling code is creating
- a python socket, or if an unused port needs to be provided in a constructor
- or passed to an external program (i.e. the -accept argument to openssl's
- s_server mode). Always prefer bind_port() over find_unused_port() where
- possible. Hard coded ports should *NEVER* be used. As soon as a server
- socket is bound to a hard coded port, the ability to run multiple instances
- of the test simultaneously on the same host is compromised, which makes the
- test a ticking time bomb in a buildbot environment. On Unix buildbots, this
- may simply manifest as a failed test, which can be recovered from without
- intervention in most cases, but on Windows, the entire python process can
- completely and utterly wedge, requiring someone to log in to the buildbot
- and manually kill the affected process.
-
- (This is easy to reproduce on Windows, unfortunately, and can be traced to
- the SO_REUSEADDR socket option having different semantics on Windows versus
- Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
- listen and then accept connections on identical host/ports. An EADDRINUSE
- socket.error will be raised at some point (depending on the platform and
- the order bind and listen were called on each socket).
-
- However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
- will ever be raised when attempting to bind two identical host/ports. When
- accept() is called on each socket, the second caller's process will steal
- the port from the first caller, leaving them both in an awkwardly wedged
- state where they'll no longer respond to any signals or graceful kills, and
- must be forcibly killed via OpenProcess()/TerminateProcess().
-
- The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
- instead of SO_REUSEADDR, which effectively affords the same semantics as
- SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
- Source world compared to Windows ones, this is a common mistake. A quick
- look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
- openssl.exe is called with the 's_server' option, for example. See
- http://bugs.python.org/issue2550 for more info. The following site also
- has a very thorough description about the implications of both REUSEADDR
- and EXCLUSIVEADDRUSE on Windows:
- http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
-
- XXX: although this approach is a vast improvement on previous attempts to
- elicit unused ports, it rests heavily on the assumption that the ephemeral
- port returned to us by the OS won't immediately be dished back out to some
- other process when we close and delete our temporary socket but before our
- calling code has a chance to bind the returned port. We can deal with this
- issue if/when we come across it."""
- tempsock = socket.socket(family, socktype)
- port = bind_port(tempsock)
- tempsock.close()
- del tempsock
- return port
-
-def bind_port(sock, host=HOST):
- """Bind the socket to a free port and return the port number. Relies on
- ephemeral ports in order to ensure we are using an unbound port. This is
- important as many tests may be running simultaneously, especially in a
- buildbot environment. This method raises an exception if the sock.family
- is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
- or SO_REUSEPORT set on it. Tests should *never* set these socket options
- for TCP/IP sockets. The only case for setting these options is testing
- multicasting via multiple UDP sockets.
-
- Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
- on Windows), it will be set on the socket. This will prevent anyone else
- from bind()'ing to our host/port for the duration of the test.
- """
- if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
- if hasattr(socket, 'SO_REUSEADDR'):
- if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
- raise TestFailed("tests should never set the SO_REUSEADDR " \
- "socket option on TCP/IP sockets!")
- if hasattr(socket, 'SO_REUSEPORT'):
- try:
- if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
- raise TestFailed("tests should never set the SO_REUSEPORT " \
- "socket option on TCP/IP sockets!")
- except EnvironmentError:
- # Python's socket module was compiled using modern headers
- # thus defining SO_REUSEPORT but this process is running
- # under an older kernel that does not support SO_REUSEPORT.
- pass
- if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
-
- sock.bind((host, 0))
- port = sock.getsockname()[1]
- return port
-
-def _is_ipv6_enabled():
- """Check whether IPv6 is enabled on this host."""
- if socket.has_ipv6:
- sock = None
- try:
- sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
- sock.bind((HOSTv6, 0))
- return True
- except socket.error:
- pass
- finally:
- if sock:
- sock.close()
- return False
-
-IPV6_ENABLED = _is_ipv6_enabled()
-
-def system_must_validate_cert(f):
- """Skip the test on TLS certificate validation failures."""
- @functools.wraps(f)
- def dec(*args, **kwargs):
- try:
- f(*args, **kwargs)
- except IOError as e:
- if "CERTIFICATE_VERIFY_FAILED" in str(e):
- raise unittest.SkipTest("system does not contain "
- "necessary certificates")
- raise
- return dec
-
-FUZZ = 1e-6
-
-def fcmp(x, y): # fuzzy comparison function
- if isinstance(x, float) or isinstance(y, float):
- try:
- fuzz = (abs(x) + abs(y)) * FUZZ
- if abs(x-y) <= fuzz:
- return 0
- except:
- pass
- elif type(x) == type(y) and isinstance(x, (tuple, list)):
- for i in range(min(len(x), len(y))):
- outcome = fcmp(x[i], y[i])
- if outcome != 0:
- return outcome
- return (len(x) > len(y)) - (len(x) < len(y))
- return (x > y) - (x < y)
-
-
-# A constant likely larger than the underlying OS pipe buffer size, to
-# make writes blocking.
-# Windows limit seems to be around 512 B, and many Unix kernels have a
-# 64 KiB pipe buffer size or 16 * PAGE_SIZE: take a few megs to be sure.
-# (see issue #17835 for a discussion of this number).
-PIPE_MAX_SIZE = 4 * 1024 * 1024 + 1
-
-# A constant likely larger than the underlying OS socket buffer size, to make
-# writes blocking.
-# The socket buffer sizes can usually be tuned system-wide (e.g. through sysctl
-# on Linux), or on a per-socket basis (SO_SNDBUF/SO_RCVBUF). See issue #18643
-# for a discussion of this number).
-SOCK_MAX_SIZE = 16 * 1024 * 1024 + 1
-
-is_jython = sys.platform.startswith('java')
-
-try:
- unicode
- have_unicode = True
-except NameError:
- have_unicode = False
-
-requires_unicode = unittest.skipUnless(have_unicode, 'no unicode support')
-
-def u(s):
- return unicode(s, 'unicode-escape')
-
-# FS_NONASCII: non-ASCII Unicode character encodable by
-# sys.getfilesystemencoding(), or None if there is no such character.
-FS_NONASCII = None
-if have_unicode:
- for character in (
- # First try printable and common characters to have a readable filename.
- # For each character, the encoding list are just example of encodings able
- # to encode the character (the list is not exhaustive).
-
- # U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
- unichr(0x00E6),
- # U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
- unichr(0x0130),
- # U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
- unichr(0x0141),
- # U+03C6 (Greek Small Letter Phi): cp1253
- unichr(0x03C6),
- # U+041A (Cyrillic Capital Letter Ka): cp1251
- unichr(0x041A),
- # U+05D0 (Hebrew Letter Alef): Encodable to cp424
- unichr(0x05D0),
- # U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
- unichr(0x060C),
- # U+062A (Arabic Letter Teh): cp720
- unichr(0x062A),
- # U+0E01 (Thai Character Ko Kai): cp874
- unichr(0x0E01),
-
- # Then try more "special" characters. "special" because they may be
- # interpreted or displayed differently depending on the exact locale
- # encoding and the font.
-
- # U+00A0 (No-Break Space)
- unichr(0x00A0),
- # U+20AC (Euro Sign)
- unichr(0x20AC),
- ):
- try:
- character.encode(sys.getfilesystemencoding())\
- .decode(sys.getfilesystemencoding())
- except UnicodeError:
- pass
- else:
- FS_NONASCII = character
- break
-
-# Filename used for testing
-if os.name == 'java':
- # Jython disallows @ in module names
- TESTFN = '$test'
-elif os.name == 'riscos':
- TESTFN = 'testfile'
-else:
- TESTFN = '@test'
- # Unicode name only used if TEST_FN_ENCODING exists for the platform.
- if have_unicode:
- # Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
- # TESTFN_UNICODE is a filename that can be encoded using the
- # file system encoding, but *not* with the default (ascii) encoding
- if isinstance('', unicode):
- # python -U
- # XXX perhaps unicode() should accept Unicode strings?
- TESTFN_UNICODE = "@test-\xe0\xf2"
- else:
- # 2 latin characters.
- TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
- TESTFN_ENCODING = sys.getfilesystemencoding()
- # TESTFN_UNENCODABLE is a filename that should *not* be
- # able to be encoded by *either* the default or filesystem encoding.
- # This test really only makes sense on Windows NT platforms
- # which have special Unicode support in posixmodule.
- if (not hasattr(sys, "getwindowsversion") or
- sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
- TESTFN_UNENCODABLE = None
- else:
- # Japanese characters (I think - from bug 846133)
- TESTFN_UNENCODABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
- try:
- # XXX - Note - should be using TESTFN_ENCODING here - but for
- # Windows, "mbcs" currently always operates as if in
- # errors=ignore' mode - hence we get '?' characters rather than
- # the exception. 'Latin1' operates as we expect - ie, fails.
- # See [ 850997 ] mbcs encoding ignores errors
- TESTFN_UNENCODABLE.encode("Latin1")
- except UnicodeEncodeError:
- pass
- else:
- print \
- 'WARNING: The filename %r CAN be encoded by the filesystem. ' \
- 'Unicode filename tests may not be effective' \
- % TESTFN_UNENCODABLE
-
-
-# Disambiguate TESTFN for parallel testing, while letting it remain a valid
-# module name.
-TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
-
-# Save the initial cwd
-SAVEDCWD = os.getcwd()
-
-@contextlib.contextmanager
-def change_cwd(path, quiet=False):
- """Return a context manager that changes the current working directory.
-
- Arguments:
-
- path: the directory to use as the temporary current working directory.
-
- quiet: if False (the default), the context manager raises an exception
- on error. Otherwise, it issues only a warning and keeps the current
- working directory the same.
-
- """
- saved_dir = os.getcwd()
- try:
- os.chdir(path)
- except OSError:
- if not quiet:
- raise
- warnings.warn('tests may fail, unable to change CWD to: ' + path,
- RuntimeWarning, stacklevel=3)
- try:
- yield os.getcwd()
- finally:
- os.chdir(saved_dir)
-
-
-@contextlib.contextmanager
-def temp_cwd(name='tempcwd', quiet=False):
- """
- Context manager that creates a temporary directory and set it as CWD.
-
- The new CWD is created in the current directory and it's named *name*.
- If *quiet* is False (default) and it's not possible to create or change
- the CWD, an error is raised. If it's True, only a warning is raised
- and the original CWD is used.
- """
- if (have_unicode and isinstance(name, unicode) and
- not os.path.supports_unicode_filenames):
- try:
- name = name.encode(sys.getfilesystemencoding() or 'ascii')
- except UnicodeEncodeError:
- if not quiet:
- raise unittest.SkipTest('unable to encode the cwd name with '
- 'the filesystem encoding.')
- saved_dir = os.getcwd()
- is_temporary = False
- try:
- os.mkdir(name)
- os.chdir(name)
- is_temporary = True
- except OSError:
- if not quiet:
- raise
- warnings.warn('tests may fail, unable to change the CWD to ' + name,
- RuntimeWarning, stacklevel=3)
- try:
- yield os.getcwd()
- finally:
- os.chdir(saved_dir)
- if is_temporary:
- rmtree(name)
-
-
-def findfile(file, here=None, subdir=None):
- """Try to find a file on sys.path and the working directory. If it is not
- found the argument passed to the function is returned (this does not
- necessarily signal failure; could still be the legitimate path)."""
- import test
- if os.path.isabs(file):
- return file
- if subdir is not None:
- file = os.path.join(subdir, file)
- path = sys.path
- if here is None:
- path = test.__path__ + path
- else:
- path = [os.path.dirname(here)] + path
- for dn in path:
- fn = os.path.join(dn, file)
- if os.path.exists(fn): return fn
- return file
-
-def sortdict(dict):
- "Like repr(dict), but in sorted order."
- items = dict.items()
- items.sort()
- reprpairs = ["%r: %r" % pair for pair in items]
- withcommas = ", ".join(reprpairs)
- return "{%s}" % withcommas
-
-def make_bad_fd():
- """
- Create an invalid file descriptor by opening and closing a file and return
- its fd.
- """
- file = open(TESTFN, "wb")
- try:
- return file.fileno()
- finally:
- file.close()
- unlink(TESTFN)
-
-def check_syntax_error(testcase, statement):
- testcase.assertRaises(SyntaxError, compile, statement,
- '<test string>', 'exec')
-
-def open_urlresource(url, check=None):
- import urlparse, urllib2
-
- filename = urlparse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
-
- fn = os.path.join(os.path.dirname(__file__), "data", filename)
-
- def check_valid_file(fn):
- f = open(fn)
- if check is None:
- return f
- elif check(f):
- f.seek(0)
- return f
- f.close()
-
- if os.path.exists(fn):
- f = check_valid_file(fn)
- if f is not None:
- return f
- unlink(fn)
-
- # Verify the requirement before downloading the file
- requires('urlfetch')
-
- print >> get_original_stdout(), '\tfetching %s ...' % url
- f = urllib2.urlopen(url, timeout=15)
- try:
- with open(fn, "wb") as out:
- s = f.read()
- while s:
- out.write(s)
- s = f.read()
- finally:
- f.close()
-
- f = check_valid_file(fn)
- if f is not None:
- return f
- raise TestFailed('invalid resource "%s"' % fn)
-
-
-class WarningsRecorder(object):
- """Convenience wrapper for the warnings list returned on
- entry to the warnings.catch_warnings() context manager.
- """
- def __init__(self, warnings_list):
- self._warnings = warnings_list
- self._last = 0
-
- def __getattr__(self, attr):
- if len(self._warnings) > self._last:
- return getattr(self._warnings[-1], attr)
- elif attr in warnings.WarningMessage._WARNING_DETAILS:
- return None
- raise AttributeError("%r has no attribute %r" % (self, attr))
-
- @property
- def warnings(self):
- return self._warnings[self._last:]
-
- def reset(self):
- self._last = len(self._warnings)
-
-
-def _filterwarnings(filters, quiet=False):
- """Catch the warnings, then check if all the expected
- warnings have been raised and re-raise unexpected warnings.
- If 'quiet' is True, only re-raise the unexpected warnings.
- """
- # Clear the warning registry of the calling module
- # in order to re-raise the warnings.
- frame = sys._getframe(2)
- registry = frame.f_globals.get('__warningregistry__')
- if registry:
- registry.clear()
- with warnings.catch_warnings(record=True) as w:
- # Set filter "always" to record all warnings. Because
- # test_warnings swap the module, we need to look up in
- # the sys.modules dictionary.
- sys.modules['warnings'].simplefilter("always")
- yield WarningsRecorder(w)
- # Filter the recorded warnings
- reraise = [warning.message for warning in w]
- missing = []
- for msg, cat in filters:
- seen = False
- for exc in reraise[:]:
- message = str(exc)
- # Filter out the matching messages
- if (re.match(msg, message, re.I) and
- issubclass(exc.__class__, cat)):
- seen = True
- reraise.remove(exc)
- if not seen and not quiet:
- # This filter caught nothing
- missing.append((msg, cat.__name__))
- if reraise:
- raise AssertionError("unhandled warning %r" % reraise[0])
- if missing:
- raise AssertionError("filter (%r, %s) did not catch any warning" %
- missing[0])
-
-
-@contextlib.contextmanager
-def check_warnings(*filters, **kwargs):
- """Context manager to silence warnings.
-
- Accept 2-tuples as positional arguments:
- ("message regexp", WarningCategory)
-
- Optional argument:
- - if 'quiet' is True, it does not fail if a filter catches nothing
- (default True without argument,
- default False if some filters are defined)
-
- Without argument, it defaults to:
- check_warnings(("", Warning), quiet=True)
- """
- quiet = kwargs.get('quiet')
- if not filters:
- filters = (("", Warning),)
- # Preserve backward compatibility
- if quiet is None:
- quiet = True
- return _filterwarnings(filters, quiet)
-
-
-@contextlib.contextmanager
-def check_py3k_warnings(*filters, **kwargs):
- """Context manager to silence py3k warnings.
-
- Accept 2-tuples as positional arguments:
- ("message regexp", WarningCategory)
-
- Optional argument:
- - if 'quiet' is True, it does not fail if a filter catches nothing
- (default False)
-
- Without argument, it defaults to:
- check_py3k_warnings(("", DeprecationWarning), quiet=False)
- """
- if sys.py3kwarning:
- if not filters:
- filters = (("", DeprecationWarning),)
- else:
- # It should not raise any py3k warning
- filters = ()
- return _filterwarnings(filters, kwargs.get('quiet'))
-
-
-class CleanImport(object):
- """Context manager to force import to return a new module reference.
-
- This is useful for testing module-level behaviours, such as
- the emission of a DeprecationWarning on import.
-
- Use like this:
-
- with CleanImport("foo"):
- importlib.import_module("foo") # new reference
- """
-
- def __init__(self, *module_names):
- self.original_modules = sys.modules.copy()
- for module_name in module_names:
- if module_name in sys.modules:
- module = sys.modules[module_name]
- # It is possible that module_name is just an alias for
- # another module (e.g. stub for modules renamed in 3.x).
- # In that case, we also need delete the real module to clear
- # the import cache.
- if module.__name__ != module_name:
- del sys.modules[module.__name__]
- del sys.modules[module_name]
-
- def __enter__(self):
- return self
-
- def __exit__(self, *ignore_exc):
- sys.modules.update(self.original_modules)
-
-
-class EnvironmentVarGuard(UserDict.DictMixin):
-
- """Class to help protect the environment variable properly. Can be used as
- a context manager."""
-
- def __init__(self):
- self._environ = os.environ
- self._changed = {}
-
- def __getitem__(self, envvar):
- return self._environ[envvar]
-
- def __setitem__(self, envvar, value):
- # Remember the initial value on the first access
- if envvar not in self._changed:
- self._changed[envvar] = self._environ.get(envvar)
- self._environ[envvar] = value
-
- def __delitem__(self, envvar):
- # Remember the initial value on the first access
- if envvar not in self._changed:
- self._changed[envvar] = self._environ.get(envvar)
- if envvar in self._environ:
- del self._environ[envvar]
-
- def keys(self):
- return self._environ.keys()
-
- def set(self, envvar, value):
- self[envvar] = value
-
- def unset(self, envvar):
- del self[envvar]
-
- def __enter__(self):
- return self
-
- def __exit__(self, *ignore_exc):
- for (k, v) in self._changed.items():
- if v is None:
- if k in self._environ:
- del self._environ[k]
- else:
- self._environ[k] = v
- os.environ = self._environ
-
-
-class DirsOnSysPath(object):
- """Context manager to temporarily add directories to sys.path.
-
- This makes a copy of sys.path, appends any directories given
- as positional arguments, then reverts sys.path to the copied
- settings when the context ends.
-
- Note that *all* sys.path modifications in the body of the
- context manager, including replacement of the object,
- will be reverted at the end of the block.
- """
-
- def __init__(self, *paths):
- self.original_value = sys.path[:]
- self.original_object = sys.path
- sys.path.extend(paths)
-
- def __enter__(self):
- return self
-
- def __exit__(self, *ignore_exc):
- sys.path = self.original_object
- sys.path[:] = self.original_value
-
-
-class TransientResource(object):
-
- """Raise ResourceDenied if an exception is raised while the context manager
- is in effect that matches the specified exception and attributes."""
-
- def __init__(self, exc, **kwargs):
- self.exc = exc
- self.attrs = kwargs
-
- def __enter__(self):
- return self
-
- def __exit__(self, type_=None, value=None, traceback=None):
- """If type_ is a subclass of self.exc and value has attributes matching
- self.attrs, raise ResourceDenied. Otherwise let the exception
- propagate (if any)."""
- if type_ is not None and issubclass(self.exc, type_):
- for attr, attr_value in self.attrs.iteritems():
- if not hasattr(value, attr):
- break
- if getattr(value, attr) != attr_value:
- break
- else:
- raise ResourceDenied("an optional resource is not available")
-
-
-@contextlib.contextmanager
-def transient_internet(resource_name, timeout=30.0, errnos=()):
- """Return a context manager that raises ResourceDenied when various issues
- with the Internet connection manifest themselves as exceptions."""
- default_errnos = [
- ('ECONNREFUSED', 111),
- ('ECONNRESET', 104),
- ('EHOSTUNREACH', 113),
- ('ENETUNREACH', 101),
- ('ETIMEDOUT', 110),
- ]
- default_gai_errnos = [
- ('EAI_AGAIN', -3),
- ('EAI_FAIL', -4),
- ('EAI_NONAME', -2),
- ('EAI_NODATA', -5),
- # Windows defines EAI_NODATA as 11001 but idiotic getaddrinfo()
- # implementation actually returns WSANO_DATA i.e. 11004.
- ('WSANO_DATA', 11004),
- ]
-
- denied = ResourceDenied("Resource '%s' is not available" % resource_name)
- captured_errnos = errnos
- gai_errnos = []
- if not captured_errnos:
- captured_errnos = [getattr(errno, name, num)
- for (name, num) in default_errnos]
- gai_errnos = [getattr(socket, name, num)
- for (name, num) in default_gai_errnos]
-
- def filter_error(err):
- n = getattr(err, 'errno', None)
- if (isinstance(err, socket.timeout) or
- (isinstance(err, socket.gaierror) and n in gai_errnos) or
- n in captured_errnos):
- if not verbose:
- sys.stderr.write(denied.args[0] + "\n")
- raise denied
-
- old_timeout = socket.getdefaulttimeout()
- try:
- if timeout is not None:
- socket.setdefaulttimeout(timeout)
- yield
- except IOError as err:
- # urllib can wrap original socket errors multiple times (!), we must
- # unwrap to get at the original error.
- while True:
- a = err.args
- if len(a) >= 1 and isinstance(a[0], IOError):
- err = a[0]
- # The error can also be wrapped as args[1]:
- # except socket.error as msg:
- # raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
- elif len(a) >= 2 and isinstance(a[1], IOError):
- err = a[1]
- else:
- break
- filter_error(err)
- raise
- # XXX should we catch generic exceptions and look for their
- # __cause__ or __context__?
- finally:
- socket.setdefaulttimeout(old_timeout)
-
-
-@contextlib.contextmanager
-def captured_output(stream_name):
- """Return a context manager used by captured_stdout and captured_stdin
- that temporarily replaces the sys stream *stream_name* with a StringIO."""
- import StringIO
- orig_stdout = getattr(sys, stream_name)
- setattr(sys, stream_name, StringIO.StringIO())
- try:
- yield getattr(sys, stream_name)
- finally:
- setattr(sys, stream_name, orig_stdout)
-
-def captured_stdout():
- """Capture the output of sys.stdout:
-
- with captured_stdout() as s:
- print "hello"
- self.assertEqual(s.getvalue(), "hello")
- """
- return captured_output("stdout")
-
-def captured_stderr():
- return captured_output("stderr")
-
-def captured_stdin():
- return captured_output("stdin")
-
-def gc_collect():
- """Force as many objects as possible to be collected.
-
- In non-CPython implementations of Python, this is needed because timely
- deallocation is not guaranteed by the garbage collector. (Even in CPython
- this can be the case in case of reference cycles.) This means that __del__
- methods may be called later than expected and weakrefs may remain alive for
- longer than expected. This function tries its best to force all garbage
- objects to disappear.
- """
- gc.collect()
- if is_jython:
- time.sleep(0.1)
- gc.collect()
- gc.collect()
-
-
-_header = '2P'
-if hasattr(sys, "gettotalrefcount"):
- _header = '2P' + _header
-_vheader = _header + 'P'
-
-def calcobjsize(fmt):
- return struct.calcsize(_header + fmt + '0P')
-
-def calcvobjsize(fmt):
- return struct.calcsize(_vheader + fmt + '0P')
-
-
-_TPFLAGS_HAVE_GC = 1<<14
-_TPFLAGS_HEAPTYPE = 1<<9
-
-def check_sizeof(test, o, size):
- import _testcapi
- result = sys.getsizeof(o)
- # add GC header size
- if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
- ((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
- size += _testcapi.SIZEOF_PYGC_HEAD
- msg = 'wrong size for %s: got %d, expected %d' \
- % (type(o), result, size)
- test.assertEqual(result, size, msg)
-
-
-#=======================================================================
-# Decorator for running a function in a different locale, correctly resetting
-# it afterwards.
-
-def run_with_locale(catstr, *locales):
- def decorator(func):
- def inner(*args, **kwds):
- try:
- import locale
- category = getattr(locale, catstr)
- orig_locale = locale.setlocale(category)
- except AttributeError:
- # if the test author gives us an invalid category string
- raise
- except:
- # cannot retrieve original locale, so do nothing
- locale = orig_locale = None
- else:
- for loc in locales:
- try:
- locale.setlocale(category, loc)
- break
- except:
- pass
-
- # now run the function, resetting the locale on exceptions
- try:
- return func(*args, **kwds)
- finally:
- if locale and orig_locale:
- locale.setlocale(category, orig_locale)
- inner.func_name = func.func_name
- inner.__doc__ = func.__doc__
- return inner
- return decorator
-
-#=======================================================================
-# Decorator for running a function in a specific timezone, correctly
-# resetting it afterwards.
-
-def run_with_tz(tz):
- def decorator(func):
- def inner(*args, **kwds):
- try:
- tzset = time.tzset
- except AttributeError:
- raise unittest.SkipTest("tzset required")
- if 'TZ' in os.environ:
- orig_tz = os.environ['TZ']
- else:
- orig_tz = None
- os.environ['TZ'] = tz
- tzset()
-
- # now run the function, resetting the tz on exceptions
- try:
- return func(*args, **kwds)
- finally:
- if orig_tz is None:
- del os.environ['TZ']
- else:
- os.environ['TZ'] = orig_tz
- time.tzset()
-
- inner.__name__ = func.__name__
- inner.__doc__ = func.__doc__
- return inner
- return decorator
-
-#=======================================================================
-# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
-
-# Some handy shorthands. Note that these are used for byte-limits as well
-# as size-limits, in the various bigmem tests
-_1M = 1024*1024
-_1G = 1024 * _1M
-_2G = 2 * _1G
-_4G = 4 * _1G
-
-MAX_Py_ssize_t = sys.maxsize
-
-def set_memlimit(limit):
- global max_memuse
- global real_max_memuse
- sizes = {
- 'k': 1024,
- 'm': _1M,
- 'g': _1G,
- 't': 1024*_1G,
- }
- m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
- re.IGNORECASE | re.VERBOSE)
- if m is None:
- raise ValueError('Invalid memory limit %r' % (limit,))
- memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
- real_max_memuse = memlimit
- if memlimit > MAX_Py_ssize_t:
- memlimit = MAX_Py_ssize_t
- if memlimit < _2G - 1:
- raise ValueError('Memory limit %r too low to be useful' % (limit,))
- max_memuse = memlimit
-
-def bigmemtest(minsize, memuse, overhead=5*_1M):
- """Decorator for bigmem tests.
-
- 'minsize' is the minimum useful size for the test (in arbitrary,
- test-interpreted units.) 'memuse' is the number of 'bytes per size' for
- the test, or a good estimate of it. 'overhead' specifies fixed overhead,
- independent of the testsize, and defaults to 5Mb.
-
- The decorator tries to guess a good value for 'size' and passes it to
- the decorated test function. If minsize * memuse is more than the
- allowed memory use (as defined by max_memuse), the test is skipped.
- Otherwise, minsize is adjusted upward to use up to max_memuse.
- """
- def decorator(f):
- def wrapper(self):
- if not max_memuse:
- # If max_memuse is 0 (the default),
- # we still want to run the tests with size set to a few kb,
- # to make sure they work. We still want to avoid using
- # too much memory, though, but we do that noisily.
- maxsize = 5147
- self.assertFalse(maxsize * memuse + overhead > 20 * _1M)
- else:
- maxsize = int((max_memuse - overhead) / memuse)
- if maxsize < minsize:
- # Really ought to print 'test skipped' or something
- if verbose:
- sys.stderr.write("Skipping %s because of memory "
- "constraint\n" % (f.__name__,))
- return
- # Try to keep some breathing room in memory use
- maxsize = max(maxsize - 50 * _1M, minsize)
- return f(self, maxsize)
- wrapper.minsize = minsize
- wrapper.memuse = memuse
- wrapper.overhead = overhead
- return wrapper
- return decorator
-
-def precisionbigmemtest(size, memuse, overhead=5*_1M, dry_run=True):
- def decorator(f):
- def wrapper(self):
- if not real_max_memuse:
- maxsize = 5147
- else:
- maxsize = size
-
- if ((real_max_memuse or not dry_run)
- and real_max_memuse < maxsize * memuse):
- if verbose:
- sys.stderr.write("Skipping %s because of memory "
- "constraint\n" % (f.__name__,))
- return
-
- return f(self, maxsize)
- wrapper.size = size
- wrapper.memuse = memuse
- wrapper.overhead = overhead
- return wrapper
- return decorator
-
-def bigaddrspacetest(f):
- """Decorator for tests that fill the address space."""
- def wrapper(self):
- if max_memuse < MAX_Py_ssize_t:
- if verbose:
- sys.stderr.write("Skipping %s because of memory "
- "constraint\n" % (f.__name__,))
- else:
- return f(self)
- return wrapper
-
-#=======================================================================
-# unittest integration.
-
-class BasicTestRunner:
- def run(self, test):
- result = unittest.TestResult()
- test(result)
- return result
-
-def _id(obj):
- return obj
-
-def requires_resource(resource):
- if resource == 'gui' and not _is_gui_available():
- return unittest.skip(_is_gui_available.reason)
- if is_resource_enabled(resource):
- return _id
- else:
- return unittest.skip("resource {0!r} is not enabled".format(resource))
-
-def cpython_only(test):
- """
- Decorator for tests only applicable on CPython.
- """
- return impl_detail(cpython=True)(test)
-
-def impl_detail(msg=None, **guards):
- if check_impl_detail(**guards):
- return _id
- if msg is None:
- guardnames, default = _parse_guards(guards)
- if default:
- msg = "implementation detail not available on {0}"
- else:
- msg = "implementation detail specific to {0}"
- guardnames = sorted(guardnames.keys())
- msg = msg.format(' or '.join(guardnames))
- return unittest.skip(msg)
-
-def _parse_guards(guards):
- # Returns a tuple ({platform_name: run_me}, default_value)
- if not guards:
- return ({'cpython': True}, False)
- is_true = guards.values()[0]
- assert guards.values() == [is_true] * len(guards) # all True or all False
- return (guards, not is_true)
-
-# Use the following check to guard CPython's implementation-specific tests --
-# or to run them only on the implementation(s) guarded by the arguments.
-def check_impl_detail(**guards):
- """This function returns True or False depending on the host platform.
- Examples:
- if check_impl_detail(): # only on CPython (default)
- if check_impl_detail(jython=True): # only on Jython
- if check_impl_detail(cpython=False): # everywhere except on CPython
- """
- guards, default = _parse_guards(guards)
- return guards.get(platform.python_implementation().lower(), default)
-
-# ----------------------------------
-# PyPy extension: you can run::
-# python ..../test_foo.py --pdb
-# to get a pdb prompt in case of exceptions
-
-ResultClass = unittest.TextTestRunner.resultclass
-
-class TestResultWithPdb(ResultClass):
-
- def addError(self, testcase, exc_info):
- ResultClass.addError(self, testcase, exc_info)
- if '--pdb' in sys.argv:
- import pdb, traceback
- traceback.print_tb(exc_info[2])
- pdb.post_mortem(exc_info[2])
-
-# ----------------------------------
-
-def _run_suite(suite):
- """Run tests from a unittest.TestSuite-derived class."""
- if verbose:
- runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
- resultclass=TestResultWithPdb)
- else:
- runner = BasicTestRunner()
-
- result = runner.run(suite)
- if not result.wasSuccessful():
- if len(result.errors) == 1 and not result.failures:
- err = result.errors[0][1]
- elif len(result.failures) == 1 and not result.errors:
- err = result.failures[0][1]
- else:
- err = "multiple errors occurred"
- if not verbose:
- err += "; run in verbose mode for details"
- raise TestFailed(err)
-
-# ----------------------------------
-# PyPy extension: you can run::
-# python ..../test_foo.py --filter bar
-# to run only the test cases whose name contains bar
-
-def filter_maybe(suite):
- try:
- i = sys.argv.index('--filter')
- filter = sys.argv[i+1]
- except (ValueError, IndexError):
- return suite
- tests = []
- for test in linearize_suite(suite):
- if filter in test._testMethodName:
- tests.append(test)
- return unittest.TestSuite(tests)
-
-def linearize_suite(suite_or_test):
- try:
- it = iter(suite_or_test)
- except TypeError:
- yield suite_or_test
- return
- for subsuite in it:
- for item in linearize_suite(subsuite):
- yield item
-
-# ----------------------------------
-
-def run_unittest(*classes):
- """Run tests from unittest.TestCase-derived classes."""
- valid_types = (unittest.TestSuite, unittest.TestCase)
- suite = unittest.TestSuite()
- for cls in classes:
- if isinstance(cls, str):
- if cls in sys.modules:
- suite.addTest(unittest.findTestCases(sys.modules[cls]))
- else:
- raise ValueError("str arguments must be keys in sys.modules")
- elif isinstance(cls, valid_types):
- suite.addTest(cls)
- else:
- suite.addTest(unittest.makeSuite(cls))
- suite = filter_maybe(suite)
- _run_suite(suite)
-
-#=======================================================================
-# Check for the presence of docstrings.
-
-HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
- sys.platform == 'win32' or
- sysconfig.get_config_var('WITH_DOC_STRINGS'))
-
-requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
- "test requires docstrings")
-
-
-#=======================================================================
-# doctest driver.
-
-def run_doctest(module, verbosity=None):
- """Run doctest on the given module. Return (#failures, #tests).
-
- If optional argument verbosity is not specified (or is None), pass
- test_support's belief about verbosity on to doctest. Else doctest's
- usual behavior is used (it searches sys.argv for -v).
- """
-
- import doctest
-
- if verbosity is None:
- verbosity = verbose
- else:
- verbosity = None
-
- # Direct doctest output (normally just errors) to real stdout; doctest
- # output shouldn't be compared by regrtest.
- save_stdout = sys.stdout
- sys.stdout = get_original_stdout()
- try:
- f, t = doctest.testmod(module, verbose=verbosity)
- if f:
- raise TestFailed("%d of %d doctests failed" % (f, t))
- finally:
- sys.stdout = save_stdout
- if verbose:
- print 'doctest (%s) ... %d tests with zero failures' % (module.__name__, t)
- return f, t
-
-#=======================================================================
-# Threading support to prevent reporting refleaks when running regrtest.py -R
-
-# NOTE: we use thread._count() rather than threading.enumerate() (or the
-# moral equivalent thereof) because a threading.Thread object is still alive
-# until its __bootstrap() method has returned, even after it has been
-# unregistered from the threading module.
-# thread._count(), on the other hand, only gets decremented *after* the
-# __bootstrap() method has returned, which gives us reliable reference counts
-# at the end of a test run.
-
-def threading_setup():
- if thread:
- return thread._count(),
- else:
- return 1,
-
-def threading_cleanup(nb_threads):
- if not thread:
- return
-
- _MAX_COUNT = 10
- for count in range(_MAX_COUNT):
- n = thread._count()
- if n == nb_threads:
- break
- time.sleep(0.1)
- # XXX print a warning in case of failure?
-
-def reap_threads(func):
- """Use this function when threads are being used. This will
- ensure that the threads are cleaned up even when the test fails.
- If threading is unavailable this function does nothing.
- """
- if not thread:
- return func
-
- @functools.wraps(func)
- def decorator(*args):
- key = threading_setup()
- try:
- return func(*args)
- finally:
- threading_cleanup(*key)
- return decorator
-
-def reap_children():
- """Use this function at the end of test_main() whenever sub-processes
- are started. This will help ensure that no extra children (zombies)
- stick around to hog resources and create problems when looking
- for refleaks.
- """
-
- # Reap all our dead child processes so we don't leave zombies around.
- # These hog resources and might be causing some of the buildbots to die.
- if hasattr(os, 'waitpid'):
- any_process = -1
- while True:
- try:
- # This will raise an exception on Windows. That's ok.
- pid, status = os.waitpid(any_process, os.WNOHANG)
- if pid == 0:
- break
- except:
- break
-
-@contextlib.contextmanager
-def start_threads(threads, unlock=None):
- threads = list(threads)
- started = []
- try:
- try:
- for t in threads:
- t.start()
- started.append(t)
- except:
- if verbose:
- print("Can't start %d threads, only %d threads started" %
- (len(threads), len(started)))
- raise
- yield
- finally:
- if unlock:
- unlock()
- endtime = starttime = time.time()
- for timeout in range(1, 16):
- endtime += 60
- for t in started:
- t.join(max(endtime - time.time(), 0.01))
- started = [t for t in started if t.isAlive()]
- if not started:
- break
- if verbose:
- print('Unable to join %d threads during a period of '
- '%d minutes' % (len(started), timeout))
- started = [t for t in started if t.isAlive()]
- if started:
- raise AssertionError('Unable to join %d threads' % len(started))
-
-@contextlib.contextmanager
-def swap_attr(obj, attr, new_val):
- """Temporary swap out an attribute with a new object.
-
- Usage:
- with swap_attr(obj, "attr", 5):
- ...
-
- This will set obj.attr to 5 for the duration of the with: block,
- restoring the old value at the end of the block. If `attr` doesn't
- exist on `obj`, it will be created and then deleted at the end of the
- block.
- """
- if hasattr(obj, attr):
- real_val = getattr(obj, attr)
- setattr(obj, attr, new_val)
- try:
- yield
- finally:
- setattr(obj, attr, real_val)
- else:
- setattr(obj, attr, new_val)
- try:
- yield
- finally:
- delattr(obj, attr)
-
-def py3k_bytes(b):
- """Emulate the py3k bytes() constructor.
-
- NOTE: This is only a best effort function.
- """
- try:
- # memoryview?
- return b.tobytes()
- except AttributeError:
- try:
- # iterable of ints?
- return b"".join(chr(x) for x in b)
- except TypeError:
- return bytes(b)
-
-def args_from_interpreter_flags():
- """Return a list of command-line arguments reproducing the current
- settings in sys.flags."""
- import subprocess
- return subprocess._args_from_interpreter_flags()
-
-def strip_python_stderr(stderr):
- """Strip the stderr of a Python process from potential debug output
- emitted by the interpreter.
-
- This will typically be run on the result of the communicate() method
- of a subprocess.Popen object.
- """
- stderr = re.sub(br"\[\d+ refs\]\r?\n?$", b"", stderr).strip()
- return stderr
-
-
-def check_free_after_iterating(test, iter, cls, args=()):
- class A(cls):
- def __del__(self):
- done[0] = True
- try:
- next(it)
- except StopIteration:
- pass
-
- done = [False]
- it = iter(A(*args))
- # Issue 26494: Shouldn't crash
- test.assertRaises(StopIteration, next, it)
- # The sequence should be deallocated just after the end of iterating
- gc_collect()
- test.assertTrue(done[0])
+import test.support
+sys.modules['test.test_support'] = test.support
diff --git a/lib-python/2.7/test/test_syntax.py b/lib-python/2.7/test/test_syntax.py
index 797b7cc8e0..d09df51c7e 100644
--- a/lib-python/2.7/test/test_syntax.py
+++ b/lib-python/2.7/test/test_syntax.py
@@ -74,13 +74,12 @@ SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
- File "<doctest test.test_syntax[8]>", line 1
+ File "<doctest test.test_syntax[9]>", line 1
SyntaxError: can't assign to literal
->>> `1` = 1
+>>> b"" = 1
Traceback (most recent call last):
- File "<doctest test.test_syntax[10]>", line 1
-SyntaxError: can't assign to repr
+SyntaxError: can't assign to literal
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that contain should still cause a syntax error.
@@ -149,7 +148,7 @@ SyntaxError: cannot assign to None
From ast_for_call():
->>> def f(it, *varargs):
+>>> def f(it, *varargs, **kwargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
@@ -158,73 +157,179 @@ From ast_for_call():
Traceback (most recent call last):
File "<doctest test.test_syntax[23]>", line 1
SyntaxError: Generator expression must be parenthesized if not sole argument
+>>> f(x for x in L, y=1)
+Traceback (most recent call last):
+SyntaxError: Generator expression must be parenthesized if not sole argument
+>>> f(L, x for x in L)
+Traceback (most recent call last):
+SyntaxError: Generator expression must be parenthesized if not sole argument
+>>> f(x for x in L, y for y in L)
+Traceback (most recent call last):
+SyntaxError: Generator expression must be parenthesized if not sole argument
+>>> f(x for x in L,)
+[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
->>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
-... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
-... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
-... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
-... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
-... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
-... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
-... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
-... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
-... i100, i101, i102, i103, i104, i105, i106, i107, i108,
-... i109, i110, i111, i112, i113, i114, i115, i116, i117,
-... i118, i119, i120, i121, i122, i123, i124, i125, i126,
-... i127, i128, i129, i130, i131, i132, i133, i134, i135,
-... i136, i137, i138, i139, i140, i141, i142, i143, i144,
-... i145, i146, i147, i148, i149, i150, i151, i152, i153,
-... i154, i155, i156, i157, i158, i159, i160, i161, i162,
-... i163, i164, i165, i166, i167, i168, i169, i170, i171,
-... i172, i173, i174, i175, i176, i177, i178, i179, i180,
-... i181, i182, i183, i184, i185, i186, i187, i188, i189,
-... i190, i191, i192, i193, i194, i195, i196, i197, i198,
-... i199, i200, i201, i202, i203, i204, i205, i206, i207,
-... i208, i209, i210, i211, i212, i213, i214, i215, i216,
-... i217, i218, i219, i220, i221, i222, i223, i224, i225,
-... i226, i227, i228, i229, i230, i231, i232, i233, i234,
-... i235, i236, i237, i238, i239, i240, i241, i242, i243,
-... i244, i245, i246, i247, i248, i249, i250, i251, i252,
-... i253, i254, i255)
+>>> def g(*args, **kwargs):
+... print args, sorted(kwargs.items())
+>>> g(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+... 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+... 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+... 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+... 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+... 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+... 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+... 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+... 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+... 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+... 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+... 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+... 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+... 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+... 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+... 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+... 248, 249, 250, 251, 252, 253, 254) # doctest: +ELLIPSIS
+(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ..., 252, 253, 254) []
+>>> g(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+... 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+... 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+... 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+... 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+... 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+... 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+... 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+... 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+... 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+... 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+... 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+... 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+... 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+... 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+... 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+... 248, 249, 250, 251, 252, 253, 254, 255)
Traceback (most recent call last):
File "<doctest test.test_syntax[25]>", line 1
SyntaxError: more than 255 arguments
-The actual error cases counts positional arguments, keyword arguments,
-and generator expression arguments separately. This test combines the
-three.
-
->>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
-... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
-... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
-... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
-... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
-... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
-... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
-... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
-... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
-... i100, i101, i102, i103, i104, i105, i106, i107, i108,
-... i109, i110, i111, i112, i113, i114, i115, i116, i117,
-... i118, i119, i120, i121, i122, i123, i124, i125, i126,
-... i127, i128, i129, i130, i131, i132, i133, i134, i135,
-... i136, i137, i138, i139, i140, i141, i142, i143, i144,
-... i145, i146, i147, i148, i149, i150, i151, i152, i153,
-... i154, i155, i156, i157, i158, i159, i160, i161, i162,
-... i163, i164, i165, i166, i167, i168, i169, i170, i171,
-... i172, i173, i174, i175, i176, i177, i178, i179, i180,
-... i181, i182, i183, i184, i185, i186, i187, i188, i189,
-... i190, i191, i192, i193, i194, i195, i196, i197, i198,
-... i199, i200, i201, i202, i203, i204, i205, i206, i207,
-... i208, i209, i210, i211, i212, i213, i214, i215, i216,
-... i217, i218, i219, i220, i221, i222, i223, i224, i225,
-... i226, i227, i228, i229, i230, i231, i232, i233, i234,
-... i235, i236, i237, i238, i239, i240, i241, i242, i243,
-... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
-... i252=1, i253=1, i254=1, i255=1)
-Traceback (most recent call last):
- File "<doctest test.test_syntax[26]>", line 1
+>>> g(a000=0, a001=1, a002=2, a003=3, a004=4, a005=5, a006=6, a007=7, a008=8,
+... a009=9, a010=10, a011=11, a012=12, a013=13, a014=14, a015=15, a016=16,
+... a017=17, a018=18, a019=19, a020=20, a021=21, a022=22, a023=23, a024=24,
+... a025=25, a026=26, a027=27, a028=28, a029=29, a030=30, a031=31, a032=32,
+... a033=33, a034=34, a035=35, a036=36, a037=37, a038=38, a039=39, a040=40,
+... a041=41, a042=42, a043=43, a044=44, a045=45, a046=46, a047=47, a048=48,
+... a049=49, a050=50, a051=51, a052=52, a053=53, a054=54, a055=55, a056=56,
+... a057=57, a058=58, a059=59, a060=60, a061=61, a062=62, a063=63, a064=64,
+... a065=65, a066=66, a067=67, a068=68, a069=69, a070=70, a071=71, a072=72,
+... a073=73, a074=74, a075=75, a076=76, a077=77, a078=78, a079=79, a080=80,
+... a081=81, a082=82, a083=83, a084=84, a085=85, a086=86, a087=87, a088=88,
+... a089=89, a090=90, a091=91, a092=92, a093=93, a094=94, a095=95, a096=96,
+... a097=97, a098=98, a099=99, a100=100, a101=101, a102=102, a103=103,
+... a104=104, a105=105, a106=106, a107=107, a108=108, a109=109, a110=110,
+... a111=111, a112=112, a113=113, a114=114, a115=115, a116=116, a117=117,
+... a118=118, a119=119, a120=120, a121=121, a122=122, a123=123, a124=124,
+... a125=125, a126=126, a127=127, a128=128, a129=129, a130=130, a131=131,
+... a132=132, a133=133, a134=134, a135=135, a136=136, a137=137, a138=138,
+... a139=139, a140=140, a141=141, a142=142, a143=143, a144=144, a145=145,
+... a146=146, a147=147, a148=148, a149=149, a150=150, a151=151, a152=152,
+... a153=153, a154=154, a155=155, a156=156, a157=157, a158=158, a159=159,
+... a160=160, a161=161, a162=162, a163=163, a164=164, a165=165, a166=166,
+... a167=167, a168=168, a169=169, a170=170, a171=171, a172=172, a173=173,
+... a174=174, a175=175, a176=176, a177=177, a178=178, a179=179, a180=180,
+... a181=181, a182=182, a183=183, a184=184, a185=185, a186=186, a187=187,
+... a188=188, a189=189, a190=190, a191=191, a192=192, a193=193, a194=194,
+... a195=195, a196=196, a197=197, a198=198, a199=199, a200=200, a201=201,
+... a202=202, a203=203, a204=204, a205=205, a206=206, a207=207, a208=208,
+... a209=209, a210=210, a211=211, a212=212, a213=213, a214=214, a215=215,
+... a216=216, a217=217, a218=218, a219=219, a220=220, a221=221, a222=222,
+... a223=223, a224=224, a225=225, a226=226, a227=227, a228=228, a229=229,
+... a230=230, a231=231, a232=232, a233=233, a234=234, a235=235, a236=236,
+... a237=237, a238=238, a239=239, a240=240, a241=241, a242=242, a243=243,
+... a244=244, a245=245, a246=246, a247=247, a248=248, a249=249, a250=250,
+... a251=251, a252=252, a253=253, a254=254) # doctest: +ELLIPSIS
+() [('a000', 0), ('a001', 1), ('a002', 2), ..., ('a253', 253), ('a254', 254)]
+>>> g(a000=0, a001=1, a002=2, a003=3, a004=4, a005=5, a006=6, a007=7, a008=8,
+... a009=9, a010=10, a011=11, a012=12, a013=13, a014=14, a015=15, a016=16,
+... a017=17, a018=18, a019=19, a020=20, a021=21, a022=22, a023=23, a024=24,
+... a025=25, a026=26, a027=27, a028=28, a029=29, a030=30, a031=31, a032=32,
+... a033=33, a034=34, a035=35, a036=36, a037=37, a038=38, a039=39, a040=40,
+... a041=41, a042=42, a043=43, a044=44, a045=45, a046=46, a047=47, a048=48,
+... a049=49, a050=50, a051=51, a052=52, a053=53, a054=54, a055=55, a056=56,
+... a057=57, a058=58, a059=59, a060=60, a061=61, a062=62, a063=63, a064=64,
+... a065=65, a066=66, a067=67, a068=68, a069=69, a070=70, a071=71, a072=72,
+... a073=73, a074=74, a075=75, a076=76, a077=77, a078=78, a079=79, a080=80,
+... a081=81, a082=82, a083=83, a084=84, a085=85, a086=86, a087=87, a088=88,
+... a089=89, a090=90, a091=91, a092=92, a093=93, a094=94, a095=95, a096=96,
+... a097=97, a098=98, a099=99, a100=100, a101=101, a102=102, a103=103,
+... a104=104, a105=105, a106=106, a107=107, a108=108, a109=109, a110=110,
+... a111=111, a112=112, a113=113, a114=114, a115=115, a116=116, a117=117,
+... a118=118, a119=119, a120=120, a121=121, a122=122, a123=123, a124=124,
+... a125=125, a126=126, a127=127, a128=128, a129=129, a130=130, a131=131,
+... a132=132, a133=133, a134=134, a135=135, a136=136, a137=137, a138=138,
+... a139=139, a140=140, a141=141, a142=142, a143=143, a144=144, a145=145,
+... a146=146, a147=147, a148=148, a149=149, a150=150, a151=151, a152=152,
+... a153=153, a154=154, a155=155, a156=156, a157=157, a158=158, a159=159,
+... a160=160, a161=161, a162=162, a163=163, a164=164, a165=165, a166=166,
+... a167=167, a168=168, a169=169, a170=170, a171=171, a172=172, a173=173,
+... a174=174, a175=175, a176=176, a177=177, a178=178, a179=179, a180=180,
+... a181=181, a182=182, a183=183, a184=184, a185=185, a186=186, a187=187,
+... a188=188, a189=189, a190=190, a191=191, a192=192, a193=193, a194=194,
+... a195=195, a196=196, a197=197, a198=198, a199=199, a200=200, a201=201,
+... a202=202, a203=203, a204=204, a205=205, a206=206, a207=207, a208=208,
+... a209=209, a210=210, a211=211, a212=212, a213=213, a214=214, a215=215,
+... a216=216, a217=217, a218=218, a219=219, a220=220, a221=221, a222=222,
+... a223=223, a224=224, a225=225, a226=226, a227=227, a228=228, a229=229,
+... a230=230, a231=231, a232=232, a233=233, a234=234, a235=235, a236=236,
+... a237=237, a238=238, a239=239, a240=240, a241=241, a242=242, a243=243,
+... a244=244, a245=245, a246=246, a247=247, a248=248, a249=249, a250=250,
+... a251=251, a252=252, a253=253, a254=254, a255=255)
+Traceback (most recent call last):
+ File "<doctest test.test_syntax[35]>", line 1
+SyntaxError: more than 255 arguments
+
+>>> class C:
+... def meth(self, *args):
+... return args
+>>> obj = C()
+>>> obj.meth(
+... 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+... 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+... 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+... 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+... 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+... 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+... 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+... 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+... 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+... 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+... 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+... 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+... 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+... 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+... 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+... 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+... 248, 249, 250, 251, 252, 253, 254) # doctest: +ELLIPSIS
+(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ..., 252, 253, 254)
+>>> obj.meth(
+... 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+... 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+... 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+... 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+... 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+... 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+... 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+... 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+... 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+... 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+... 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+... 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+... 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
+... 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+... 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+... 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
+... 248, 249, 250, 251, 252, 253, 254, 255)
+Traceback (most recent call last):
+ File "<doctest test.test_syntax[38]>", line 1
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
@@ -404,6 +509,16 @@ build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
...
SyntaxError: too many statically nested blocks
+Misuse of the global statement can lead to a few unique syntax errors.
+
+ >>> def f(x):
+ ... global x
+ ... # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ SyntaxError: name 'x' is local and global
+
+
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
@@ -459,9 +574,21 @@ leading to spurious errors.
File "<doctest test.test_syntax[48]>", line 6
SyntaxError: can't assign to function call
+Test the "raise X, Y[, Z]" form:
+
+ >>> raise ArithmeticError, 'bad number'
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: bad number
+ >>> raise ArithmeticError, 'bad number', None
+ Traceback (most recent call last):
+ ...
+ ArithmeticError: bad number
+
+
>>> f(a=23, a=234)
Traceback (most recent call last):
- ...
+ ...
File "<doctest test.test_syntax[49]>", line 1
SyntaxError: keyword argument repeated
@@ -477,6 +604,12 @@ Traceback (most recent call last):
File "<doctest test.test_syntax[50]>", line 1
SyntaxError: can't assign to literal
+Corner-case that used to fail to raise the correct error:
+
+ >>> def f(x=lambda __debug__:0): pass
+ Traceback (most recent call last):
+ SyntaxError: cannot assign to __debug__
+
Corner-case that used to crash:
>>> def f(*xx, **__debug__): pass
@@ -489,12 +622,12 @@ import re
import unittest
import warnings
-from test import test_support
+from test import support
class SyntaxTestCase(unittest.TestCase):
def _check_error(self, code, errtext,
- filename="<testcase>", mode="exec", subclass=None):
+ filename="<testcase>", mode="exec", subclass=None, lineno=None, offset=None):
"""Check that compiling code raises SyntaxError with errtext.
errtest is a regular expression that must be present in the
@@ -502,13 +635,19 @@ class SyntaxTestCase(unittest.TestCase):
is the expected subclass of SyntaxError (e.g. IndentationError).
"""
try:
- compile(code, filename, mode)
- except SyntaxError, err:
+ compile(code, filename or "<testcase>", mode)
+ except SyntaxError as err:
if subclass and not isinstance(err, subclass):
self.fail("SyntaxError is not a %s" % subclass.__name__)
mo = re.search(errtext, str(err))
if mo is None:
self.fail("%s did not contain '%r'" % (err, errtext,))
+ if filename is not None:
+ self.assertEqual(err.filename, filename)
+ if lineno is not None:
+ self.assertEqual(err.lineno, lineno)
+ if offset is not None:
+ self.assertEqual(err.offset, offset)
else:
self.fail("compile() did not raise SyntaxError")
@@ -516,6 +655,11 @@ class SyntaxTestCase(unittest.TestCase):
self._check_error("def f((x)=23): pass",
"parenthesized arg with default")
+ def test_assign_repr(self):
+ with support.check_py3k_warnings(('backquote not supported',
+ SyntaxWarning)):
+ self._check_error("`1` = 1", "assign to repr")
+
def test_assign_call(self):
self._check_error("f() = 1", "assign")
@@ -525,28 +669,48 @@ class SyntaxTestCase(unittest.TestCase):
def test_global_err_then_warn(self):
# Bug tickler: The SyntaxError raised for one global statement
# shouldn't be clobbered by a SyntaxWarning issued for a later one.
- source = re.sub('(?m)^ *:', '', """\
- :def error(a):
- : global a # SyntaxError
- :def warning():
- : b = 1
- : global b # SyntaxWarning
- :""")
- warnings.filterwarnings(action='ignore', category=SyntaxWarning)
- self._check_error(source, "global")
- warnings.filters.pop(0)
+ source = """if 1:
+ def error(a):
+ global a # SyntaxError
+ def warning():
+ b = 1
+ global b # SyntaxWarning
+ """
+ with support.check_warnings((".*assigned to before global declaration",
+ SyntaxWarning)):
+ self._check_error(source, "local and global", lineno=2)
+
+ def test_misuse_global(self):
+ source = """if 1:
+ def f():
+ print(x)
+ global x
+ """
+ with support.check_warnings(('.*used prior to global declaration',
+ SyntaxWarning)):
+ compile(source, '<testcase>', 'exec')
+
+ def test_misuse_global_2(self):
+ source = """if 1:
+ def f():
+ x = 1
+ global x
+ """
+ with support.check_warnings(('.*assigned to before global declaration',
+ SyntaxWarning)):
+ compile(source, '<testcase>', 'exec')
def test_break_outside_loop(self):
self._check_error("break", "outside loop")
def test_delete_deref(self):
- source = re.sub('(?m)^ *:', '', """\
- :def foo(x):
- : def bar():
- : print x
- : del x
- :""")
- self._check_error(source, "nested scope")
+ source = """if 1:
+ def foo(x):
+ def bar():
+ print(x)
+ del x
+ """
+ self._check_error(source, "nested scope", filename=None)
def test_unexpected_indent(self):
self._check_error("foo()\n bar()\n", "unexpected indent",
@@ -565,11 +729,9 @@ class SyntaxTestCase(unittest.TestCase):
self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
- test_support.run_unittest(SyntaxTestCase)
+ support.run_unittest(SyntaxTestCase)
from test import test_syntax
- with test_support.check_py3k_warnings(("backquote not supported",
- SyntaxWarning)):
- test_support.run_doctest(test_syntax, verbosity=True)
+ support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_sys.py b/lib-python/2.7/test/test_sys.py
index f7f238338e..6de0bba5eb 100644
--- a/lib-python/2.7/test/test_sys.py
+++ b/lib-python/2.7/test/test_sys.py
@@ -164,6 +164,17 @@ class SysModuleTest(unittest.TestCase):
self.assertEqual(out, b'')
self.assertEqual(err, b'')
+ # test that the exit machinery handles long exit codes
+ rc, out, err = assert_python_failure('-c', 'raise SystemExit(47L)')
+ self.assertEqual(rc, 47)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
+
+ rc, out, err = assert_python_ok('-c', 'raise SystemExit(0L)')
+ self.assertEqual(rc, 0)
+ self.assertEqual(out, b'')
+ self.assertEqual(err, b'')
+
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
@@ -746,7 +757,10 @@ class SizeofTest(unittest.TestCase):
# tupleiterator
check(iter(()), size('lP'))
# type
- s = vsize('P2P15Pl4PP9PP11PI' # PyTypeObject
+ fmt = 'P2P15Pl4PP9PP11PI'
+ if hasattr(sys, 'getcounts'):
+ fmt += '3P2P'
+ s = vsize(fmt + # PyTypeObject
'39P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
diff --git a/lib-python/2.7/test/test_sys_settrace.py b/lib-python/2.7/test/test_sys_settrace.py
index bf002054c3..2f745fa323 100644
--- a/lib-python/2.7/test/test_sys_settrace.py
+++ b/lib-python/2.7/test/test_sys_settrace.py
@@ -5,6 +5,19 @@ import unittest
import sys
import difflib
import gc
+from functools import wraps
+
+class tracecontext:
+ """Contex manager that traces its enter and exit."""
+ def __init__(self, output, value):
+ self.output = output
+ self.value = value
+
+ def __enter__(self):
+ self.output.append(self.value)
+
+ def __exit__(self, *exc_info):
+ self.output.append(-self.value)
# A very basic example. If this fails, we're in deep trouble.
def basic():
@@ -478,311 +491,528 @@ class RaisingTraceFuncTestCase(unittest.TestCase):
# command (aka. "Set next statement").
class JumpTracer:
- """Defines a trace function that jumps from one place to another,
- with the source and destination lines of the jump being defined by
- the 'jump' property of the function under test."""
-
- def __init__(self, function):
- self.function = function
- self.jumpFrom = function.jump[0]
- self.jumpTo = function.jump[1]
+ """Defines a trace function that jumps from one place to another."""
+
+ def __init__(self, function, jumpFrom, jumpTo, event='line',
+ decorated=False):
+ self.code = function.func_code
+ self.jumpFrom = jumpFrom
+ self.jumpTo = jumpTo
+ self.event = event
+ self.firstLine = None if decorated else self.code.co_firstlineno
self.done = False
def trace(self, frame, event, arg):
- if not self.done and frame.f_code == self.function.func_code:
- firstLine = frame.f_code.co_firstlineno
- if event == 'line' and frame.f_lineno == firstLine + self.jumpFrom:
+ if self.done:
+ return
+ # frame.f_code.co_firstlineno is the first line of the decorator when
+ # 'function' is decorated and the decorator may be written using
+ # multiple physical lines when it is too long. Use the first line
+ # trace event in 'function' to find the first line of 'function'.
+ if (self.firstLine is None and frame.f_code == self.code and
+ event == 'line'):
+ self.firstLine = frame.f_lineno - 1
+ if (event == self.event and self.firstLine and
+ frame.f_lineno == self.firstLine + self.jumpFrom):
+ f = frame
+ while f is not None and f.f_code != self.code:
+ f = f.f_back
+ if f is not None:
# Cope with non-integer self.jumpTo (because of
# no_jump_to_non_integers below).
try:
- frame.f_lineno = firstLine + self.jumpTo
+ frame.f_lineno = self.firstLine + self.jumpTo
except TypeError:
frame.f_lineno = self.jumpTo
self.done = True
return self.trace
-# The first set of 'jump' tests are for things that are allowed:
+# This verifies the line-numbers-must-be-integers rule.
+def no_jump_to_non_integers(output):
+ try:
+ output.append(2)
+ except ValueError as e:
+ output.append('integer' in str(e))
+
+# This verifies that you can't set f_lineno via _getframe or similar
+# trickery.
+def no_jump_without_trace_function():
+ try:
+ previous_frame = sys._getframe().f_back
+ previous_frame.f_lineno = previous_frame.f_lineno
+ except ValueError as e:
+ # This is the exception we wanted; make sure the error message
+ # talks about trace functions.
+ if 'trace' not in str(e):
+ raise
+ else:
+ # Something's wrong - the expected exception wasn't raised.
+ raise AssertionError("Trace-function-less jump failed to fail")
+
+
+class JumpTestCase(unittest.TestCase):
+ def setUp(self):
+ self.addCleanup(sys.settrace, sys.gettrace())
+ sys.settrace(None)
+
+ def compare_jump_output(self, expected, received):
+ if received != expected:
+ self.fail( "Outputs don't match:\n" +
+ "Expected: " + repr(expected) + "\n" +
+ "Received: " + repr(received))
-def jump_simple_forwards(output):
- output.append(1)
- output.append(2)
- output.append(3)
+ def run_test(self, func, jumpFrom, jumpTo, expected, error=None,
+ event='line', decorated=False):
+ tracer = JumpTracer(func, jumpFrom, jumpTo, event, decorated)
+ sys.settrace(tracer.trace)
+ output = []
+ if error is None:
+ func(output)
+ else:
+ with self.assertRaisesRegexp(*error):
+ func(output)
+ sys.settrace(None)
+ self.compare_jump_output(expected, output)
+
+ def jump_test(jumpFrom, jumpTo, expected, error=None, event='line'):
+ """Decorator that creates a test that makes a jump
+ from one place to another in the following code.
+ """
+ def decorator(func):
+ @wraps(func)
+ def test(self):
+ self.run_test(func, jumpFrom, jumpTo, expected,
+ error=error, event=event, decorated=True)
+ return test
+ return decorator
+
+ ## The first set of 'jump' tests are for things that are allowed:
+
+ @jump_test(1, 3, [3])
+ def test_jump_simple_forwards(output):
+ output.append(1)
+ output.append(2)
+ output.append(3)
-jump_simple_forwards.jump = (1, 3)
-jump_simple_forwards.output = [3]
+ @jump_test(2, 1, [1, 1, 2])
+ def test_jump_simple_backwards(output):
+ output.append(1)
+ output.append(2)
-def jump_simple_backwards(output):
- output.append(1)
- output.append(2)
+ @jump_test(3, 5, [2, 5])
+ def test_jump_out_of_block_forwards(output):
+ for i in 1, 2:
+ output.append(2)
+ for j in [3]: # Also tests jumping over a block
+ output.append(4)
+ output.append(5)
+
+ @jump_test(6, 1, [1, 3, 5, 1, 3, 5, 6, 7])
+ def test_jump_out_of_block_backwards(output):
+ output.append(1)
+ for i in [1]:
+ output.append(3)
+ for j in [2]: # Also tests jumping over a block
+ output.append(5)
+ output.append(6)
+ output.append(7)
-jump_simple_backwards.jump = (2, 1)
-jump_simple_backwards.output = [1, 1, 2]
+ @jump_test(1, 2, [3])
+ def test_jump_to_codeless_line(output):
+ output.append(1)
+ # Jumping to this line should skip to the next one.
+ output.append(3)
-def jump_out_of_block_forwards(output):
- for i in 1, 2:
+ @jump_test(2, 2, [1, 2, 3])
+ def test_jump_to_same_line(output):
+ output.append(1)
output.append(2)
- for j in [3]: # Also tests jumping over a block
+ output.append(3)
+
+ # Tests jumping within a finally block, and over one.
+ @jump_test(4, 9, [2, 9])
+ def test_jump_in_nested_finally(output):
+ try:
+ output.append(2)
+ finally:
output.append(4)
- output.append(5)
+ try:
+ output.append(6)
+ finally:
+ output.append(8)
+ output.append(9)
+
+ @jump_test(6, 7, [2, 7], (ZeroDivisionError, ''))
+ def test_jump_in_nested_finally_2(output):
+ try:
+ output.append(2)
+ 1.0/0.0
+ return
+ finally:
+ output.append(6)
+ output.append(7)
+ output.append(8)
-jump_out_of_block_forwards.jump = (3, 5)
-jump_out_of_block_forwards.output = [2, 5]
+ @jump_test(6, 11, [2, 11], (ZeroDivisionError, ''))
+ def test_jump_in_nested_finally_3(output):
+ try:
+ output.append(2)
+ 1.0/0.0
+ return
+ finally:
+ output.append(6)
+ try:
+ output.append(8)
+ finally:
+ output.append(10)
+ output.append(11)
+ output.append(12)
+
+ @jump_test(3, 4, [1, 4])
+ def test_jump_infinite_while_loop(output):
+ output.append(1)
+ while True:
+ output.append(3)
+ output.append(4)
-def jump_out_of_block_backwards(output):
- output.append(1)
- for i in [1]:
+ @jump_test(2, 3, [1, 3])
+ def test_jump_forwards_out_of_with_block(output):
+ with tracecontext(output, 1):
+ output.append(2)
output.append(3)
- for j in [2]: # Also tests jumping over a block
- output.append(5)
- output.append(6)
- output.append(7)
-jump_out_of_block_backwards.jump = (6, 1)
-jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
+ @jump_test(3, 1, [1, 2, 1, 2, 3, -2])
+ def test_jump_backwards_out_of_with_block(output):
+ output.append(1)
+ with tracecontext(output, 2):
+ output.append(3)
-def jump_to_codeless_line(output):
- output.append(1)
- # Jumping to this line should skip to the next one.
- output.append(3)
+ @jump_test(2, 5, [5])
+ def test_jump_forwards_out_of_try_finally_block(output):
+ try:
+ output.append(2)
+ finally:
+ output.append(4)
+ output.append(5)
-jump_to_codeless_line.jump = (1, 2)
-jump_to_codeless_line.output = [3]
+ @jump_test(3, 1, [1, 1, 3, 5])
+ def test_jump_backwards_out_of_try_finally_block(output):
+ output.append(1)
+ try:
+ output.append(3)
+ finally:
+ output.append(5)
-def jump_to_same_line(output):
- output.append(1)
- output.append(2)
- output.append(3)
+ @jump_test(2, 6, [6])
+ def test_jump_forwards_out_of_try_except_block(output):
+ try:
+ output.append(2)
+ except:
+ output.append(4)
+ raise
+ output.append(6)
-jump_to_same_line.jump = (2, 2)
-jump_to_same_line.output = [1, 2, 3]
+ @jump_test(3, 1, [1, 1, 3])
+ def test_jump_backwards_out_of_try_except_block(output):
+ output.append(1)
+ try:
+ output.append(3)
+ except:
+ output.append(5)
+ raise
-# Tests jumping within a finally block, and over one.
-def jump_in_nested_finally(output):
- try:
- output.append(2)
- finally:
- output.append(4)
+ @jump_test(5, 7, [4, 7, 8])
+ def test_jump_between_except_blocks(output):
try:
+ 1.0/0.0
+ except ZeroDivisionError:
+ output.append(4)
+ output.append(5)
+ except FloatingPointError:
+ output.append(7)
+ output.append(8)
+
+ @jump_test(5, 6, [4, 6, 7])
+ def test_jump_within_except_block(output):
+ try:
+ 1.0/0.0
+ except:
+ output.append(4)
+ output.append(5)
output.append(6)
- finally:
- output.append(8)
- output.append(9)
+ output.append(7)
-jump_in_nested_finally.jump = (4, 9)
-jump_in_nested_finally.output = [2, 9]
+ @jump_test(2, 4, [1, 4, 5, -4])
+ def test_jump_across_with(output):
+ output.append(1)
+ with tracecontext(output, 2):
+ output.append(3)
+ with tracecontext(output, 4):
+ output.append(5)
-def jump_infinite_while_loop(output):
- output.append(1)
- while 1:
- output.append(2)
- output.append(3)
+ @jump_test(4, 5, [1, 3, 5, 6])
+ def test_jump_out_of_with_block_within_for_block(output):
+ output.append(1)
+ for i in [1]:
+ with tracecontext(output, 3):
+ output.append(4)
+ output.append(5)
+ output.append(6)
-jump_infinite_while_loop.jump = (3, 4)
-jump_infinite_while_loop.output = [1, 3]
+ @jump_test(4, 5, [1, 2, 3, 5, -2, 6])
+ def test_jump_out_of_with_block_within_with_block(output):
+ output.append(1)
+ with tracecontext(output, 2):
+ with tracecontext(output, 3):
+ output.append(4)
+ output.append(5)
+ output.append(6)
-# The second set of 'jump' tests are for things that are not allowed:
+ @jump_test(5, 6, [2, 4, 6, 7])
+ def test_jump_out_of_with_block_within_finally_block(output):
+ try:
+ output.append(2)
+ finally:
+ with tracecontext(output, 4):
+ output.append(5)
+ output.append(6)
+ output.append(7)
-def no_jump_too_far_forwards(output):
- try:
- output.append(2)
- output.append(3)
- except ValueError, e:
- output.append('after' in str(e))
+ @jump_test(8, 11, [1, 3, 5, 11, 12])
+ def test_jump_out_of_complex_nested_blocks(output):
+ output.append(1)
+ for i in [1]:
+ output.append(3)
+ for j in [1, 2]:
+ output.append(5)
+ try:
+ for k in [1, 2]:
+ output.append(8)
+ finally:
+ output.append(10)
+ output.append(11)
+ output.append(12)
+
+ @jump_test(3, 5, [1, 2, 5])
+ def test_jump_out_of_with_assignment(output):
+ output.append(1)
+ with tracecontext(output, 2) \
+ as x:
+ output.append(4)
+ output.append(5)
-no_jump_too_far_forwards.jump = (3, 6)
-no_jump_too_far_forwards.output = [2, True]
+ @jump_test(3, 6, [1, 6, 8, 9])
+ def test_jump_over_return_in_try_finally_block(output):
+ output.append(1)
+ try:
+ output.append(3)
+ if not output: # always false
+ return
+ output.append(6)
+ finally:
+ output.append(8)
+ output.append(9)
-def no_jump_too_far_backwards(output):
- try:
- output.append(2)
- output.append(3)
- except ValueError, e:
- output.append('before' in str(e))
+ @jump_test(5, 8, [1, 3, 8, 10, 11, 13])
+ def test_jump_over_break_in_try_finally_block(output):
+ output.append(1)
+ while True:
+ output.append(3)
+ try:
+ output.append(5)
+ if not output: # always false
+ break
+ output.append(8)
+ finally:
+ output.append(10)
+ output.append(11)
+ break
+ output.append(13)
+
+ @jump_test(1, 7, [7, 8])
+ def test_jump_over_for_block_before_else(output):
+ output.append(1)
+ if not output: # always false
+ for i in [3]:
+ output.append(4)
+ else:
+ output.append(6)
+ output.append(7)
+ output.append(8)
-no_jump_too_far_backwards.jump = (3, -1)
-no_jump_too_far_backwards.output = [2, True]
+ # The second set of 'jump' tests are for things that are not allowed:
-# Test each kind of 'except' line.
-def no_jump_to_except_1(output):
- try:
+ @jump_test(2, 3, [1], (ValueError, 'after'))
+ def test_no_jump_too_far_forwards(output):
+ output.append(1)
output.append(2)
- except:
- e = sys.exc_info()[1]
- output.append('except' in str(e))
-
-no_jump_to_except_1.jump = (2, 3)
-no_jump_to_except_1.output = [True]
-def no_jump_to_except_2(output):
- try:
+ @jump_test(2, -2, [1], (ValueError, 'before'))
+ def test_no_jump_too_far_backwards(output):
+ output.append(1)
output.append(2)
- except ValueError:
- e = sys.exc_info()[1]
- output.append('except' in str(e))
-no_jump_to_except_2.jump = (2, 3)
-no_jump_to_except_2.output = [True]
+ # Test each kind of 'except' line.
+ @jump_test(2, 3, [4], (ValueError, 'except'))
+ def test_no_jump_to_except_1(output):
+ try:
+ output.append(2)
+ except:
+ output.append(4)
+ raise
-def no_jump_to_except_3(output):
- try:
- output.append(2)
- except ValueError, e:
- output.append('except' in str(e))
+ @jump_test(2, 3, [4], (ValueError, 'except'))
+ def test_no_jump_to_except_2(output):
+ try:
+ output.append(2)
+ except ValueError:
+ output.append(4)
+ raise
-no_jump_to_except_3.jump = (2, 3)
-no_jump_to_except_3.output = [True]
+ @jump_test(2, 3, [4], (ValueError, 'except'))
+ def test_no_jump_to_except_3(output):
+ try:
+ output.append(2)
+ except ValueError as e:
+ output.append(4)
+ raise e
-def no_jump_to_except_4(output):
- try:
- output.append(2)
- except (ValueError, RuntimeError), e:
- output.append('except' in str(e))
+ @jump_test(2, 3, [4], (ValueError, 'except'))
+ def test_no_jump_to_except_4(output):
+ try:
+ output.append(2)
+ except (ValueError, RuntimeError) as e:
+ output.append(4)
+ raise e
-no_jump_to_except_4.jump = (2, 3)
-no_jump_to_except_4.output = [True]
+ @jump_test(1, 3, [], (ValueError, 'into'))
+ def test_no_jump_forwards_into_for_block(output):
+ output.append(1)
+ for i in 1, 2:
+ output.append(3)
-def no_jump_forwards_into_block(output):
- try:
- output.append(2)
+ @jump_test(3, 2, [2, 2], (ValueError, 'into'))
+ def test_no_jump_backwards_into_for_block(output):
for i in 1, 2:
+ output.append(2)
+ output.append(3)
+
+ @jump_test(2, 4, [], (ValueError, 'into'))
+ def test_no_jump_forwards_into_while_block(output):
+ i = 1
+ output.append(2)
+ while i <= 2:
output.append(4)
- except ValueError, e:
- output.append('into' in str(e))
+ i += 1
-no_jump_forwards_into_block.jump = (2, 4)
-no_jump_forwards_into_block.output = [True]
+ @jump_test(5, 3, [3, 3], (ValueError, 'into'))
+ def test_no_jump_backwards_into_while_block(output):
+ i = 1
+ while i <= 2:
+ output.append(3)
+ i += 1
+ output.append(5)
-def no_jump_backwards_into_block(output):
- try:
- for i in 1, 2:
+ @jump_test(1, 3, [], (ValueError, 'into'))
+ def test_no_jump_forwards_into_with_block(output):
+ output.append(1)
+ with tracecontext(output, 2):
output.append(3)
- output.append(4)
- except ValueError, e:
- output.append('into' in str(e))
-no_jump_backwards_into_block.jump = (4, 3)
-no_jump_backwards_into_block.output = [3, 3, True]
+ @jump_test(3, 2, [1, 2, -1], (ValueError, 'into'))
+ def test_no_jump_backwards_into_with_block(output):
+ with tracecontext(output, 1):
+ output.append(2)
+ output.append(3)
-def no_jump_into_finally_block(output):
- try:
+ @jump_test(1, 3, [], (ValueError, 'into'))
+ def test_no_jump_forwards_into_try_finally_block(output):
+ output.append(1)
try:
output.append(3)
- x = 1
finally:
- output.append(6)
- except ValueError, e:
- output.append('finally' in str(e))
+ output.append(5)
-no_jump_into_finally_block.jump = (4, 6)
-no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
+ @jump_test(5, 2, [2, 4], (ValueError, 'into'))
+ def test_no_jump_backwards_into_try_finally_block(output):
+ try:
+ output.append(2)
+ finally:
+ output.append(4)
+ output.append(5)
-def no_jump_out_of_finally_block(output):
- try:
+ @jump_test(1, 3, [], (ValueError, 'into'))
+ def test_no_jump_forwards_into_try_except_block(output):
+ output.append(1)
try:
output.append(3)
- finally:
+ except:
output.append(5)
- output.append(6)
- except ValueError, e:
- output.append('finally' in str(e))
+ raise
-no_jump_out_of_finally_block.jump = (5, 1)
-no_jump_out_of_finally_block.output = [3, True]
+ @jump_test(6, 2, [2], (ValueError, 'into'))
+ def test_no_jump_backwards_into_try_except_block(output):
+ try:
+ output.append(2)
+ except:
+ output.append(4)
+ raise
+ output.append(6)
-# This verifies the line-numbers-must-be-integers rule.
-def no_jump_to_non_integers(output):
- try:
- output.append(2)
- except ValueError, e:
- output.append('integer' in str(e))
+ @jump_test(3, 6, [2, 5, 6], (ValueError, 'finally'))
+ def test_no_jump_into_finally_block(output):
+ try:
+ output.append(2)
+ output.append(3)
+ finally: # still executed if the jump is failed
+ output.append(5)
+ output.append(6)
+ output.append(7)
-no_jump_to_non_integers.jump = (2, "Spam")
-no_jump_to_non_integers.output = [True]
+ @jump_test(1, 5, [], (ValueError, 'finally'))
+ def test_no_jump_into_finally_block_2(output):
+ output.append(1)
+ try:
+ output.append(3)
+ finally:
+ output.append(5)
-def jump_across_with(output):
- with open(test_support.TESTFN, "wb") as fp:
- pass
- with open(test_support.TESTFN, "wb") as fp:
- pass
-jump_across_with.jump = (1, 3)
-jump_across_with.output = []
+ @jump_test(5, 1, [1, 3], (ValueError, 'finally'))
+ def test_no_jump_out_of_finally_block(output):
+ output.append(1)
+ try:
+ output.append(3)
+ finally:
+ output.append(5)
-# This verifies that you can't set f_lineno via _getframe or similar
-# trickery.
-def no_jump_without_trace_function():
- try:
- previous_frame = sys._getframe().f_back
- previous_frame.f_lineno = previous_frame.f_lineno
- except ValueError, e:
- # This is the exception we wanted; make sure the error message
- # talks about trace functions.
- if 'trace' not in str(e):
- raise
- else:
- # Something's wrong - the expected exception wasn't raised.
- raise RuntimeError, "Trace-function-less jump failed to fail"
+ @jump_test(3, 5, [1, 2, -2], (ValueError, 'into'))
+ def test_no_jump_between_with_blocks(output):
+ output.append(1)
+ with tracecontext(output, 2):
+ output.append(3)
+ with tracecontext(output, 4):
+ output.append(5)
+ @jump_test(7, 4, [1, 6], (ValueError, 'into'))
+ def test_no_jump_into_for_block_before_else(output):
+ output.append(1)
+ if not output: # always false
+ for i in [3]:
+ output.append(4)
+ else:
+ output.append(6)
+ output.append(7)
+ output.append(8)
-class JumpTestCase(unittest.TestCase):
- def compare_jump_output(self, expected, received):
- if received != expected:
- self.fail( "Outputs don't match:\n" +
- "Expected: " + repr(expected) + "\n" +
- "Received: " + repr(received))
+ def test_no_jump_to_non_integers(self):
+ self.run_test(no_jump_to_non_integers, 2, "Spam", [True])
- def run_test(self, func):
- tracer = JumpTracer(func)
- sys.settrace(tracer.trace)
- output = []
- func(output)
- sys.settrace(None)
- self.compare_jump_output(func.output, output)
-
- def test_01_jump_simple_forwards(self):
- self.run_test(jump_simple_forwards)
- def test_02_jump_simple_backwards(self):
- self.run_test(jump_simple_backwards)
- def test_03_jump_out_of_block_forwards(self):
- self.run_test(jump_out_of_block_forwards)
- def test_04_jump_out_of_block_backwards(self):
- self.run_test(jump_out_of_block_backwards)
- def test_05_jump_to_codeless_line(self):
- self.run_test(jump_to_codeless_line)
- def test_06_jump_to_same_line(self):
- self.run_test(jump_to_same_line)
- def test_07_jump_in_nested_finally(self):
- self.run_test(jump_in_nested_finally)
- def test_jump_infinite_while_loop(self):
- self.run_test(jump_infinite_while_loop)
- def test_08_no_jump_too_far_forwards(self):
- self.run_test(no_jump_too_far_forwards)
- def test_09_no_jump_too_far_backwards(self):
- self.run_test(no_jump_too_far_backwards)
- def test_10_no_jump_to_except_1(self):
- self.run_test(no_jump_to_except_1)
- def test_11_no_jump_to_except_2(self):
- self.run_test(no_jump_to_except_2)
- def test_12_no_jump_to_except_3(self):
- self.run_test(no_jump_to_except_3)
- def test_13_no_jump_to_except_4(self):
- self.run_test(no_jump_to_except_4)
- def test_14_no_jump_forwards_into_block(self):
- self.run_test(no_jump_forwards_into_block)
- def test_15_no_jump_backwards_into_block(self):
- self.run_test(no_jump_backwards_into_block)
- def test_16_no_jump_into_finally_block(self):
- self.run_test(no_jump_into_finally_block)
- def test_17_no_jump_out_of_finally_block(self):
- self.run_test(no_jump_out_of_finally_block)
- def test_18_no_jump_to_non_integers(self):
- self.run_test(no_jump_to_non_integers)
- def test_19_no_jump_without_trace_function(self):
+ def test_no_jump_without_trace_function(self):
+ # Must set sys.settrace(None) in setUp(), else condition is not
+ # triggered.
no_jump_without_trace_function()
- def test_jump_across_with(self):
- self.addCleanup(test_support.unlink, test_support.TESTFN)
- self.run_test(jump_across_with)
- def test_20_large_function(self):
+ def test_large_function(self):
d = {}
exec("""def f(output): # line 0
x = 0 # line 1
@@ -794,10 +1024,7 @@ class JumpTestCase(unittest.TestCase):
output.append(x) # line 1007
return""" % ('\n' * 1000,), d)
f = d['f']
-
- f.jump = (2, 1007)
- f.output = [0]
- self.run_test(f)
+ self.run_test(f, 2, 1007, [0])
def test_jump_to_firstlineno(self):
# This tests that PDB can jump back to the first line in a
@@ -811,14 +1038,43 @@ output.append(4)
""", "<fake module>", "exec")
class fake_function:
func_code = code
- jump = (2, 0)
- tracer = JumpTracer(fake_function)
+ tracer = JumpTracer(fake_function, 2, 0)
sys.settrace(tracer.trace)
namespace = {"output": []}
exec code in namespace
sys.settrace(None)
self.compare_jump_output([2, 3, 2, 3, 4], namespace["output"])
+ @jump_test(2, 3, [1], event='call', error=(ValueError, "can't jump from"
+ " the 'call' trace event of a new frame"))
+ def test_no_jump_from_call(output):
+ output.append(1)
+ def nested():
+ output.append(3)
+ nested()
+ output.append(5)
+
+ @jump_test(2, 1, [1], event='return', error=(ValueError,
+ "can only jump from a 'line' trace event"))
+ def test_no_jump_from_return_event(output):
+ output.append(1)
+ return
+
+ @jump_test(2, 1, [1], event='exception', error=(ValueError,
+ "can only jump from a 'line' trace event"))
+ def test_no_jump_from_exception_event(output):
+ output.append(1)
+ 1 // 0
+
+ @jump_test(3, 2, [2], event='return', error=(ValueError,
+ "can't jump from a yield statement"))
+ def test_no_jump_from_yield(output):
+ def gen():
+ output.append(2)
+ yield 3
+ next(gen())
+ output.append(5)
+
def test_main():
test_support.run_unittest(
diff --git a/lib-python/2.7/test/test_sysconfig.py b/lib-python/2.7/test/test_sysconfig.py
index 69d660ba6a..5715c584f2 100644
--- a/lib-python/2.7/test/test_sysconfig.py
+++ b/lib-python/2.7/test/test_sysconfig.py
@@ -252,7 +252,7 @@ class TestSysConfig(unittest.TestCase):
not import_module('_testcapi'), config_h)
def test_get_scheme_names(self):
- wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
+ wanted = ('nt', 'nt_pypy', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user', 'pypy')
self.assertEqual(get_scheme_names(), wanted)
diff --git a/lib-python/2.7/test/test_tcl.py b/lib-python/2.7/test/test_tcl.py
index 4c2e8d5200..84c4ceab00 100644
--- a/lib-python/2.7/test/test_tcl.py
+++ b/lib-python/2.7/test/test_tcl.py
@@ -256,7 +256,7 @@ class TclTest(unittest.TestCase):
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
except WindowsError as e:
- if e.winerror == 5:
+ if e.winerror == 5 or e.winerror == 2:
self.skipTest('Not permitted to start the child process')
else:
raise
@@ -661,6 +661,43 @@ class TclTest(unittest.TestCase):
expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''}
self.assertEqual(splitdict(tcl, arg), expected)
+ def test_join(self):
+ join = tkinter._join
+ tcl = self.interp.tk
+ def unpack(s):
+ return tcl.call('lindex', s, 0)
+ def check(value):
+ self.assertEqual(unpack(join([value])), value)
+ self.assertEqual(unpack(join([value, 0])), value)
+ self.assertEqual(unpack(unpack(join([[value]]))), value)
+ self.assertEqual(unpack(unpack(join([[value, 0]]))), value)
+ self.assertEqual(unpack(unpack(join([[value], 0]))), value)
+ self.assertEqual(unpack(unpack(join([[value, 0], 0]))), value)
+ check('')
+ check('spam')
+ check('sp am')
+ check('sp\tam')
+ check('sp\nam')
+ check(' \t\n')
+ check('{spam}')
+ check('{sp am}')
+ check('"spam"')
+ check('"sp am"')
+ check('{"spam"}')
+ check('"{spam}"')
+ check('sp\\am')
+ check('"sp\\am"')
+ check('"{}" "{}"')
+ check('"\\')
+ check('"{')
+ check('"}')
+ check('\n\\')
+ check('\n{')
+ check('\n}')
+ check('\\\n')
+ check('{\n')
+ check('}\n')
+
character_size = 4 if sys.maxunicode > 0xFFFF else 2
@@ -705,25 +742,25 @@ class BigmemTclTest(unittest.TestCase):
self.check_huge_string_builtins(value)
def check_huge_string_builtins(self, value):
- self.assertRaises(OverflowError, self.interp.tk.getint, value)
- self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
- self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
- self.assertRaises(OverflowError, self.interp.eval, value)
- self.assertRaises(OverflowError, self.interp.evalfile, value)
- self.assertRaises(OverflowError, self.interp.record, value)
- self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
- self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
- self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
- self.assertRaises(OverflowError, self.interp.unsetvar, value)
- self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
- self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
- self.assertRaises(OverflowError, self.interp.exprstring, value)
- self.assertRaises(OverflowError, self.interp.exprlong, value)
- self.assertRaises(OverflowError, self.interp.exprboolean, value)
- self.assertRaises(OverflowError, self.interp.splitlist, value)
- self.assertRaises(OverflowError, self.interp.split, value)
- self.assertRaises(OverflowError, self.interp.createcommand, value, max)
- self.assertRaises(OverflowError, self.interp.deletecommand, value)
+ tk = self.interp.tk
+ self.assertRaises(OverflowError, tk.getint, value)
+ self.assertRaises(OverflowError, tk.getdouble, value)
+ self.assertRaises(OverflowError, tk.getboolean, value)
+ self.assertRaises(OverflowError, tk.eval, value)
+ self.assertRaises(OverflowError, tk.evalfile, value)
+ self.assertRaises(OverflowError, tk.record, value)
+ self.assertRaises(OverflowError, tk.adderrorinfo, value)
+ self.assertRaises(OverflowError, tk.setvar, value, 'x', 'a')
+ self.assertRaises(OverflowError, tk.setvar, 'x', value, 'a')
+ self.assertRaises(OverflowError, tk.unsetvar, value)
+ self.assertRaises(OverflowError, tk.unsetvar, 'x', value)
+ self.assertRaises(OverflowError, tk.exprstring, value)
+ self.assertRaises(OverflowError, tk.exprlong, value)
+ self.assertRaises(OverflowError, tk.exprboolean, value)
+ self.assertRaises(OverflowError, tk.splitlist, value)
+ self.assertRaises(OverflowError, tk.split, value)
+ self.assertRaises(OverflowError, tk.createcommand, value, max)
+ self.assertRaises(OverflowError, tk.deletecommand, value)
def setUpModule():
diff --git a/lib-python/2.7/test/test_tempfile.py b/lib-python/2.7/test/test_tempfile.py
index 5a526b8230..8085bf394e 100644
--- a/lib-python/2.7/test/test_tempfile.py
+++ b/lib-python/2.7/test/test_tempfile.py
@@ -141,12 +141,15 @@ class test__RandomNameSequence(TC):
try:
pid = os.fork()
if not pid:
+ # child process
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
# bypass the normal exit handlers- leave those to
# the parent.
os._exit(0)
+
+ # parent process
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
@@ -157,6 +160,10 @@ class test__RandomNameSequence(TC):
os.kill(pid, signal.SIGKILL)
except EnvironmentError:
pass
+
+ # Read the process exit status to avoid zombie process
+ os.waitpid(pid, 0)
+
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
@@ -235,13 +242,12 @@ class TestGetDefaultTempdir(TC):
self.assertEqual(cm.exception.errno, errno.ENOENT)
self.assertEqual(os.listdir(our_temp_directory), [])
- open = io.open
def bad_writer(*args, **kwargs):
- fp = open(*args, **kwargs)
+ fp = orig_open(*args, **kwargs)
fp.write = raise_OSError
return fp
- with support.swap_attr(io, "open", bad_writer):
+ with support.swap_attr(io, "open", bad_writer) as orig_open:
# test again with failing write()
with self.assertRaises(IOError) as cm:
tempfile._get_default_tempdir()
@@ -819,6 +825,7 @@ class test_NamedTemporaryFile(TC):
old_fdopen = os.fdopen
closed = []
def close(fd):
+ old_close(fd)
closed.append(fd)
def fdopen(*args):
raise ValueError()
diff --git a/lib-python/2.7/test/test_test_support.py b/lib-python/2.7/test/test_test_support.py
new file mode 100644
index 0000000000..ca12c1e376
--- /dev/null
+++ b/lib-python/2.7/test/test_test_support.py
@@ -0,0 +1,465 @@
+import importlib
+import shutil
+import stat
+import sys
+import os
+import unittest
+import socket
+import tempfile
+import textwrap
+import errno
+from test import support
+from test.support import script_helper
+
+TESTFN = support.TESTFN
+
+
+class ClassicClass:
+ pass
+
+class NewStyleClass(object):
+ pass
+
+
+class TestSupport(unittest.TestCase):
+
+ def test_import_module(self):
+ support.import_module("ftplib")
+ self.assertRaises(unittest.SkipTest, support.import_module, "foo")
+
+ def test_import_fresh_module(self):
+ support.import_fresh_module("ftplib")
+
+ def test_get_attribute(self):
+ self.assertEqual(support.get_attribute(self, "test_get_attribute"),
+ self.test_get_attribute)
+ self.assertRaises(unittest.SkipTest, support.get_attribute, self, "foo")
+ with self.assertRaisesRegexp(unittest.SkipTest, 'unittest'):
+ support.get_attribute(unittest, 'foo')
+ with self.assertRaisesRegexp(unittest.SkipTest, 'ClassicClass'):
+ support.get_attribute(ClassicClass, 'foo')
+ with self.assertRaisesRegexp(unittest.SkipTest, 'ClassicClass'):
+ support.get_attribute(ClassicClass(), 'foo')
+ with self.assertRaisesRegexp(unittest.SkipTest, 'NewStyleClass'):
+ support.get_attribute(NewStyleClass, 'foo')
+ with self.assertRaisesRegexp(unittest.SkipTest, 'NewStyleClass'):
+ support.get_attribute(NewStyleClass(), 'foo')
+
+ @unittest.skip("failing buildbots")
+ def test_get_original_stdout(self):
+ self.assertEqual(support.get_original_stdout(), sys.stdout)
+
+ def test_unload(self):
+ import sched
+ self.assertIn("sched", sys.modules)
+ support.unload("sched")
+ self.assertNotIn("sched", sys.modules)
+
+ def test_unlink(self):
+ with open(TESTFN, "w") as f:
+ pass
+ support.unlink(TESTFN)
+ self.assertFalse(os.path.exists(TESTFN))
+ support.unlink(TESTFN)
+
+ def test_rmtree(self):
+ dirpath = support.TESTFN + 'd'
+ subdirpath = os.path.join(dirpath, 'subdir')
+ os.mkdir(dirpath)
+ os.mkdir(subdirpath)
+ support.rmtree(dirpath)
+ self.assertFalse(os.path.exists(dirpath))
+ with support.swap_attr(support, 'verbose', 0):
+ support.rmtree(dirpath)
+
+ os.mkdir(dirpath)
+ os.mkdir(subdirpath)
+ os.chmod(dirpath, stat.S_IRUSR|stat.S_IXUSR)
+ with support.swap_attr(support, 'verbose', 0):
+ support.rmtree(dirpath)
+ self.assertFalse(os.path.exists(dirpath))
+
+ os.mkdir(dirpath)
+ os.mkdir(subdirpath)
+ os.chmod(dirpath, 0)
+ with support.swap_attr(support, 'verbose', 0):
+ support.rmtree(dirpath)
+ self.assertFalse(os.path.exists(dirpath))
+
+ def test_forget(self):
+ mod_filename = TESTFN + '.py'
+ with open(mod_filename, 'wt') as f:
+ f.write('foo = 1\n')
+ sys.path.insert(0, os.curdir)
+ try:
+ mod = __import__(TESTFN)
+ self.assertIn(TESTFN, sys.modules)
+
+ support.forget(TESTFN)
+ self.assertNotIn(TESTFN, sys.modules)
+ finally:
+ del sys.path[0]
+ support.unlink(mod_filename)
+ support.rmtree('__pycache__')
+
+ def test_HOST(self):
+ s = socket.socket()
+ s.bind((support.HOST, 0))
+ s.close()
+
+ def test_find_unused_port(self):
+ port = support.find_unused_port()
+ s = socket.socket()
+ s.bind((support.HOST, port))
+ s.close()
+
+ def test_bind_port(self):
+ s = socket.socket()
+ support.bind_port(s)
+ s.listen(5)
+ s.close()
+
+ # Tests for temp_dir()
+
+ def test_temp_dir(self):
+ """Test that temp_dir() creates and destroys its directory."""
+ parent_dir = tempfile.mkdtemp()
+ parent_dir = os.path.realpath(parent_dir)
+
+ try:
+ path = os.path.join(parent_dir, 'temp')
+ self.assertFalse(os.path.isdir(path))
+ with support.temp_dir(path) as temp_path:
+ self.assertEqual(temp_path, path)
+ self.assertTrue(os.path.isdir(path))
+ self.assertFalse(os.path.isdir(path))
+ finally:
+ support.rmtree(parent_dir)
+
+ def test_temp_dir__path_none(self):
+ """Test passing no path."""
+ with support.temp_dir() as temp_path:
+ self.assertTrue(os.path.isdir(temp_path))
+ self.assertFalse(os.path.isdir(temp_path))
+
+ def test_temp_dir__existing_dir__quiet_default(self):
+ """Test passing a directory that already exists."""
+ def call_temp_dir(path):
+ with support.temp_dir(path) as temp_path:
+ raise Exception("should not get here")
+
+ path = tempfile.mkdtemp()
+ path = os.path.realpath(path)
+ try:
+ self.assertTrue(os.path.isdir(path))
+ with self.assertRaises(OSError) as cm:
+ call_temp_dir(path)
+ self.assertEqual(cm.exception.errno, errno.EEXIST)
+ # Make sure temp_dir did not delete the original directory.
+ self.assertTrue(os.path.isdir(path))
+ finally:
+ shutil.rmtree(path)
+
+ def test_temp_dir__existing_dir__quiet_true(self):
+ """Test passing a directory that already exists with quiet=True."""
+ path = tempfile.mkdtemp()
+ path = os.path.realpath(path)
+
+ try:
+ with support.check_warnings() as recorder:
+ with support.temp_dir(path, quiet=True) as temp_path:
+ self.assertEqual(path, temp_path)
+ warnings = [str(w.message) for w in recorder.warnings]
+ # Make sure temp_dir did not delete the original directory.
+ self.assertTrue(os.path.isdir(path))
+ finally:
+ shutil.rmtree(path)
+
+ expected = ['tests may fail, unable to create temp dir: ' + path]
+ self.assertEqual(warnings, expected)
+
+ @unittest.skipUnless(hasattr(os, "fork"), "test requires os.fork")
+ def test_temp_dir__forked_child(self):
+ """Test that a forked child process does not remove the directory."""
+ # See bpo-30028 for details.
+ # Run the test as an external script, because it uses fork.
+ script_helper.assert_python_ok("-c", textwrap.dedent("""
+ import os
+ from test import support
+ with support.temp_cwd() as temp_path:
+ pid = os.fork()
+ if pid != 0:
+ # parent process (child has pid == 0)
+
+ # wait for the child to terminate
+ (pid, status) = os.waitpid(pid, 0)
+ if status != 0:
+ raise AssertionError("Child process failed with exit "
+ "status indication "
+ "0x{:x}.".format(status))
+
+ # Make sure that temp_path is still present. When the child
+ # process leaves the 'temp_cwd'-context, the __exit__()-
+ # method of the context must not remove the temporary
+ # directory.
+ if not os.path.isdir(temp_path):
+ raise AssertionError("Child removed temp_path.")
+ """))
+
+ # Tests for change_cwd()
+
+ def test_change_cwd(self):
+ original_cwd = os.getcwd()
+
+ with support.temp_dir() as temp_path:
+ with support.change_cwd(temp_path) as new_cwd:
+ self.assertEqual(new_cwd, temp_path)
+ self.assertEqual(os.getcwd(), new_cwd)
+
+ self.assertEqual(os.getcwd(), original_cwd)
+
+ def test_change_cwd__non_existent_dir(self):
+ """Test passing a non-existent directory."""
+ original_cwd = os.getcwd()
+
+ def call_change_cwd(path):
+ with support.change_cwd(path) as new_cwd:
+ raise Exception("should not get here")
+
+ with support.temp_dir() as parent_dir:
+ non_existent_dir = os.path.join(parent_dir, 'does_not_exist')
+ with self.assertRaises(OSError) as cm:
+ call_change_cwd(non_existent_dir)
+ self.assertEqual(cm.exception.errno, errno.ENOENT)
+
+ self.assertEqual(os.getcwd(), original_cwd)
+
+ def test_change_cwd__non_existent_dir__quiet_true(self):
+ """Test passing a non-existent directory with quiet=True."""
+ original_cwd = os.getcwd()
+
+ with support.temp_dir() as parent_dir:
+ bad_dir = os.path.join(parent_dir, 'does_not_exist')
+ with support.check_warnings() as recorder:
+ with support.change_cwd(bad_dir, quiet=True) as new_cwd:
+ self.assertEqual(new_cwd, original_cwd)
+ self.assertEqual(os.getcwd(), new_cwd)
+ warnings = [str(w.message) for w in recorder.warnings]
+
+ expected = ['tests may fail, unable to change CWD to: ' + bad_dir]
+ self.assertEqual(warnings, expected)
+
+ # Tests for change_cwd()
+
+ def test_change_cwd__chdir_warning(self):
+ """Check the warning message when os.chdir() fails."""
+ path = TESTFN + '_does_not_exist'
+ with support.check_warnings() as recorder:
+ with support.change_cwd(path=path, quiet=True):
+ pass
+ messages = [str(w.message) for w in recorder.warnings]
+ self.assertEqual(messages, ['tests may fail, unable to change CWD to: ' + path])
+
+ # Tests for temp_cwd()
+
+ def test_temp_cwd(self):
+ here = os.getcwd()
+ with support.temp_cwd(name=TESTFN):
+ self.assertEqual(os.path.basename(os.getcwd()), TESTFN)
+ self.assertFalse(os.path.exists(TESTFN))
+ self.assertEqual(os.getcwd(), here)
+
+
+ def test_temp_cwd__name_none(self):
+ """Test passing None to temp_cwd()."""
+ original_cwd = os.getcwd()
+ with support.temp_cwd(name=None) as new_cwd:
+ self.assertNotEqual(new_cwd, original_cwd)
+ self.assertTrue(os.path.isdir(new_cwd))
+ self.assertEqual(os.getcwd(), new_cwd)
+ self.assertEqual(os.getcwd(), original_cwd)
+
+ def test_sortdict(self):
+ self.assertEqual(support.sortdict({3:3, 2:2, 1:1}), "{1: 1, 2: 2, 3: 3}")
+
+ def test_make_bad_fd(self):
+ fd = support.make_bad_fd()
+ with self.assertRaises(OSError) as cm:
+ os.write(fd, b"foo")
+ self.assertEqual(cm.exception.errno, errno.EBADF)
+
+ def test_check_syntax_error(self):
+ support.check_syntax_error(self, "def class", lineno=1, offset=5)
+ with self.assertRaises(AssertionError):
+ support.check_syntax_error(self, "x=1")
+
+ def test_CleanImport(self):
+ import importlib
+ with support.CleanImport("asyncore"):
+ importlib.import_module("asyncore")
+
+ def test_DirsOnSysPath(self):
+ with support.DirsOnSysPath('foo', 'bar'):
+ self.assertIn("foo", sys.path)
+ self.assertIn("bar", sys.path)
+ self.assertNotIn("foo", sys.path)
+ self.assertNotIn("bar", sys.path)
+
+ def test_captured_stdout(self):
+ with support.captured_stdout() as stdout:
+ print "hello"
+ self.assertEqual(stdout.getvalue(), "hello\n")
+
+ def test_captured_stderr(self):
+ with support.captured_stderr() as stderr:
+ print >>sys.stderr, "hello"
+ self.assertEqual(stderr.getvalue(), "hello\n")
+
+ def test_captured_stdin(self):
+ with support.captured_stdin() as stdin:
+ stdin.write('hello\n')
+ stdin.seek(0)
+ # call test code that consumes from sys.stdin
+ captured = raw_input()
+ self.assertEqual(captured, "hello")
+
+ def test_gc_collect(self):
+ support.gc_collect()
+
+ def test_python_is_optimized(self):
+ self.assertIsInstance(support.python_is_optimized(), bool)
+
+ def test_swap_attr(self):
+ class Obj:
+ pass
+ obj = Obj()
+ obj.x = 1
+ with support.swap_attr(obj, "x", 5) as x:
+ self.assertEqual(obj.x, 5)
+ self.assertEqual(x, 1)
+ self.assertEqual(obj.x, 1)
+ with support.swap_attr(obj, "y", 5) as y:
+ self.assertEqual(obj.y, 5)
+ self.assertIsNone(y)
+ self.assertFalse(hasattr(obj, 'y'))
+ with support.swap_attr(obj, "y", 5):
+ del obj.y
+ self.assertFalse(hasattr(obj, 'y'))
+
+ def test_swap_item(self):
+ D = {"x":1}
+ with support.swap_item(D, "x", 5) as x:
+ self.assertEqual(D["x"], 5)
+ self.assertEqual(x, 1)
+ self.assertEqual(D["x"], 1)
+ with support.swap_item(D, "y", 5) as y:
+ self.assertEqual(D["y"], 5)
+ self.assertIsNone(y)
+ self.assertNotIn("y", D)
+ with support.swap_item(D, "y", 5):
+ del D["y"]
+ self.assertNotIn("y", D)
+
+ def test_match_test(self):
+ class Test:
+ def __init__(self, test_id):
+ self.test_id = test_id
+
+ def id(self):
+ return self.test_id
+
+ test_access = Test('test.test_os.FileTests.test_access')
+ test_chdir = Test('test.test_os.Win32ErrorTests.test_chdir')
+
+ with support.swap_attr(support, '_match_test_func', None):
+ # match all
+ support.set_match_tests([])
+ self.assertTrue(support.match_test(test_access))
+ self.assertTrue(support.match_test(test_chdir))
+
+ # match all using None
+ support.set_match_tests(None)
+ self.assertTrue(support.match_test(test_access))
+ self.assertTrue(support.match_test(test_chdir))
+
+ # match the full test identifier
+ support.set_match_tests([test_access.id()])
+ self.assertTrue(support.match_test(test_access))
+ self.assertFalse(support.match_test(test_chdir))
+
+ # match the module name
+ support.set_match_tests(['test_os'])
+ self.assertTrue(support.match_test(test_access))
+ self.assertTrue(support.match_test(test_chdir))
+
+ # Test '*' pattern
+ support.set_match_tests(['test_*'])
+ self.assertTrue(support.match_test(test_access))
+ self.assertTrue(support.match_test(test_chdir))
+
+ # Test case sensitivity
+ support.set_match_tests(['filetests'])
+ self.assertFalse(support.match_test(test_access))
+ support.set_match_tests(['FileTests'])
+ self.assertTrue(support.match_test(test_access))
+
+ # Test pattern containing '.' and a '*' metacharacter
+ support.set_match_tests(['*test_os.*.test_*'])
+ self.assertTrue(support.match_test(test_access))
+ self.assertTrue(support.match_test(test_chdir))
+
+ # Multiple patterns
+ support.set_match_tests([test_access.id(), test_chdir.id()])
+ self.assertTrue(support.match_test(test_access))
+ self.assertTrue(support.match_test(test_chdir))
+
+ support.set_match_tests(['test_access', 'DONTMATCH'])
+ self.assertTrue(support.match_test(test_access))
+ self.assertFalse(support.match_test(test_chdir))
+
+ def test_fd_count(self):
+ # We cannot test the absolute value of fd_count(): on old Linux
+ # kernel or glibc versions, os.urandom() keeps a FD open on
+ # /dev/urandom device and Python has 4 FD opens instead of 3.
+ start = support.fd_count()
+ fd = os.open(__file__, os.O_RDONLY)
+ try:
+ more = support.fd_count()
+ finally:
+ os.close(fd)
+ self.assertEqual(more - start, 1)
+
+ # XXX -follows a list of untested API
+ # make_legacy_pyc
+ # is_resource_enabled
+ # requires
+ # fcmp
+ # umaks
+ # findfile
+ # check_warnings
+ # EnvironmentVarGuard
+ # TransientResource
+ # transient_internet
+ # run_with_locale
+ # set_memlimit
+ # bigmemtest
+ # precisionbigmemtest
+ # bigaddrspacetest
+ # requires_resource
+ # run_doctest
+ # threading_cleanup
+ # reap_threads
+ # reap_children
+ # strip_python_stderr
+ # args_from_interpreter_flags
+ # can_symlink
+ # skip_unless_symlink
+ # SuppressCrashReport
+
+
+def test_main():
+ tests = [TestSupport]
+ support.run_unittest(*tests)
+
+if __name__ == '__main__':
+ test_main()
diff --git a/lib-python/2.7/test/test_thread.py b/lib-python/2.7/test/test_thread.py
index 40e6566c54..a04e960305 100644
--- a/lib-python/2.7/test/test_thread.py
+++ b/lib-python/2.7/test/test_thread.py
@@ -1,8 +1,8 @@
import os
import unittest
import random
-from test import test_support
-thread = test_support.import_module('thread')
+from test import support
+thread = support.import_module('thread')
import time
import sys
import weakref
@@ -17,11 +17,12 @@ _print_mutex = thread.allocate_lock()
def verbose_print(arg):
"""Helper function for printing out debugging output."""
- if test_support.verbose:
+ if support.verbose:
with _print_mutex:
print arg
+
class BasicThreadTest(unittest.TestCase):
def setUp(self):
@@ -33,6 +34,9 @@ class BasicThreadTest(unittest.TestCase):
self.running = 0
self.next_ident = 0
+ key = support.threading_setup()
+ self.addCleanup(support.threading_cleanup, *key)
+
class ThreadRunningTests(BasicThreadTest):
@@ -56,12 +60,13 @@ class ThreadRunningTests(BasicThreadTest):
self.done_mutex.release()
def test_starting_threads(self):
- # Basic test for thread creation.
- for i in range(NUMTASKS):
- self.newtask()
- verbose_print("waiting for tasks to complete...")
- self.done_mutex.acquire()
- verbose_print("all tasks done")
+ with support.wait_threads_exit():
+ # Basic test for thread creation.
+ for i in range(NUMTASKS):
+ self.newtask()
+ verbose_print("waiting for tasks to complete...")
+ self.done_mutex.acquire()
+ verbose_print("all tasks done")
def test_stack_size(self):
# Various stack size tests.
@@ -91,12 +96,13 @@ class ThreadRunningTests(BasicThreadTest):
verbose_print("trying stack_size = (%d)" % tss)
self.next_ident = 0
self.created = 0
- for i in range(NUMTASKS):
- self.newtask()
+ with support.wait_threads_exit():
+ for i in range(NUMTASKS):
+ self.newtask()
- verbose_print("waiting for all tasks to complete")
- self.done_mutex.acquire()
- verbose_print("all tasks done")
+ verbose_print("waiting for all tasks to complete")
+ self.done_mutex.acquire()
+ verbose_print("all tasks done")
thread.stack_size(0)
@@ -106,26 +112,30 @@ class ThreadRunningTests(BasicThreadTest):
mut = thread.allocate_lock()
mut.acquire()
started = []
+
def task():
started.append(None)
mut.acquire()
mut.release()
- thread.start_new_thread(task, ())
- while not started:
- time.sleep(0.01)
- self.assertEqual(thread._count(), orig + 1)
- # Allow the task to finish.
- mut.release()
- # The only reliable way to be sure that the thread ended from the
- # interpreter's point of view is to wait for the function object to be
- # destroyed.
- done = []
- wr = weakref.ref(task, lambda _: done.append(None))
- del task
- while not done:
- time.sleep(0.01)
- test_support.gc_collect()
- self.assertEqual(thread._count(), orig)
+
+ with support.wait_threads_exit():
+ thread.start_new_thread(task, ())
+ while not started:
+ time.sleep(0.01)
+ self.assertEqual(thread._count(), orig + 1)
+ # Allow the task to finish.
+ mut.release()
+ # The only reliable way to be sure that the thread ended from the
+ # interpreter's point of view is to wait for the function object to be
+ # destroyed.
+ done = []
+ wr = weakref.ref(task, lambda _: done.append(None))
+ del task
+ while not done:
+ time.sleep(0.01)
+ # pypy: explicitly collect garbage
+ support.gc_collect()
+ self.assertEqual(thread._count(), orig)
def test_save_exception_state_on_error(self):
# See issue #14474
@@ -140,14 +150,13 @@ class ThreadRunningTests(BasicThreadTest):
real_write(self, *args)
c = thread._count()
started = thread.allocate_lock()
- with test_support.captured_output("stderr") as stderr:
+ with support.captured_output("stderr") as stderr:
real_write = stderr.write
stderr.write = mywrite
started.acquire()
- thread.start_new_thread(task, ())
- started.acquire()
- while thread._count() > c:
- time.sleep(0.01)
+ with support.wait_threads_exit():
+ thread.start_new_thread(task, ())
+ started.acquire()
self.assertIn("Traceback", stderr.getvalue())
@@ -179,13 +188,14 @@ class Barrier:
class BarrierTest(BasicThreadTest):
def test_barrier(self):
- self.bar = Barrier(NUMTASKS)
- self.running = NUMTASKS
- for i in range(NUMTASKS):
- thread.start_new_thread(self.task2, (i,))
- verbose_print("waiting for tasks to end")
- self.done_mutex.acquire()
- verbose_print("tasks done")
+ with support.wait_threads_exit():
+ self.bar = Barrier(NUMTASKS)
+ self.running = NUMTASKS
+ for i in range(NUMTASKS):
+ thread.start_new_thread(self.task2, (i,))
+ verbose_print("waiting for tasks to end")
+ self.done_mutex.acquire()
+ verbose_print("tasks done")
def task2(self, ident):
for i in range(NUMTRIPS):
@@ -223,8 +233,9 @@ class TestForkInThread(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith('win'),
"This test is only appropriate for POSIX-like systems.")
- @test_support.reap_threads
+ @support.reap_threads
def test_forkinthread(self):
+ non_local = {'status': None}
def thread1():
try:
pid = os.fork() # fork in a thread
@@ -242,10 +253,14 @@ class TestForkInThread(unittest.TestCase):
os._exit(0)
else: # parent
os.close(self.write_fd)
+ pid, status = os.waitpid(pid, 0)
+ non_local['status'] = status
- thread.start_new_thread(thread1, ())
- self.assertEqual(os.read(self.read_fd, 2), "OK",
- "Unable to fork() in thread")
+ with support.wait_threads_exit():
+ thread.start_new_thread(thread1, ())
+ self.assertEqual(os.read(self.read_fd, 2), "OK",
+ "Unable to fork() in thread")
+ self.assertEqual(non_local['status'], 0)
def tearDown(self):
try:
@@ -260,7 +275,7 @@ class TestForkInThread(unittest.TestCase):
def test_main():
- test_support.run_unittest(ThreadRunningTests, BarrierTest, LockTests,
+ support.run_unittest(ThreadRunningTests, BarrierTest, LockTests,
TestForkInThread)
if __name__ == "__main__":
diff --git a/lib-python/2.7/test/test_threadsignals.py b/lib-python/2.7/test/test_threadsignals.py
index 2f7eb607c7..3d79fd508e 100644
--- a/lib-python/2.7/test/test_threadsignals.py
+++ b/lib-python/2.7/test/test_threadsignals.py
@@ -52,9 +52,11 @@ class ThreadSignals(unittest.TestCase):
# wait for it return.
if signal_blackboard[signal.SIGUSR1]['tripped'] == 0 \
or signal_blackboard[signal.SIGUSR2]['tripped'] == 0:
- signal.alarm(1)
- signal.pause()
- signal.alarm(0)
+ try:
+ signal.alarm(1)
+ signal.pause()
+ finally:
+ signal.alarm(0)
self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped'], 1)
self.assertEqual( signal_blackboard[signal.SIGUSR1]['tripped_by'],
diff --git a/lib-python/2.7/test/test_time.py b/lib-python/2.7/test/test_time.py
index 4571c108d6..0b2ae41e69 100644
--- a/lib-python/2.7/test/test_time.py
+++ b/lib-python/2.7/test/test_time.py
@@ -2,6 +2,12 @@ from test import test_support
import time
import unittest
import sys
+import sysconfig
+
+
+# Max year is only limited by the size of C int.
+SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
+TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
class TimeTestCase(unittest.TestCase):
@@ -45,6 +51,66 @@ class TimeTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
time.strftime('%f')
+ def _bounds_checking(self, func):
+ # Make sure that strftime() checks the bounds of the various parts
+ # of the time tuple (0 is valid for *all* values).
+
+ # The year field is tested by other test cases above
+
+ # Check month [1, 12] + zero support
+ func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
+ func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, -1, 1, 0, 0, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 13, 1, 0, 0, 0, 0, 1, -1))
+ # Check day of month [1, 31] + zero support
+ func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
+ func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, -1, 0, 0, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 32, 0, 0, 0, 0, 1, -1))
+ # Check hour [0, 23]
+ func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, -1, 0, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 24, 0, 0, 0, 1, -1))
+ # Check minute [0, 59]
+ func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, -1, 0, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, 60, 0, 0, 1, -1))
+ # Check second [0, 61]
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, 0, -1, 0, 1, -1))
+ # C99 only requires allowing for one leap second, but Python's docs say
+ # allow two leap seconds (0..61)
+ func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
+ func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, 0, 62, 0, 1, -1))
+ # No check for upper-bound day of week;
+ # value forced into range by a ``% 7`` calculation.
+ # Start check at -2 since gettmarg() increments value before taking
+ # modulo.
+ self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
+ func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, 0, 0, -2, 1, -1))
+ # Check day of the year [1, 366] + zero support
+ func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
+ func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, 0, 0, 0, -1, -1))
+ self.assertRaises(ValueError, func,
+ (1900, 1, 1, 0, 0, 0, 0, 367, -1))
+
+ def test_strftime_bounding_check(self):
+ self._bounds_checking(lambda tup: time.strftime('', tup))
+
def test_strftime_bounds_checking(self):
# Make sure that strftime() checks the bounds of the various parts
#of the time tuple (0 is valid for *all* values).
@@ -123,15 +189,16 @@ class TimeTestCase(unittest.TestCase):
time.asctime(time.gmtime(self.t))
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
- # XXX: Posix compiant asctime should refuse to convert
- # year > 9999, but Linux implementation does not.
- # self.assertRaises(ValueError, time.asctime,
- # (12345, 1, 0, 0, 0, 0, 0, 0, 0))
- # XXX: For now, just make sure we don't have a crash:
- try:
- time.asctime((12345, 1, 1, 0, 0, 0, 0, 1, 0))
- except ValueError:
- pass
+
+ # Max year is only limited by the size of C int.
+ asc = time.asctime((TIME_MAXYEAR, 6, 1) + (0,) * 6)
+ self.assertEqual(asc[-len(str(TIME_MAXYEAR)):], str(TIME_MAXYEAR))
+ # pypy: raises ValueError instead of OverflowError on 64bit
+ self.assertRaises((ValueError, OverflowError), time.asctime,
+ (TIME_MAXYEAR + 1,) + (0,) * 8)
+ self.assertRaises(TypeError, time.asctime, 0)
+ self.assertRaises(TypeError, time.asctime, ())
+ self.assertRaises(TypeError, time.asctime, (0,) * 10)
@unittest.skipIf(not hasattr(time, "tzset"),
"time module has no attribute tzset")
diff --git a/lib-python/2.7/test/test_timeout.py b/lib-python/2.7/test/test_timeout.py
index bb9252d1a4..51690335b7 100644
--- a/lib-python/2.7/test/test_timeout.py
+++ b/lib-python/2.7/test/test_timeout.py
@@ -106,6 +106,7 @@ class TimeoutTestCase(unittest.TestCase):
def tearDown(self):
self.sock.close()
+ @unittest.skipIf(True, 'need to replace these hosts; see bpo-35518')
def testConnectTimeout(self):
# Choose a private address that is unlikely to exist to prevent
# failures due to the connect succeeding before the timeout.
diff --git a/lib-python/2.7/test/test_tools.py b/lib-python/2.7/test/test_tools.py
index 57b3ef11fd..39116b3a79 100644
--- a/lib-python/2.7/test/test_tools.py
+++ b/lib-python/2.7/test/test_tools.py
@@ -416,12 +416,15 @@ class FixcidTests(unittest.TestCase):
with open(os.path.join(test_support.TESTFN, "file.py"), "w") as file:
file.write("xx = 'unaltered'\n")
script = os.path.join(scriptsdir, "fixcid.py")
- output = self.run_script(args=(test_support.TESTFN,))
+ # ignore dbg() messages
+ with test_support.captured_stderr() as stderr:
+ output = self.run_script(args=(test_support.TESTFN,))
self.assertMultiLineEqual(output,
"{}:\n"
"1\n"
'< int xx;\n'
- '> int yy;\n'.format(c_filename)
+ '> int yy;\n'.format(c_filename),
+ "stderr: %s" % stderr.getvalue()
)
def run_script(self, input="", args=("-",), substfile="xx yy\n"):
@@ -442,6 +445,33 @@ class FixcidTests(unittest.TestCase):
return output.getvalue()
+class LllTests(unittest.TestCase):
+
+ script = os.path.join(scriptsdir, 'lll.py')
+
+ @unittest.skipUnless(hasattr(os, 'symlink'), 'Requires symlink support')
+ def test_lll_multiple_dirs(self):
+ dir1 = tempfile.mkdtemp()
+ dir2 = tempfile.mkdtemp()
+ self.addCleanup(test_support.rmtree, dir1)
+ self.addCleanup(test_support.rmtree, dir2)
+ fn1 = os.path.join(dir1, 'foo1')
+ fn2 = os.path.join(dir2, 'foo2')
+ for fn, dir in (fn1, dir1), (fn2, dir2):
+ open(fn, 'w').close()
+ os.symlink(fn, os.path.join(dir, 'symlink'))
+
+ rc, out, err = assert_python_ok(self.script, dir1, dir2)
+ self.assertEqual(out,
+ '{dir1}:\n'
+ 'symlink -> {fn1}\n'
+ '\n'
+ '{dir2}:\n'
+ 'symlink -> {fn2}\n'
+ .format(dir1=dir1, fn1=fn1, dir2=dir2, fn2=fn2)
+ )
+
+
def test_main():
test_support.run_unittest(*[obj for obj in globals().values()
if isinstance(obj, type)])
diff --git a/lib-python/2.7/test/test_unicode.py b/lib-python/2.7/test/test_unicode.py
index 58456a5c52..b754b606e8 100644
--- a/lib-python/2.7/test/test_unicode.py
+++ b/lib-python/2.7/test/test_unicode.py
@@ -1820,7 +1820,7 @@ class CAPITest(unittest.TestCase):
b'repr=%V', None, b'abc\xff')
# not supported: copy the raw format string. these tests are just here
- # to check for crashs and should not be considered as specifications
+ # to check for crashes and should not be considered as specifications
check_format(u'%s',
b'%1%s', b'abc')
check_format(u'%1abc',
@@ -1830,6 +1830,12 @@ class CAPITest(unittest.TestCase):
check_format(u'%s',
b'%.%s', b'abc')
+ # Issue #33817: empty strings
+ check_format(u'',
+ b'')
+ check_format(u'',
+ b'%s', b'')
+
@test_support.cpython_only
def test_encode_decimal(self):
from _testcapi import unicode_encodedecimal
diff --git a/lib-python/2.7/test/test_unicodedata.py b/lib-python/2.7/test/test_unicodedata.py
index d974d3ce5a..fb7a6425ec 100644
--- a/lib-python/2.7/test/test_unicodedata.py
+++ b/lib-python/2.7/test/test_unicodedata.py
@@ -204,6 +204,19 @@ class UnicodeFunctionsTest(UnicodeDatabaseTest):
b = u'C\u0338' * 20 + u'\xC7'
self.assertEqual(self.db.normalize('NFC', a), b)
+ def test_issue29456(self):
+ # Fix #29456
+ u1176_str_a = u'\u1100\u1176\u11a8'
+ u1176_str_b = u'\u1100\u1176\u11a8'
+ u11a7_str_a = u'\u1100\u1175\u11a7'
+ u11a7_str_b = u'\uae30\u11a7'
+ u11c3_str_a = u'\u1100\u1175\u11c3'
+ u11c3_str_b = u'\uae30\u11c3'
+ self.assertEqual(self.db.normalize('NFC', u1176_str_a), u1176_str_b)
+ self.assertEqual(self.db.normalize('NFC', u11a7_str_a), u11a7_str_b)
+ self.assertEqual(self.db.normalize('NFC', u11c3_str_a), u11c3_str_b)
+
+
def test_east_asian_width(self):
eaw = self.db.east_asian_width
self.assertRaises(TypeError, eaw, 'a')
diff --git a/lib-python/2.7/test/test_urllib.py b/lib-python/2.7/test/test_urllib.py
index 7b1ef6a0b8..22b0874a92 100644
--- a/lib-python/2.7/test/test_urllib.py
+++ b/lib-python/2.7/test/test_urllib.py
@@ -185,11 +185,12 @@ class ProxyTests(unittest.TestCase):
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.proxy_bypass_environment
self.env.set('NO_PROXY',
- 'localhost, anotherdomain.com, newdomain.com:1234')
+ 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('newdomain.com:1234'))
+ self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
@@ -256,6 +257,31 @@ class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin):
finally:
self.unfakehttp()
+ def test_url_with_control_char_rejected(self):
+ for char_no in range(0, 0x21) + range(0x7f, 0x100):
+ char = chr(char_no)
+ schemeless_url = "//localhost:7777/test%s/" % char
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ try:
+ # urllib quotes the URL so there is no injection.
+ resp = urllib.urlopen("http:" + schemeless_url)
+ self.assertNotIn(char, resp.geturl())
+ finally:
+ self.unfakehttp()
+
+ def test_url_with_newline_header_injection_rejected(self):
+ self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
+ host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
+ schemeless_url = "//" + host + ":8080/test/?test=a"
+ try:
+ # urllib quotes the URL so there is no injection.
+ resp = urllib.urlopen("http:" + schemeless_url)
+ self.assertNotIn(' ', resp.geturl())
+ self.assertNotIn('\r', resp.geturl())
+ self.assertNotIn('\n', resp.geturl())
+ finally:
+ self.unfakehttp()
+
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp('''HTTP/1.1 401 Authentication Required
@@ -878,6 +904,26 @@ class Utility_Tests(unittest.TestCase):
self.assertEqual(splithost('/foo/bar/baz.html'),
(None, '/foo/bar/baz.html'))
+ # bpo-30500: # starts a fragment.
+ self.assertEqual(splithost('//127.0.0.1#@host.com'),
+ ('127.0.0.1', '/#@host.com'))
+ self.assertEqual(splithost('//127.0.0.1#@host.com:80'),
+ ('127.0.0.1', '/#@host.com:80'))
+ self.assertEqual(splithost('//127.0.0.1:80#@host.com'),
+ ('127.0.0.1:80', '/#@host.com'))
+
+ # Empty host is returned as empty string.
+ self.assertEqual(splithost("///file"),
+ ('', '/file'))
+
+ # Trailing semicolon, question mark and hash symbol are kept.
+ self.assertEqual(splithost("//example.net/file;"),
+ ('example.net', '/file;'))
+ self.assertEqual(splithost("//example.net/file?"),
+ ('example.net', '/file?'))
+ self.assertEqual(splithost("//example.net/file#"),
+ ('example.net', '/file#'))
+
def test_splituser(self):
splituser = urllib.splituser
self.assertEqual(splituser('User:Pass@www.python.org:080'),
@@ -1002,6 +1048,17 @@ class URLopener_Tests(unittest.TestCase):
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
+ def test_local_file_open(self):
+ # bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
+ class DummyURLopener(urllib.URLopener):
+ def open_local_file(self, url):
+ return url
+ for url in ('local_file://example', 'local-file://example'):
+ self.assertRaises(IOError, urllib.urlopen, url)
+ self.assertRaises(IOError, urllib.URLopener().open, url)
+ self.assertRaises(IOError, urllib.URLopener().retrieve, url)
+ self.assertRaises(IOError, DummyURLopener().open, url)
+ self.assertRaises(IOError, DummyURLopener().retrieve, url)
# Just commented them out.
# Can't really tell why keep failing in windows and sparc.
diff --git a/lib-python/2.7/test/test_urllib2.py b/lib-python/2.7/test/test_urllib2.py
index f0cbab0576..1829799a93 100644
--- a/lib-python/2.7/test/test_urllib2.py
+++ b/lib-python/2.7/test/test_urllib2.py
@@ -1145,21 +1145,21 @@ class HandlerTests(unittest.TestCase):
def test_basic_auth(self):
realm = "realm2@example.com"
realm2 = "realm2@example.com"
- basic = 'Basic realm="%s"' % (realm,)
- basic2 = 'Basic realm="%s"' % (realm2,)
+ basic = 'Basic realm="{realm}"'.format(realm=realm)
+ basic2 = 'Basic realm="{realm2}"'.format(realm2=realm2)
other_no_realm = 'Otherscheme xxx'
- digest = ('Digest realm="%s", '
+ digest = ('Digest realm="{realm2}", '
'qop="auth, auth-int", '
'nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093", '
- 'opaque="5ccc069c403ebaf9f0171e9517f40e41"') % (
- (realm2,))
+ 'opaque="5ccc069c403ebaf9f0171e9517f40e41"'
+ .format(realm2=realm2))
for realm_str in (
# test "quote" and 'quote'
- 'Basic realm="%s"' % (realm,),
- "Basic realm='%s'" % (realm,),
+ 'Basic realm="{realm}"'.format(realm=realm),
+ "Basic realm='{realm}'".format(realm=realm),
# charset is ignored
- 'Basic realm="%s", charset="UTF-8"' % (realm,),
+ 'Basic realm="{realm}", charset="UTF-8"'.format(realm=realm),
# Multiple challenges per header
', '.join((basic, basic2)),
@@ -1168,13 +1168,15 @@ class HandlerTests(unittest.TestCase):
', '.join((basic, digest)),
', '.join((digest, basic)),
):
- headers = ['WWW-Authenticate: %s' % (realm_str,)]
+ headers = ['WWW-Authenticate: {realm_str}'
+ .format(realm_str=realm_str)]
self.check_basic_auth(headers, realm)
# no quote: expect a warning
with test_support.check_warnings(("Basic Auth Realm was unquoted",
UserWarning)):
- headers = ['WWW-Authenticate: Basic realm=%s' % (realm,)]
+ headers = ['WWW-Authenticate: Basic realm={realm}'
+ .format(realm=realm)]
self.check_basic_auth(headers, realm)
# Multiple headers: one challenge per header.
@@ -1184,7 +1186,8 @@ class HandlerTests(unittest.TestCase):
[basic, digest],
[digest, basic],
):
- headers = ['WWW-Authenticate: %s' % (challenge,)
+ headers = ['WWW-Authenticate: {challenge}'
+ .format(challenge=challenge)
for challenge in challenges]
self.check_basic_auth(headers, realm)
diff --git a/lib-python/2.7/test/test_urllib2_localnet.py b/lib-python/2.7/test/test_urllib2_localnet.py
index 9199cb9310..932b57223a 100644
--- a/lib-python/2.7/test/test_urllib2_localnet.py
+++ b/lib-python/2.7/test/test_urllib2_localnet.py
@@ -278,6 +278,7 @@ class BaseTestCase(unittest.TestCase):
self._threads = test_support.threading_setup()
def tearDown(self):
+ self.doCleanups()
test_support.threading_cleanup(*self._threads)
@@ -296,10 +297,7 @@ class BasicAuthTests(BaseTestCase):
self.server_url = 'http://127.0.0.1:%s' % self.server.port
self.server.start()
self.server.ready.wait()
-
- def tearDown(self):
- self.server.stop()
- super(BasicAuthTests, self).tearDown()
+ self.addCleanup(self.server.stop)
def test_basic_auth_success(self):
ah = urllib2.HTTPBasicAuthHandler()
@@ -347,15 +345,12 @@ class ProxyAuthTests(BaseTestCase):
self.server = LoopbackHttpServerThread(create_fake_proxy_handler)
self.server.start()
self.server.ready.wait()
+ self.addCleanup(self.server.stop)
proxy_url = "http://127.0.0.1:%d" % self.server.port
handler = urllib2.ProxyHandler({"http" : proxy_url})
self.proxy_digest_handler = urllib2.ProxyDigestAuthHandler()
self.opener = urllib2.build_opener(handler, self.proxy_digest_handler)
- def tearDown(self):
- self.server.stop()
- super(ProxyAuthTests, self).tearDown()
-
def test_proxy_with_bad_password_raises_httperror(self):
self.proxy_digest_handler.add_password(self.REALM, self.URL,
self.USER, self.PASSWD+"bad")
@@ -472,6 +467,7 @@ class TestUrlopen(BaseTestCase):
self.server = LoopbackHttpServerThread(handler)
self.server.start()
self.server.ready.wait()
+ self.addCleanup(self.server.stop)
port = self.server.port
handler.port = port
return handler
@@ -496,15 +492,12 @@ class TestUrlopen(BaseTestCase):
handler = self.start_server(responses)
- try:
- f = urllib2.urlopen('http://localhost:%s/' % handler.port)
- data = f.read()
- f.close()
+ f = urllib2.urlopen('http://localhost:%s/' % handler.port)
+ data = f.read()
+ f.close()
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/', '/somewhere_else'])
- finally:
- self.server.stop()
+ self.assertEqual(data, expected_response)
+ self.assertEqual(handler.requests, ['/', '/somewhere_else'])
def test_404(self):
@@ -512,49 +505,40 @@ class TestUrlopen(BaseTestCase):
handler = self.start_server([(404, [], expected_response)])
try:
- try:
- urllib2.urlopen('http://localhost:%s/weeble' % handler.port)
- except urllib2.URLError, f:
- pass
- else:
- self.fail('404 should raise URLError')
+ urllib2.urlopen('http://localhost:%s/weeble' % handler.port)
+ except urllib2.URLError, f:
+ pass
+ else:
+ self.fail('404 should raise URLError')
- data = f.read()
- f.close()
+ data = f.read()
+ f.close()
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/weeble'])
- finally:
- self.server.stop()
+ self.assertEqual(data, expected_response)
+ self.assertEqual(handler.requests, ['/weeble'])
def test_200(self):
expected_response = 'pycon 2008...'
handler = self.start_server([(200, [], expected_response)])
- try:
- f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port)
- data = f.read()
- f.close()
+ f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port)
+ data = f.read()
+ f.close()
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/bizarre'])
- finally:
- self.server.stop()
+ self.assertEqual(data, expected_response)
+ self.assertEqual(handler.requests, ['/bizarre'])
def test_200_with_parameters(self):
expected_response = 'pycon 2008...'
handler = self.start_server([(200, [], expected_response)])
- try:
- f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port, 'get=with_feeling')
- data = f.read()
- f.close()
+ f = urllib2.urlopen('http://localhost:%s/bizarre' % handler.port, 'get=with_feeling')
+ data = f.read()
+ f.close()
- self.assertEqual(data, expected_response)
- self.assertEqual(handler.requests, ['/bizarre', 'get=with_feeling'])
- finally:
- self.server.stop()
+ self.assertEqual(data, expected_response)
+ self.assertEqual(handler.requests, ['/bizarre', 'get=with_feeling'])
def test_https(self):
handler = self.start_https_server()
@@ -593,7 +577,7 @@ class TestUrlopen(BaseTestCase):
sni_name = [None]
def cb_sni(ssl_sock, server_name, initial_context):
sni_name[0] = server_name
- context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
+ context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.set_servername_callback(cb_sni)
handler = self.start_https_server(context=context, certfile=CERT_localhost)
context = ssl.create_default_context(cafile=CERT_localhost)
@@ -603,52 +587,40 @@ class TestUrlopen(BaseTestCase):
def test_sending_headers(self):
handler = self.start_server([(200, [], "we don't care")])
- try:
- req = urllib2.Request("http://localhost:%s/" % handler.port,
- headers={'Range': 'bytes=20-39'})
- urllib2.urlopen(req)
- self.assertEqual(handler.headers_received['Range'], 'bytes=20-39')
- finally:
- self.server.stop()
+ req = urllib2.Request("http://localhost:%s/" % handler.port,
+ headers={'Range': 'bytes=20-39'})
+ urllib2.urlopen(req)
+ self.assertEqual(handler.headers_received['Range'], 'bytes=20-39')
def test_basic(self):
handler = self.start_server([(200, [], "we don't care")])
+ open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
+ for attr in ("read", "close", "info", "geturl"):
+ self.assertTrue(hasattr(open_url, attr), "object returned from "
+ "urlopen lacks the %s attribute" % attr)
try:
- open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
- for attr in ("read", "close", "info", "geturl"):
- self.assertTrue(hasattr(open_url, attr), "object returned from "
- "urlopen lacks the %s attribute" % attr)
- try:
- self.assertTrue(open_url.read(), "calling 'read' failed")
- finally:
- open_url.close()
+ self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
- self.server.stop()
+ open_url.close()
def test_info(self):
handler = self.start_server([(200, [], "we don't care")])
- try:
- open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
- info_obj = open_url.info()
- self.assertIsInstance(info_obj, mimetools.Message,
- "object returned by 'info' is not an "
- "instance of mimetools.Message")
- self.assertEqual(info_obj.getsubtype(), "plain")
- finally:
- self.server.stop()
+ open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
+ info_obj = open_url.info()
+ self.assertIsInstance(info_obj, mimetools.Message,
+ "object returned by 'info' is not an "
+ "instance of mimetools.Message")
+ self.assertEqual(info_obj.getsubtype(), "plain")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
handler = self.start_server([(200, [], "we don't care")])
- try:
- open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
- url = open_url.geturl()
- self.assertEqual(url, "http://localhost:%s" % handler.port)
- finally:
- self.server.stop()
+ open_url = urllib2.urlopen("http://localhost:%s" % handler.port)
+ url = open_url.geturl()
+ self.assertEqual(url, "http://localhost:%s" % handler.port)
def test_bad_address(self):
@@ -682,26 +654,21 @@ class TestUrlopen(BaseTestCase):
def test_iteration(self):
expected_response = "pycon 2008..."
handler = self.start_server([(200, [], expected_response)])
- try:
- data = urllib2.urlopen("http://localhost:%s" % handler.port)
- for line in data:
- self.assertEqual(line, expected_response)
- finally:
- self.server.stop()
+
+ data = urllib2.urlopen("http://localhost:%s" % handler.port)
+ for line in data:
+ self.assertEqual(line, expected_response)
def ztest_line_iteration(self):
lines = ["We\n", "got\n", "here\n", "verylong " * 8192 + "\n"]
expected_response = "".join(lines)
handler = self.start_server([(200, [], expected_response)])
- try:
- data = urllib2.urlopen("http://localhost:%s" % handler.port)
- for index, line in enumerate(data):
- self.assertEqual(line, lines[index],
- "Fetched line number %s doesn't match expected:\n"
- " Expected length was %s, got %s" %
- (index, len(lines[index]), len(line)))
- finally:
- self.server.stop()
+ data = urllib2.urlopen("http://localhost:%s" % handler.port)
+ for index, line in enumerate(data):
+ self.assertEqual(line, lines[index],
+ "Fetched line number %s doesn't match expected:\n"
+ " Expected length was %s, got %s" %
+ (index, len(lines[index]), len(line)))
self.assertEqual(index + 1, len(lines))
def test_main():
diff --git a/lib-python/2.7/test/test_urllib2net.py b/lib-python/2.7/test/test_urllib2net.py
index d50cc927b3..ff5278e619 100644
--- a/lib-python/2.7/test/test_urllib2net.py
+++ b/lib-python/2.7/test/test_urllib2net.py
@@ -25,6 +25,13 @@ def _wrap_with_retry_thrice(func, exc):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
+# bpo-35411: FTP tests of test_urllib2net randomly fail
+# with "425 Security: Bad IP connecting" on Travis CI
+skip_ftp_test_on_travis = unittest.skipIf('TRAVIS' in os.environ,
+ 'bpo-35411: skip FTP test '
+ 'on Travis CI')
+
+
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib2.urlopen, urllib2.URLError)
@@ -78,7 +85,7 @@ class CloseSocketTest(unittest.TestCase):
# underlying socket
# delve deep into response to fetch socket._socketobject
- response = _urlopen_with_retry("http://www.example.com/")
+ response = _urlopen_with_retry(test_support.TEST_HTTP_URL)
abused_fileobject = response.fp
self.assertIs(abused_fileobject.__class__, socket._fileobject)
httpresponse = abused_fileobject._sock
@@ -100,10 +107,11 @@ class OtherNetworkTests(unittest.TestCase):
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
+ @skip_ftp_test_on_travis
def test_ftp(self):
urls = [
- 'ftp://ftp.debian.org/debian/README',
- ('ftp://ftp.debian.org/debian/non-existent-file',
+ 'ftp://www.pythontest.net/README',
+ ('ftp://www.pythontest.net/non-existent-file',
None, urllib2.URLError),
]
self._test_urls(urls, self._extra_handlers())
@@ -161,7 +169,7 @@ class OtherNetworkTests(unittest.TestCase):
"http://www.pythontest.net/index.html#frag")
def test_fileno(self):
- req = urllib2.Request("http://www.example.com")
+ req = urllib2.Request(test_support.TEST_HTTP_URL)
opener = urllib2.build_opener()
res = opener.open(req)
try:
@@ -172,7 +180,7 @@ class OtherNetworkTests(unittest.TestCase):
res.close()
def test_custom_headers(self):
- url = "http://www.example.com"
+ url = test_support.TEST_HTTP_URL
with test_support.transient_internet(url):
opener = urllib2.build_opener()
request = urllib2.Request(url)
@@ -184,6 +192,7 @@ class OtherNetworkTests(unittest.TestCase):
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
+ @unittest.skip('XXX: http://www.imdb.com is gone')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
@@ -249,7 +258,7 @@ class OtherNetworkTests(unittest.TestCase):
class TimeoutTest(unittest.TestCase):
def test_http_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
- url = "http://www.example.com"
+ url = test_support.TEST_HTTP_URL
with test_support.transient_internet(url, timeout=None):
u = _urlopen_with_retry(url)
self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
@@ -257,7 +266,7 @@ class TimeoutTest(unittest.TestCase):
def test_http_default_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
- url = "http://www.example.com"
+ url = test_support.TEST_HTTP_URL
with test_support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
@@ -269,7 +278,7 @@ class TimeoutTest(unittest.TestCase):
def test_http_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
- url = "http://www.example.com"
+ url = test_support.TEST_HTTP_URL
with test_support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
@@ -280,7 +289,7 @@ class TimeoutTest(unittest.TestCase):
u.close()
def test_http_timeout(self):
- url = "http://www.example.com"
+ url = test_support.TEST_HTTP_URL
with test_support.transient_internet(url):
u = _urlopen_with_retry(url, timeout=120)
self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
@@ -288,6 +297,7 @@ class TimeoutTest(unittest.TestCase):
FTP_HOST = 'ftp://www.pythontest.net/'
+ @skip_ftp_test_on_travis
def test_ftp_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
with test_support.transient_internet(self.FTP_HOST, timeout=None):
@@ -295,6 +305,7 @@ class TimeoutTest(unittest.TestCase):
self.assertIsNone(u.fp.fp._sock.gettimeout())
u.close()
+ @skip_ftp_test_on_travis
def test_ftp_default_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
with test_support.transient_internet(self.FTP_HOST):
@@ -306,6 +317,7 @@ class TimeoutTest(unittest.TestCase):
self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
u.close()
+ @skip_ftp_test_on_travis
def test_ftp_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout(),)
with test_support.transient_internet(self.FTP_HOST):
@@ -317,6 +329,7 @@ class TimeoutTest(unittest.TestCase):
self.assertIsNone(u.fp.fp._sock.gettimeout())
u.close()
+ @skip_ftp_test_on_travis
def test_ftp_timeout(self):
with test_support.transient_internet(self.FTP_HOST):
try:
diff --git a/lib-python/2.7/test/test_urllibnet.py b/lib-python/2.7/test/test_urllibnet.py
index a4b4d92503..ef33e3a0ea 100644
--- a/lib-python/2.7/test/test_urllibnet.py
+++ b/lib-python/2.7/test/test_urllibnet.py
@@ -1,5 +1,6 @@
import unittest
from test import test_support
+from test.test_urllib2net import skip_ftp_test_on_travis
import socket
import urllib
@@ -43,7 +44,7 @@ class URLTimeoutTest(unittest.TestCase):
socket.setdefaulttimeout(None)
def testURLread(self):
- f = _open_with_retry(urllib.urlopen, "http://www.example.com/")
+ f = _open_with_retry(urllib.urlopen, test_support.TEST_HTTP_URL)
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
@@ -66,7 +67,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_basic(self):
# Simple test expected to pass.
- open_url = self.urlopen("http://www.example.com/")
+ open_url = self.urlopen(test_support.TEST_HTTP_URL)
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
@@ -78,7 +79,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_readlines(self):
# Test both readline and readlines.
- open_url = self.urlopen("http://www.example.com/")
+ open_url = self.urlopen(test_support.TEST_HTTP_URL)
try:
self.assertIsInstance(open_url.readline(), basestring,
"readline did not return a string")
@@ -89,7 +90,7 @@ class urlopenNetworkTests(unittest.TestCase):
def test_info(self):
# Test 'info'.
- open_url = self.urlopen("http://www.example.com/")
+ open_url = self.urlopen(test_support.TEST_HTTP_URL)
try:
info_obj = open_url.info()
finally:
@@ -101,13 +102,12 @@ class urlopenNetworkTests(unittest.TestCase):
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
- URL = "http://www.example.com/"
- open_url = self.urlopen(URL)
+ open_url = self.urlopen(test_support.TEST_HTTP_URL)
try:
gotten_url = open_url.geturl()
finally:
open_url.close()
- self.assertEqual(gotten_url, URL)
+ self.assertEqual(gotten_url, test_support.TEST_HTTP_URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
@@ -123,12 +123,13 @@ class urlopenNetworkTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'fdopen'), 'os.fdopen not available')
def test_fileno(self):
# Make sure fd returned by fileno is valid.
- open_url = self.urlopen("http://www.example.com/")
+ open_url = self.urlopen(test_support.TEST_HTTP_URL)
fd = open_url.fileno()
FILE = os.fdopen(fd)
try:
- self.assertTrue(FILE.read(), "reading from file created using fd "
- "returned by fileno failed")
+ self.assertTrue(FILE.read(),
+ "reading from file created using fd "
+ "returned by fileno failed")
finally:
FILE.close()
@@ -161,7 +162,7 @@ class urlretrieveNetworkTests(unittest.TestCase):
def test_basic(self):
# Test basic functionality.
- file_location,info = self.urlretrieve("http://www.example.com/")
+ file_location,info = self.urlretrieve(test_support.TEST_HTTP_URL)
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
FILE = file(file_location)
@@ -174,7 +175,7 @@ class urlretrieveNetworkTests(unittest.TestCase):
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
- file_location,info = self.urlretrieve("http://www.example.com/",
+ file_location,info = self.urlretrieve(test_support.TEST_HTTP_URL,
test_support.TESTFN)
self.assertEqual(file_location, test_support.TESTFN)
self.assertTrue(os.path.exists(file_location))
@@ -187,13 +188,13 @@ class urlretrieveNetworkTests(unittest.TestCase):
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
- file_location, header = self.urlretrieve("http://www.example.com/")
+ file_location, header = self.urlretrieve(test_support.TEST_HTTP_URL)
os.unlink(file_location)
self.assertIsInstance(header, mimetools.Message,
"header is not an instance of mimetools.Message")
def test_data_header(self):
- logo = "http://www.example.com/"
+ logo = test_support.TEST_HTTP_URL
file_location, fileheaders = self.urlretrieve(logo)
os.unlink(file_location)
datevalue = fileheaders.getheader('Date')
@@ -213,6 +214,41 @@ class urlopen_HttpsTests(unittest.TestCase):
self.assertIn("Python", response.read())
+class urlopen_FTPTest(unittest.TestCase):
+ FTP_TEST_FILE = 'ftp://www.pythontest.net/README'
+ NUM_FTP_RETRIEVES = 3
+
+ @skip_ftp_test_on_travis
+ def test_multiple_ftp_retrieves(self):
+
+ with test_support.transient_internet(self.FTP_TEST_FILE):
+ try:
+ for file_num in range(self.NUM_FTP_RETRIEVES):
+ with test_support.temp_dir() as td:
+ urllib.FancyURLopener().retrieve(self.FTP_TEST_FILE,
+ os.path.join(td, str(file_num)))
+ except IOError as e:
+ self.fail("Failed FTP retrieve while accessing ftp url "
+ "multiple times.\n Error message was : %s" % e)
+
+ @skip_ftp_test_on_travis
+ def test_multiple_ftp_urlopen_same_host(self):
+ with test_support.transient_internet(self.FTP_TEST_FILE):
+ ftp_fds_to_close = []
+ try:
+ for _ in range(self.NUM_FTP_RETRIEVES):
+ fd = urllib.urlopen(self.FTP_TEST_FILE)
+ # test ftp open without closing fd as a supported scenario.
+ ftp_fds_to_close.append(fd)
+ except IOError as e:
+ self.fail("Failed FTP binary file open. "
+ "Error message was: %s" % e)
+ finally:
+ # close the open fds
+ for fd in ftp_fds_to_close:
+ fd.close()
+
+
def test_main():
test_support.requires('network')
with test_support.check_py3k_warnings(
@@ -220,7 +256,8 @@ def test_main():
test_support.run_unittest(URLTimeoutTest,
urlopenNetworkTests,
urlretrieveNetworkTests,
- urlopen_HttpsTests)
+ urlopen_HttpsTests,
+ urlopen_FTPTest)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_urlparse.py b/lib-python/2.7/test/test_urlparse.py
index 4e1ded73c2..86c4a0595c 100644
--- a/lib-python/2.7/test/test_urlparse.py
+++ b/lib-python/2.7/test/test_urlparse.py
@@ -1,4 +1,6 @@
from test import test_support
+import sys
+import unicodedata
import unittest
import urlparse
@@ -624,6 +626,45 @@ class UrlParseTestCase(unittest.TestCase):
self.assertEqual(urlparse.urlparse("http://www.python.org:80"),
('http','www.python.org:80','','','',''))
+ def test_urlsplit_normalization(self):
+ # Certain characters should never occur in the netloc,
+ # including under normalization.
+ # Ensure that ALL of them are detected and cause an error
+ illegal_chars = u'/:#?@'
+ hex_chars = {'{:04X}'.format(ord(c)) for c in illegal_chars}
+ denorm_chars = [
+ c for c in map(unichr, range(128, sys.maxunicode))
+ if (hex_chars & set(unicodedata.decomposition(c).split()))
+ and c not in illegal_chars
+ ]
+ # Sanity check that we found at least one such character
+ self.assertIn(u'\u2100', denorm_chars)
+ self.assertIn(u'\uFF03', denorm_chars)
+
+ # bpo-36742: Verify port separators are ignored when they
+ # existed prior to decomposition
+ urlparse.urlsplit(u'http://\u30d5\u309a:80')
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(u'http://\u30d5\u309a\ufe1380')
+
+ for scheme in [u"http", u"https", u"ftp"]:
+ for netloc in [u"netloc{}false.netloc", u"n{}user@netloc"]:
+ for c in denorm_chars:
+ url = u"{}://{}/path".format(scheme, netloc.format(c))
+ if test_support.verbose:
+ print "Checking %r" % url
+ with self.assertRaises(ValueError):
+ urlparse.urlsplit(url)
+
+ # check error message: invalid netloc must be formated with repr()
+ # to get an ASCII error message
+ with self.assertRaises(ValueError) as cm:
+ urlparse.urlsplit(u'http://example.com\uFF03@bing.com')
+ self.assertEqual(str(cm.exception),
+ "netloc u'example.com\\uff03@bing.com' contains invalid characters "
+ "under NFKC normalization")
+ self.assertIsInstance(cm.exception.args[0], str)
+
def test_main():
test_support.run_unittest(UrlParseTestCase)
diff --git a/lib-python/2.7/test/test_uu.py b/lib-python/2.7/test/test_uu.py
index 51a4fbeaf9..f016bb2c67 100644
--- a/lib-python/2.7/test/test_uu.py
+++ b/lib-python/2.7/test/test_uu.py
@@ -4,10 +4,12 @@ Nick Mathewson
"""
import unittest
-from test import test_support
+from test import test_support as support
-import sys, os, uu, cStringIO
+import cStringIO
+import sys
import uu
+import io
plaintext = "The smooth-scaled python crept over the sleeping dog\n"
@@ -81,6 +83,15 @@ class UUTest(unittest.TestCase):
decoded = codecs.decode(encodedtext, "uu_codec")
self.assertEqual(decoded, plaintext)
+ def test_newlines_escaped(self):
+ # Test newlines are escaped with uu.encode
+ inp = io.BytesIO(plaintext)
+ out = io.BytesIO()
+ filename = "test.txt\n\roverflow.txt"
+ safefilename = b"test.txt\\n\\roverflow.txt"
+ uu.encode(inp, out, filename)
+ self.assertIn(safefilename, out.getvalue())
+
class UUStdIOTest(unittest.TestCase):
def setUp(self):
@@ -108,114 +119,64 @@ class UUStdIOTest(unittest.TestCase):
class UUFileTest(unittest.TestCase):
- def _kill(self, f):
- # close and remove file
- try:
- f.close()
- except (SystemExit, KeyboardInterrupt):
- raise
- except:
- pass
- try:
- os.unlink(f.name)
- except (SystemExit, KeyboardInterrupt):
- raise
- except:
- pass
-
def setUp(self):
- self.tmpin = test_support.TESTFN + "i"
- self.tmpout = test_support.TESTFN + "o"
-
- def tearDown(self):
- del self.tmpin
- del self.tmpout
+ self.tmpin = support.TESTFN + "i"
+ self.tmpout = support.TESTFN + "o"
+ self.addCleanup(support.unlink, self.tmpin)
+ self.addCleanup(support.unlink, self.tmpout)
def test_encode(self):
- fin = fout = None
- try:
- test_support.unlink(self.tmpin)
- fin = open(self.tmpin, 'wb')
+ with open(self.tmpin, 'wb') as fin:
fin.write(plaintext)
- fin.close()
- fin = open(self.tmpin, 'rb')
- fout = open(self.tmpout, 'w')
- uu.encode(fin, fout, self.tmpin, mode=0644)
- fin.close()
- fout.close()
+ with open(self.tmpin, 'rb') as fin:
+ with open(self.tmpout, 'w') as fout:
+ uu.encode(fin, fout, self.tmpin, mode=0o644)
- fout = open(self.tmpout, 'r')
+ with open(self.tmpout, 'r') as fout:
s = fout.read()
- fout.close()
- self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin))
+ self.assertEqual(s, encodedtextwrapped % (0o644, self.tmpin))
- # in_file and out_file as filenames
- uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0644)
- fout = open(self.tmpout, 'r')
+ # in_file and out_file as filenames
+ uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
+ with open(self.tmpout, 'r') as fout:
s = fout.read()
- fout.close()
- self.assertEqual(s, encodedtextwrapped % (0644, self.tmpin))
-
- finally:
- self._kill(fin)
- self._kill(fout)
+ self.assertEqual(s, encodedtextwrapped % (0o644, self.tmpin))
def test_decode(self):
- f = None
- try:
- test_support.unlink(self.tmpin)
- f = open(self.tmpin, 'w')
- f.write(encodedtextwrapped % (0644, self.tmpout))
- f.close()
+ with open(self.tmpin, 'w') as f:
+ f.write(encodedtextwrapped % (0o644, self.tmpout))
- f = open(self.tmpin, 'r')
+ with open(self.tmpin, 'r') as f:
uu.decode(f)
- f.close()
- f = open(self.tmpout, 'r')
+ with open(self.tmpout, 'r') as f:
s = f.read()
- f.close()
- self.assertEqual(s, plaintext)
- # XXX is there an xp way to verify the mode?
- finally:
- self._kill(f)
+ self.assertEqual(s, plaintext)
+ # XXX is there an xp way to verify the mode?
def test_decode_filename(self):
- f = None
- try:
- test_support.unlink(self.tmpin)
- f = open(self.tmpin, 'w')
- f.write(encodedtextwrapped % (0644, self.tmpout))
- f.close()
+ with open(self.tmpin, 'w') as f:
+ f.write(encodedtextwrapped % (0o644, self.tmpout))
- uu.decode(self.tmpin)
+ uu.decode(self.tmpin)
- f = open(self.tmpout, 'r')
+ with open(self.tmpout, 'r') as f:
s = f.read()
- f.close()
- self.assertEqual(s, plaintext)
- finally:
- self._kill(f)
+ self.assertEqual(s, plaintext)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
- f = None
- try:
- f = cStringIO.StringIO(encodedtextwrapped % (0644, self.tmpout))
-
- f = open(self.tmpin, 'r')
+ with open(self.tmpin, 'wb') as f:
+ f.write(encodedtextwrapped % (0o644, self.tmpout))
+ with open(self.tmpin, 'r') as f:
uu.decode(f)
- f.close()
- f = open(self.tmpin, 'r')
+ with open(self.tmpin, 'r') as f:
self.assertRaises(uu.Error, uu.decode, f)
- f.close()
- finally:
- self._kill(f)
def test_main():
- test_support.run_unittest(UUTest, UUStdIOTest, UUFileTest)
+ support.run_unittest(UUTest, UUStdIOTest, UUFileTest)
if __name__=="__main__":
test_main()
diff --git a/lib-python/2.7/test/test_uuid.py b/lib-python/2.7/test/test_uuid.py
index 0a8130ee0c..6cd2c39293 100644
--- a/lib-python/2.7/test/test_uuid.py
+++ b/lib-python/2.7/test/test_uuid.py
@@ -287,6 +287,39 @@ class TestUUID(unittest.TestCase):
node2 = uuid.getnode()
self.assertEqual(node1, node2, '%012x != %012x' % (node1, node2))
+ # bpo-32502: UUID1 requires a 48-bit identifier, but hardware identifiers
+ # need not necessarily be 48 bits (e.g., EUI-64).
+ def test_uuid1_eui64(self):
+ # Confirm that uuid.getnode ignores hardware addresses larger than 48
+ # bits. Mock out each platform's *_getnode helper functions to return
+ # something just larger than 48 bits to test. This will cause
+ # uuid.getnode to fall back on uuid._random_getnode, which will
+ # generate a valid value.
+ too_large_getter = lambda: 1 << 48
+
+ uuid_real__node = uuid._node
+ uuid_real__NODE_GETTERS_WIN32 = uuid._NODE_GETTERS_WIN32
+ uuid_real__NODE_GETTERS_UNIX = uuid._NODE_GETTERS_UNIX
+ uuid._node = None
+ uuid._NODE_GETTERS_WIN32 = [too_large_getter]
+ uuid._NODE_GETTERS_UNIX = [too_large_getter]
+ try:
+ node = uuid.getnode()
+ finally:
+ uuid._node = uuid_real__node
+ uuid._NODE_GETTERS_WIN32 = uuid_real__NODE_GETTERS_WIN32
+ uuid._NODE_GETTERS_UNIX = uuid_real__NODE_GETTERS_UNIX
+
+ self.assertTrue(0 < node < (1 << 48), '%012x' % node)
+
+ # Confirm that uuid1 can use the generated node, i.e., the that
+ # uuid.getnode fell back on uuid._random_getnode() rather than using
+ # the value from too_large_getter above.
+ try:
+ uuid.uuid1(node=node)
+ except ValueError as e:
+ self.fail('uuid1 was given an invalid node ID')
+
@unittest.skipUnless(importable('ctypes'), 'requires ctypes')
def test_uuid1(self):
equal = self.assertEqual
@@ -429,54 +462,52 @@ eth0 Link encap:Ethernet HWaddr 12:34:56:78:90:ab
)
self.assertEqual(mac, 0x1234567890ab)
- def check_node(self, node, requires=None, network=False):
+ def check_node(self, node, requires=None):
if requires and node is None:
self.skipTest('requires ' + requires)
hex = '%012x' % node
if test_support.verbose >= 2:
print hex + ' ',
- if network:
- # 47 bit will never be set in IEEE 802 addresses obtained
- # from network cards.
- self.assertFalse(node & 0x010000000000, hex)
self.assertTrue(0 < node < (1L << 48),
"%s is not an RFC 4122 node ID" % hex)
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_ifconfig_getnode(self):
node = uuid._ifconfig_getnode()
- self.check_node(node, 'ifconfig', True)
+ self.check_node(node, 'ifconfig')
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_arp_getnode(self):
node = uuid._arp_getnode()
- self.check_node(node, 'arp', True)
+ self.check_node(node, 'arp')
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_lanscan_getnode(self):
node = uuid._lanscan_getnode()
- self.check_node(node, 'lanscan', True)
+ self.check_node(node, 'lanscan')
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
def test_netstat_getnode(self):
node = uuid._netstat_getnode()
- self.check_node(node, 'netstat', True)
+ self.check_node(node, 'netstat')
@unittest.skipUnless(os.name == 'nt', 'requires Windows')
def test_ipconfig_getnode(self):
node = uuid._ipconfig_getnode()
- self.check_node(node, 'ipconfig', True)
+ self.check_node(node, 'ipconfig')
@unittest.skipUnless(importable('win32wnet'), 'requires win32wnet')
@unittest.skipUnless(importable('netbios'), 'requires netbios')
def test_netbios_getnode(self):
node = uuid._netbios_getnode()
- self.check_node(node, network=True)
+ self.check_node(node)
def test_random_getnode(self):
node = uuid._random_getnode()
- # Least significant bit of first octet must be set.
- self.assertTrue(node & 0x010000000000, '%012x' % node)
+ # The multicast bit, i.e. the least significant bit of first octet,
+ # must be set for randomly generated MAC addresses. See RFC 4122,
+ # $4.1.6.
+ self.assertTrue(node & (1 << 40), '%012x' % node)
self.check_node(node)
@unittest.skipUnless(os.name == 'posix', 'requires Posix')
diff --git a/lib-python/2.7/test/test_warnings.py b/lib-python/2.7/test/test_warnings.py
index 4644cbaa81..d0805f0a00 100644
--- a/lib-python/2.7/test/test_warnings.py
+++ b/lib-python/2.7/test/test_warnings.py
@@ -107,10 +107,14 @@ class FilterTests(object):
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
message = "FilterTests.test_always"
- self.module.warn(message, UserWarning)
- self.assertTrue(message, w[-1].message)
- self.module.warn(message, UserWarning)
- self.assertTrue(w[-1].message, message)
+ def f():
+ self.module.warn(message, UserWarning)
+ f()
+ self.assertEqual(len(w), 1)
+ self.assertEqual(w[-1].message.args[0], message)
+ f()
+ self.assertEqual(len(w), 2)
+ self.assertEqual(w[-1].message.args[0], message)
def test_default(self):
with original_warnings.catch_warnings(record=True,
@@ -586,6 +590,38 @@ class _WarningsTests(BaseTest):
self.assertNotIn(b'Warning!', stderr)
self.assertNotIn(b'Error', stderr)
+ def test_issue31285(self):
+ # warn_explicit() shouldn't raise a SystemError in case the return
+ # value of get_source() has a bad splitlines() method.
+ class BadLoader:
+ def get_source(self, fullname):
+ class BadSource(str):
+ def splitlines(self):
+ return 42
+ return BadSource('spam')
+
+ wmod = self.module
+ with original_warnings.catch_warnings(module=wmod):
+ wmod.filterwarnings('default', category=UserWarning)
+
+ with test_support.captured_stderr() as stderr:
+ wmod.warn_explicit(
+ 'foo', UserWarning, 'bar', 1,
+ module_globals={'__loader__': BadLoader(),
+ '__name__': 'foobar'})
+ self.assertIn('UserWarning: foo', stderr.getvalue())
+
+ @test_support.cpython_only
+ def test_issue31411(self):
+ # warn_explicit() shouldn't raise a SystemError in case
+ # warnings.onceregistry isn't a dictionary.
+ wmod = self.module
+ with original_warnings.catch_warnings(module=wmod):
+ wmod.filterwarnings('once')
+ with test_support.swap_attr(wmod, 'onceregistry', None):
+ with self.assertRaises(TypeError):
+ wmod.warn_explicit('foo', Warning, 'bar', 1, registry=None)
+
class WarningsDisplayTests(unittest.TestCase):
diff --git a/lib-python/2.7/test/test_weakref.py b/lib-python/2.7/test/test_weakref.py
index fd498d201d..beea573e55 100644
--- a/lib-python/2.7/test/test_weakref.py
+++ b/lib-python/2.7/test/test_weakref.py
@@ -6,6 +6,7 @@ import weakref
import operator
import contextlib
import copy
+import time
from test import test_support
from test.test_support import gc_collect
@@ -57,6 +58,32 @@ class RefCycle:
self.cycle = self
+@contextlib.contextmanager
+def collect_in_thread(period=0.001):
+ """
+ Ensure GC collections happen in a different thread, at a high frequency.
+ """
+ threading = test_support.import_module('threading')
+ please_stop = False
+
+ def collect():
+ while not please_stop:
+ time.sleep(period)
+ gc.collect()
+
+ with test_support.disable_gc():
+ old_interval = sys.getcheckinterval()
+ sys.setcheckinterval(20)
+ t = threading.Thread(target=collect)
+ t.start()
+ try:
+ yield
+ finally:
+ please_stop = True
+ t.join()
+ sys.setcheckinterval(old_interval)
+
+
class TestBase(unittest.TestCase):
def setUp(self):
@@ -575,6 +602,7 @@ class ReferencesTestCase(TestBase):
del c1, c2, C, D
gc_collect()
+ @test_support.requires_type_collecting
def test_callback_in_cycle_resurrection(self):
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
@@ -1403,6 +1431,35 @@ class MappingTestCase(TestBase):
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
+ def test_threaded_weak_valued_setdefault(self):
+ d = weakref.WeakValueDictionary()
+ with collect_in_thread():
+ for i in range(50000):
+ x = d.setdefault(10, RefCycle())
+ self.assertIsNot(x, None) # we never put None in there!
+ del x
+
+ def test_threaded_weak_valued_pop(self):
+ d = weakref.WeakValueDictionary()
+ with collect_in_thread():
+ for i in range(50000):
+ d[10] = RefCycle()
+ x = d.pop(10, 10)
+ self.assertIsNot(x, None) # we never put None in there!
+
+ def test_threaded_weak_valued_consistency(self):
+ # Issue #28427: old keys should not remove new values from
+ # WeakValueDictionary when collecting from another thread.
+ d = weakref.WeakValueDictionary()
+ with collect_in_thread():
+ for i in range(200000):
+ o = RefCycle()
+ d[10] = o
+ # o is still alive, so the dict can't be empty
+ self.assertEqual(len(d), 1)
+ o = None # lose ref
+
+
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
diff --git a/lib-python/2.7/test/test_wsgiref.py b/lib-python/2.7/test/test_wsgiref.py
index 2469f677ca..20129e7edc 100644
--- a/lib-python/2.7/test/test_wsgiref.py
+++ b/lib-python/2.7/test/test_wsgiref.py
@@ -13,7 +13,7 @@ import os
import re
import sys
-from test import test_support
+from test import support
class MockServer(WSGIServer):
"""Non-socket HTTP server"""
@@ -377,32 +377,62 @@ class TestHandler(ErrorHandler):
class HandlerTests(TestCase):
-
- def checkEnvironAttrs(self, handler):
- env = handler.environ
- for attr in [
- 'version','multithread','multiprocess','run_once','file_wrapper'
- ]:
- if attr=='file_wrapper' and handler.wsgi_file_wrapper is None:
- continue
- self.assertEqual(getattr(handler,'wsgi_'+attr),env['wsgi.'+attr])
-
- def checkOSEnviron(self,handler):
- empty = {}; setup_testing_defaults(empty)
- env = handler.environ
- from os import environ
- for k,v in environ.items():
- if k not in empty:
- self.assertEqual(env[k],v)
- for k,v in empty.items():
- self.assertIn(k, env)
+ # testEnviron() can produce long error message
+ maxDiff = 80 * 50
def testEnviron(self):
- h = TestHandler(X="Y")
- h.setup_environ()
- self.checkEnvironAttrs(h)
- self.checkOSEnviron(h)
- self.assertEqual(h.environ["X"],"Y")
+ os_environ = {
+ # very basic environment
+ 'HOME': '/my/home',
+ 'PATH': '/my/path',
+ 'LANG': 'fr_FR.UTF-8',
+
+ # set some WSGI variables
+ 'SCRIPT_NAME': 'test_script_name',
+ 'SERVER_NAME': 'test_server_name',
+ }
+
+ with support.swap_attr(TestHandler, 'os_environ', os_environ):
+ # override X and HOME variables
+ handler = TestHandler(X="Y", HOME="/override/home")
+ handler.setup_environ()
+
+ # Check that wsgi_xxx attributes are copied to wsgi.xxx variables
+ # of handler.environ
+ for attr in ('version', 'multithread', 'multiprocess', 'run_once',
+ 'file_wrapper'):
+ self.assertEqual(getattr(handler, 'wsgi_' + attr),
+ handler.environ['wsgi.' + attr])
+
+ # Test handler.environ as a dict
+ expected = {}
+ setup_testing_defaults(expected)
+ # Handler inherits os_environ variables which are not overriden
+ # by SimpleHandler.add_cgi_vars() (SimpleHandler.base_env)
+ for key, value in os_environ.items():
+ if key not in expected:
+ expected[key] = value
+ expected.update({
+ # X doesn't exist in os_environ
+ "X": "Y",
+ # HOME is overriden by TestHandler
+ 'HOME': "/override/home",
+
+ # overriden by setup_testing_defaults()
+ "SCRIPT_NAME": "",
+ "SERVER_NAME": "127.0.0.1",
+
+ # set by BaseHandler.setup_environ()
+ 'wsgi.input': handler.get_stdin(),
+ 'wsgi.errors': handler.get_stderr(),
+ 'wsgi.version': (1, 0),
+ 'wsgi.run_once': False,
+ 'wsgi.url_scheme': 'http',
+ 'wsgi.multithread': True,
+ 'wsgi.multiprocess': True,
+ 'wsgi.file_wrapper': util.FileWrapper,
+ })
+ self.assertDictEqual(handler.environ, expected)
def testCGIEnviron(self):
h = BaseCGIHandler(None,None,None,{})
@@ -565,7 +595,7 @@ class HandlerTests(TestCase):
def test_main():
- test_support.run_unittest(__name__)
+ support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py
index 8ba0e2c600..f6d5d17fc3 100644
--- a/lib-python/2.7/test/test_xml_etree.py
+++ b/lib-python/2.7/test/test_xml_etree.py
@@ -1,26 +1,36 @@
-# xml.etree test. This file contains enough tests to make sure that
-# all included components work as they should.
-# Large parts are extracted from the upstream test suite.
-
-# IMPORTANT: the same doctests are run from "test_xml_etree_c" in
-# order to ensure consistency between the C implementation and the
-# Python implementation.
+# -*- coding: utf-8 -*-
+# IMPORTANT: the same tests are run from "test_xml_etree_c" in order
+# to ensure consistency between the C implementation and the Python
+# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
-# Don't re-import "xml.etree.ElementTree" module in the docstring,
-# except if the test is specific to the Python implementation.
-import sys
import cgi
+import copy
+import functools
+import io
+import pickle
+import StringIO
+import sys
+import types
+import unittest
+import warnings
+import weakref
-from test import test_support
-from test.test_support import findfile
+from test import test_support as support
+from test.test_support import TESTFN, findfile, gc_collect, swap_attr
-from xml.etree import ElementTree as ET
+# pyET is the pure-Python implementation.
+#
+# ET is pyET in test_xml_etree and is the C accelerated version in
+# test_xml_etree_c.
+from xml.etree import ElementTree as pyET
+ET = None
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
+UTF8_BUG_XMLFILE = findfile("expat224_utf8_bug.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
@@ -52,22 +62,54 @@ SAMPLE_XML_NS = """
</body>
"""
+SAMPLE_XML_NS_ELEMS = """
+<root>
+<h:table xmlns:h="hello">
+ <h:tr>
+ <h:td>Apples</h:td>
+ <h:td>Bananas</h:td>
+ </h:tr>
+</h:table>
+
+<f:table xmlns:f="foo">
+ <f:name>African Coffee Table</f:name>
+ <f:width>80</f:width>
+ <f:length>120</f:length>
+</f:table>
+</root>
+"""
+
+ENTITY_XML = """\
+<!DOCTYPE points [
+<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
+%user-entities;
+]>
+<document>&entity;</document>
+"""
+
+
+def checkwarnings(*filters):
+ def decorator(test):
+ def newtest(*args, **kwargs):
+ with support.check_warnings(*filters):
+ test(*args, **kwargs)
+ functools.update_wrapper(newtest, test)
+ return newtest
+ return decorator
-def sanity():
- """
- Import sanity.
- >>> from xml.etree import ElementTree
- >>> from xml.etree import ElementInclude
- >>> from xml.etree import ElementPath
- """
+class ModuleTest(unittest.TestCase):
+ # TODO: this should be removed once we get rid of the global module vars
+
+ def test_sanity(self):
+ # Import sanity.
+
+ from xml.etree import ElementTree
+ from xml.etree import ElementInclude
+ from xml.etree import ElementPath
-def check_method(method):
- if not hasattr(method, '__call__'):
- print method, "not callable"
def serialize(elem, to_string=True, **options):
- import StringIO
file = StringIO.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, **options)
@@ -77,13 +119,8 @@ def serialize(elem, to_string=True, **options):
file.seek(0)
return file
-def summarize(elem):
- if elem.tag == ET.Comment:
- return "<Comment>"
- return elem.tag
-
def summarize_list(seq):
- return [summarize(elem) for elem in seq]
+ return [elem.tag for elem in seq]
def normalize_crlf(tree):
for elem in tree.iter():
@@ -92,1168 +129,879 @@ def normalize_crlf(tree):
if elem.tail:
elem.tail = elem.tail.replace("\r\n", "\n")
-def check_string(string):
- len(string)
- for char in string:
- if len(char) != 1:
- print "expected one-character string, got %r" % char
- new_string = string + ""
- new_string = string + " "
- string[:0]
-
-def check_mapping(mapping):
- len(mapping)
- keys = mapping.keys()
- items = mapping.items()
- for key in keys:
- item = mapping[key]
- mapping["key"] = "value"
- if mapping["key"] != "value":
- print "expected value string, got %r" % mapping["key"]
-
-def check_element(element):
- if not ET.iselement(element):
- print "not an element"
- if not hasattr(element, "tag"):
- print "no tag member"
- if not hasattr(element, "attrib"):
- print "no attrib member"
- if not hasattr(element, "text"):
- print "no text member"
- if not hasattr(element, "tail"):
- print "no tail member"
-
- check_string(element.tag)
- check_mapping(element.attrib)
- if element.text is not None:
- check_string(element.text)
- if element.tail is not None:
- check_string(element.tail)
- for elem in element:
- check_element(elem)
+def python_only(test):
+ def wrapper(*args):
+ if ET is not pyET:
+ raise unittest.SkipTest('only for the Python version')
+ return test(*args)
+ return wrapper
+
+def cet_only(test):
+ def wrapper(*args):
+ if ET is pyET:
+ raise unittest.SkipTest('only for the C version')
+ return test(*args)
+ return wrapper
# --------------------------------------------------------------------
# element tree tests
-def interface():
- r"""
- Test element tree interface.
-
- >>> element = ET.Element("tag")
- >>> check_element(element)
- >>> tree = ET.ElementTree(element)
- >>> check_element(tree.getroot())
-
- >>> element = ET.Element("t\xe4g", key="value")
- >>> tree = ET.ElementTree(element)
- >>> repr(element) # doctest: +ELLIPSIS
- "<Element 't\\xe4g' at 0x...>"
- >>> element = ET.Element("tag", key="value")
-
- Make sure all standard element methods exist.
-
- >>> check_method(element.append)
- >>> check_method(element.extend)
- >>> check_method(element.insert)
- >>> check_method(element.remove)
- >>> check_method(element.getchildren)
- >>> check_method(element.find)
- >>> check_method(element.iterfind)
- >>> check_method(element.findall)
- >>> check_method(element.findtext)
- >>> check_method(element.clear)
- >>> check_method(element.get)
- >>> check_method(element.set)
- >>> check_method(element.keys)
- >>> check_method(element.items)
- >>> check_method(element.iter)
- >>> check_method(element.itertext)
- >>> check_method(element.getiterator)
-
- These methods return an iterable. See bug 6472.
-
- >>> check_method(element.iter("tag").next)
- >>> check_method(element.iterfind("tag").next)
- >>> check_method(element.iterfind("*").next)
- >>> check_method(tree.iter("tag").next)
- >>> check_method(tree.iterfind("tag").next)
- >>> check_method(tree.iterfind("*").next)
-
- These aliases are provided:
-
- >>> assert ET.XML == ET.fromstring
- >>> assert ET.PI == ET.ProcessingInstruction
- >>> assert ET.XMLParser == ET.XMLTreeBuilder
- """
-
-def simpleops():
- """
- Basic method sanity checks.
-
- >>> elem = ET.XML("<body><tag/></body>")
- >>> serialize(elem)
- '<body><tag /></body>'
- >>> e = ET.Element("tag2")
- >>> elem.append(e)
- >>> serialize(elem)
- '<body><tag /><tag2 /></body>'
- >>> elem.remove(e)
- >>> serialize(elem)
- '<body><tag /></body>'
- >>> elem.insert(0, e)
- >>> serialize(elem)
- '<body><tag2 /><tag /></body>'
- >>> elem.remove(e)
- >>> elem.extend([e])
- >>> serialize(elem)
- '<body><tag /><tag2 /></body>'
- >>> elem.remove(e)
-
- >>> element = ET.Element("tag", key="value")
- >>> serialize(element) # 1
- '<tag key="value" />'
- >>> subelement = ET.Element("subtag")
- >>> element.append(subelement)
- >>> serialize(element) # 2
- '<tag key="value"><subtag /></tag>'
- >>> element.insert(0, subelement)
- >>> serialize(element) # 3
- '<tag key="value"><subtag /><subtag /></tag>'
- >>> element.remove(subelement)
- >>> serialize(element) # 4
- '<tag key="value"><subtag /></tag>'
- >>> element.remove(subelement)
- >>> serialize(element) # 5
- '<tag key="value" />'
- >>> element.remove(subelement) # doctest: +ELLIPSIS
- Traceback (most recent call last):
- ValueError: list.remove(...
- >>> serialize(element) # 6
- '<tag key="value" />'
- >>> element[0:0] = [subelement, subelement, subelement]
- >>> serialize(element[1])
- '<subtag />'
- >>> element[1:9] == [element[1], element[2]]
- True
- >>> element[:9:2] == [element[0], element[2]]
- True
- >>> del element[1:2]
- >>> serialize(element)
- '<tag key="value"><subtag /><subtag /></tag>'
- """
-
-def cdata():
- """
- Test CDATA handling (etc).
-
- >>> serialize(ET.XML("<tag>hello</tag>"))
- '<tag>hello</tag>'
- >>> serialize(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>"))
- '<tag>hello</tag>'
- >>> serialize(ET.XML("<tag><![CDATA[hello]]></tag>"))
- '<tag>hello</tag>'
- """
-
-# Only with Python implementation
-def simplefind():
- """
- Test find methods using the elementpath fallback.
-
- >>> from xml.etree import ElementTree
-
- >>> CurrentElementPath = ElementTree.ElementPath
- >>> ElementTree.ElementPath = ElementTree._SimpleElementPath()
- >>> elem = ElementTree.XML(SAMPLE_XML)
- >>> elem.find("tag").tag
- 'tag'
- >>> ElementTree.ElementTree(elem).find("tag").tag
- 'tag'
- >>> elem.findtext("tag")
- 'text'
- >>> elem.findtext("tog")
- >>> elem.findtext("tog", "default")
- 'default'
- >>> ElementTree.ElementTree(elem).findtext("tag")
- 'text'
- >>> summarize_list(elem.findall("tag"))
- ['tag', 'tag']
- >>> summarize_list(elem.findall(".//tag"))
- ['tag', 'tag', 'tag']
-
- Path syntax doesn't work in this case.
-
- >>> elem.find("section/tag")
- >>> elem.findtext("section/tag")
- >>> summarize_list(elem.findall("section/tag"))
- []
-
- >>> ElementTree.ElementPath = CurrentElementPath
- """
-
-def find():
- """
- Test find methods (including xpath syntax).
-
- >>> elem = ET.XML(SAMPLE_XML)
- >>> elem.find("tag").tag
- 'tag'
- >>> ET.ElementTree(elem).find("tag").tag
- 'tag'
- >>> elem.find("section/tag").tag
- 'tag'
- >>> elem.find("./tag").tag
- 'tag'
- >>> ET.ElementTree(elem).find("./tag").tag
- 'tag'
- >>> ET.ElementTree(elem).find("/tag").tag
- 'tag'
- >>> elem[2] = ET.XML(SAMPLE_SECTION)
- >>> elem.find("section/nexttag").tag
- 'nexttag'
- >>> ET.ElementTree(elem).find("section/tag").tag
- 'tag'
- >>> ET.ElementTree(elem).find("tog")
- >>> ET.ElementTree(elem).find("tog/foo")
- >>> elem.findtext("tag")
- 'text'
- >>> elem.findtext("section/nexttag")
- ''
- >>> elem.findtext("section/nexttag", "default")
- ''
- >>> elem.findtext("tog")
- >>> elem.findtext("tog", "default")
- 'default'
- >>> ET.ElementTree(elem).findtext("tag")
- 'text'
- >>> ET.ElementTree(elem).findtext("tog/foo")
- >>> ET.ElementTree(elem).findtext("tog/foo", "default")
- 'default'
- >>> ET.ElementTree(elem).findtext("./tag")
- 'text'
- >>> ET.ElementTree(elem).findtext("/tag")
- 'text'
- >>> elem.findtext("section/tag")
- 'subtext'
- >>> ET.ElementTree(elem).findtext("section/tag")
- 'subtext'
- >>> summarize_list(elem.findall("."))
- ['body']
- >>> summarize_list(elem.findall("tag"))
- ['tag', 'tag']
- >>> summarize_list(elem.findall("tog"))
- []
- >>> summarize_list(elem.findall("tog/foo"))
- []
- >>> summarize_list(elem.findall("*"))
- ['tag', 'tag', 'section']
- >>> summarize_list(elem.findall(".//tag"))
- ['tag', 'tag', 'tag', 'tag']
- >>> summarize_list(elem.findall("section/tag"))
- ['tag']
- >>> summarize_list(elem.findall("section//tag"))
- ['tag', 'tag']
- >>> summarize_list(elem.findall("section/*"))
- ['tag', 'nexttag', 'nextsection']
- >>> summarize_list(elem.findall("section//*"))
- ['tag', 'nexttag', 'nextsection', 'tag']
- >>> summarize_list(elem.findall("section/.//*"))
- ['tag', 'nexttag', 'nextsection', 'tag']
- >>> summarize_list(elem.findall("*/*"))
- ['tag', 'nexttag', 'nextsection']
- >>> summarize_list(elem.findall("*//*"))
- ['tag', 'nexttag', 'nextsection', 'tag']
- >>> summarize_list(elem.findall("*/tag"))
- ['tag']
- >>> summarize_list(elem.findall("*/./tag"))
- ['tag']
- >>> summarize_list(elem.findall("./tag"))
- ['tag', 'tag']
- >>> summarize_list(elem.findall(".//tag"))
- ['tag', 'tag', 'tag', 'tag']
- >>> summarize_list(elem.findall("././tag"))
- ['tag', 'tag']
- >>> summarize_list(elem.findall(".//tag[@class]"))
- ['tag', 'tag', 'tag']
- >>> summarize_list(elem.findall(".//tag[@class='a']"))
- ['tag']
- >>> summarize_list(elem.findall(".//tag[@class='b']"))
- ['tag', 'tag']
- >>> summarize_list(elem.findall(".//tag[@id]"))
- ['tag']
- >>> summarize_list(elem.findall(".//section[tag]"))
- ['section']
- >>> summarize_list(elem.findall(".//section[element]"))
- []
- >>> summarize_list(elem.findall("../tag"))
- []
- >>> summarize_list(elem.findall("section/../tag"))
- ['tag', 'tag']
- >>> summarize_list(ET.ElementTree(elem).findall("./tag"))
- ['tag', 'tag']
-
- Following example is invalid in 1.2.
- A leading '*' is assumed in 1.3.
-
- >>> elem.findall("section//") == elem.findall("section//*")
- True
-
- ET's Path module handles this case incorrectly; this gives
- a warning in 1.3, and the behaviour will be modified in 1.4.
-
- >>> summarize_list(ET.ElementTree(elem).findall("/tag"))
- ['tag', 'tag']
-
- >>> elem = ET.XML(SAMPLE_XML_NS)
- >>> summarize_list(elem.findall("tag"))
- []
- >>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
- ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
- >>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
- ['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
- """
-
-def file_init():
- """
- >>> import StringIO
-
- >>> stringfile = StringIO.StringIO(SAMPLE_XML)
- >>> tree = ET.ElementTree(file=stringfile)
- >>> tree.find("tag").tag
- 'tag'
- >>> tree.find("section/tag").tag
- 'tag'
-
- >>> tree = ET.ElementTree(file=SIMPLE_XMLFILE)
- >>> tree.find("element").tag
- 'element'
- >>> tree.find("element/../empty-element").tag
- 'empty-element'
- """
-
-def bad_find():
- """
- Check bad or unsupported path expressions.
-
- >>> elem = ET.XML(SAMPLE_XML)
- >>> elem.findall("/tag")
- Traceback (most recent call last):
- SyntaxError: cannot use absolute path on element
- """
-
-def path_cache():
- """
- Check that the path cache behaves sanely.
-
- >>> elem = ET.XML(SAMPLE_XML)
- >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
- >>> cache_len_10 = len(ET.ElementPath._cache)
- >>> for i in range(10): ET.ElementTree(elem).find('./'+str(i))
- >>> len(ET.ElementPath._cache) == cache_len_10
- True
- >>> for i in range(20): ET.ElementTree(elem).find('./'+str(i))
- >>> len(ET.ElementPath._cache) > cache_len_10
- True
- >>> for i in range(600): ET.ElementTree(elem).find('./'+str(i))
- >>> len(ET.ElementPath._cache) < 500
- True
- """
-
-def copy():
- """
- Test copy handling (etc).
-
- >>> import copy
- >>> e1 = ET.XML("<tag>hello<foo/></tag>")
- >>> e2 = copy.copy(e1)
- >>> e3 = copy.deepcopy(e1)
- >>> e1.find("foo").tag = "bar"
- >>> serialize(e1)
- '<tag>hello<bar /></tag>'
- >>> serialize(e2)
- '<tag>hello<bar /></tag>'
- >>> serialize(e3)
- '<tag>hello<foo /></tag>'
-
- """
-
-def attrib():
- """
- Test attribute handling.
-
- >>> elem = ET.Element("tag")
- >>> elem.get("key") # 1.1
- >>> elem.get("key", "default") # 1.2
- 'default'
- >>> elem.set("key", "value")
- >>> elem.get("key") # 1.3
- 'value'
-
- >>> elem = ET.Element("tag", key="value")
- >>> elem.get("key") # 2.1
- 'value'
- >>> elem.attrib # 2.2
- {'key': 'value'}
-
- >>> attrib = {"key": "value"}
- >>> elem = ET.Element("tag", attrib)
- >>> attrib.clear() # check for aliasing issues
- >>> elem.get("key") # 3.1
- 'value'
- >>> elem.attrib # 3.2
- {'key': 'value'}
-
- >>> attrib = {"key": "value"}
- >>> elem = ET.Element("tag", **attrib)
- >>> attrib.clear() # check for aliasing issues
- >>> elem.get("key") # 4.1
- 'value'
- >>> elem.attrib # 4.2
- {'key': 'value'}
-
- >>> elem = ET.Element("tag", {"key": "other"}, key="value")
- >>> elem.get("key") # 5.1
- 'value'
- >>> elem.attrib # 5.2
- {'key': 'value'}
-
- >>> elem = ET.Element('test')
- >>> elem.text = "aa"
- >>> elem.set('testa', 'testval')
- >>> elem.set('testb', 'test2')
- >>> ET.tostring(elem)
- '<test testa="testval" testb="test2">aa</test>'
- >>> sorted(elem.keys())
- ['testa', 'testb']
- >>> sorted(elem.items())
- [('testa', 'testval'), ('testb', 'test2')]
- >>> elem.attrib['testb']
- 'test2'
- >>> elem.attrib['testb'] = 'test1'
- >>> elem.attrib['testc'] = 'test2'
- >>> ET.tostring(elem)
- '<test testa="testval" testb="test1" testc="test2">aa</test>'
- """
-
-def makeelement():
- """
- Test makeelement handling.
-
- >>> elem = ET.Element("tag")
- >>> attrib = {"key": "value"}
- >>> subelem = elem.makeelement("subtag", attrib)
- >>> if subelem.attrib is attrib:
- ... print "attrib aliasing"
- >>> elem.append(subelem)
- >>> serialize(elem)
- '<tag><subtag key="value" /></tag>'
-
- >>> elem.clear()
- >>> serialize(elem)
- '<tag />'
- >>> elem.append(subelem)
- >>> serialize(elem)
- '<tag><subtag key="value" /></tag>'
- >>> elem.extend([subelem, subelem])
- >>> serialize(elem)
- '<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>'
- >>> elem[:] = [subelem]
- >>> serialize(elem)
- '<tag><subtag key="value" /></tag>'
- >>> elem[:] = tuple([subelem])
- >>> serialize(elem)
- '<tag><subtag key="value" /></tag>'
-
- """
-
-def parsefile():
- """
- Test parsing from file.
-
- >>> tree = ET.parse(SIMPLE_XMLFILE)
- >>> normalize_crlf(tree)
- >>> tree.write(sys.stdout)
- <root>
- <element key="value">text</element>
- <element>text</element>tail
- <empty-element />
- </root>
- >>> tree = ET.parse(SIMPLE_NS_XMLFILE)
- >>> normalize_crlf(tree)
- >>> tree.write(sys.stdout)
- <ns0:root xmlns:ns0="namespace">
- <ns0:element key="value">text</ns0:element>
- <ns0:element>text</ns0:element>tail
- <ns0:empty-element />
- </ns0:root>
-
- >>> with open(SIMPLE_XMLFILE) as f:
- ... data = f.read()
-
- >>> parser = ET.XMLParser()
- >>> parser.version # doctest: +ELLIPSIS
- 'Expat ...'
- >>> parser.feed(data)
- >>> print serialize(parser.close())
- <root>
- <element key="value">text</element>
- <element>text</element>tail
- <empty-element />
- </root>
-
- >>> parser = ET.XMLTreeBuilder() # 1.2 compatibility
- >>> parser.feed(data)
- >>> print serialize(parser.close())
- <root>
- <element key="value">text</element>
- <element>text</element>tail
- <empty-element />
- </root>
-
- >>> target = ET.TreeBuilder()
- >>> parser = ET.XMLParser(target=target)
- >>> parser.feed(data)
- >>> print serialize(parser.close())
- <root>
- <element key="value">text</element>
- <element>text</element>tail
- <empty-element />
- </root>
- """
-
-def parseliteral():
- """
- >>> element = ET.XML("<html><body>text</body></html>")
- >>> ET.ElementTree(element).write(sys.stdout)
- <html><body>text</body></html>
- >>> element = ET.fromstring("<html><body>text</body></html>")
- >>> ET.ElementTree(element).write(sys.stdout)
- <html><body>text</body></html>
- >>> sequence = ["<html><body>", "text</bo", "dy></html>"]
- >>> element = ET.fromstringlist(sequence)
- >>> print ET.tostring(element)
- <html><body>text</body></html>
- >>> print "".join(ET.tostringlist(element))
- <html><body>text</body></html>
- >>> ET.tostring(element, "ascii")
- "<?xml version='1.0' encoding='ascii'?>\\n<html><body>text</body></html>"
- >>> _, ids = ET.XMLID("<html><body>text</body></html>")
- >>> len(ids)
- 0
- >>> _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
- >>> len(ids)
- 1
- >>> ids["body"].tag
- 'body'
- """
-
-def iterparse():
- """
- Test iterparse interface.
-
- >>> iterparse = ET.iterparse
-
- >>> context = iterparse(SIMPLE_XMLFILE)
- >>> action, elem = next(context)
- >>> print action, elem.tag
- end element
- >>> for action, elem in context:
- ... print action, elem.tag
- end element
- end empty-element
- end root
- >>> context.root.tag
- 'root'
-
- >>> context = iterparse(SIMPLE_NS_XMLFILE)
- >>> for action, elem in context:
- ... print action, elem.tag
- end {namespace}element
- end {namespace}element
- end {namespace}empty-element
- end {namespace}root
-
- >>> events = ()
- >>> context = iterparse(SIMPLE_XMLFILE, events)
- >>> for action, elem in context:
- ... print action, elem.tag
-
- >>> events = ()
- >>> context = iterparse(SIMPLE_XMLFILE, events=events)
- >>> for action, elem in context:
- ... print action, elem.tag
-
- >>> events = ("start", "end")
- >>> context = iterparse(SIMPLE_XMLFILE, events)
- >>> for action, elem in context:
- ... print action, elem.tag
- start root
- start element
- end element
- start element
- end element
- start empty-element
- end empty-element
- end root
-
- >>> events = ("start", "end", "start-ns", "end-ns")
- >>> context = iterparse(SIMPLE_NS_XMLFILE, events)
- >>> for action, elem in context:
- ... if action in ("start", "end"):
- ... print action, elem.tag
- ... else:
- ... print action, elem
- start-ns ('', 'namespace')
- start {namespace}root
- start {namespace}element
- end {namespace}element
- start {namespace}element
- end {namespace}element
- start {namespace}empty-element
- end {namespace}empty-element
- end {namespace}root
- end-ns None
-
- >>> import StringIO
-
- >>> events = ('start-ns', 'end-ns')
- >>> context = ET.iterparse(StringIO.StringIO(r"<root xmlns=''/>"), events)
- >>> for action, elem in context:
- ... print action, elem
- start-ns ('', '')
- end-ns None
-
- >>> events = ("start", "end", "bogus")
- >>> with open(SIMPLE_XMLFILE, "rb") as f:
- ... iterparse(f, events)
- Traceback (most recent call last):
- ValueError: unknown event 'bogus'
-
- >>> source = StringIO.StringIO(
- ... "<?xml version='1.0' encoding='iso-8859-1'?>\\n"
- ... "<body xmlns='http://&#233;ffbot.org/ns'\\n"
- ... " xmlns:cl\\xe9='http://effbot.org/ns'>text</body>\\n")
- >>> events = ("start-ns",)
- >>> context = iterparse(source, events)
- >>> for action, elem in context:
- ... print action, elem
- start-ns ('', u'http://\\xe9ffbot.org/ns')
- start-ns (u'cl\\xe9', 'http://effbot.org/ns')
-
- >>> source = StringIO.StringIO("<document />junk")
- >>> try:
- ... for action, elem in iterparse(source):
- ... print action, elem.tag
- ... except ET.ParseError, v:
- ... print v
- end document
- junk after document element: line 1, column 12
- """
-
-def writefile():
- """
- >>> elem = ET.Element("tag")
- >>> elem.text = "text"
- >>> serialize(elem)
- '<tag>text</tag>'
- >>> ET.SubElement(elem, "subtag").text = "subtext"
- >>> serialize(elem)
- '<tag>text<subtag>subtext</subtag></tag>'
-
- Test tag suppression
- >>> elem.tag = None
- >>> serialize(elem)
- 'text<subtag>subtext</subtag>'
- >>> elem.insert(0, ET.Comment("comment"))
- >>> serialize(elem) # assumes 1.3
- 'text<!--comment--><subtag>subtext</subtag>'
- >>> elem[0] = ET.PI("key", "value")
- >>> serialize(elem)
- 'text<?key value?><subtag>subtext</subtag>'
- """
-
-def custom_builder():
- """
- Test parser w. custom builder.
-
- >>> with open(SIMPLE_XMLFILE) as f:
- ... data = f.read()
- >>> class Builder:
- ... def start(self, tag, attrib):
- ... print "start", tag
- ... def end(self, tag):
- ... print "end", tag
- ... def data(self, text):
- ... pass
- >>> builder = Builder()
- >>> parser = ET.XMLParser(target=builder)
- >>> parser.feed(data)
- start root
- start element
- end element
- start element
- end element
- start empty-element
- end empty-element
- end root
-
- >>> with open(SIMPLE_NS_XMLFILE) as f:
- ... data = f.read()
- >>> class Builder:
- ... def start(self, tag, attrib):
- ... print "start", tag
- ... def end(self, tag):
- ... print "end", tag
- ... def data(self, text):
- ... pass
- ... def pi(self, target, data):
- ... print "pi", target, repr(data)
- ... def comment(self, data):
- ... print "comment", repr(data)
- >>> builder = Builder()
- >>> parser = ET.XMLParser(target=builder)
- >>> parser.feed(data)
- pi pi 'data'
- comment ' comment '
- start {namespace}root
- start {namespace}element
- end {namespace}element
- start {namespace}element
- end {namespace}element
- start {namespace}empty-element
- end {namespace}empty-element
- end {namespace}root
-
- """
-
-def getchildren():
- """
- Test Element.getchildren()
-
- >>> with open(SIMPLE_XMLFILE, "r") as f:
- ... tree = ET.parse(f)
- >>> for elem in tree.getroot().iter():
- ... summarize_list(elem.getchildren())
- ['element', 'element', 'empty-element']
- []
- []
- []
- >>> for elem in tree.getiterator():
- ... summarize_list(elem.getchildren())
- ['element', 'element', 'empty-element']
- []
- []
- []
-
- >>> elem = ET.XML(SAMPLE_XML)
- >>> len(elem.getchildren())
- 3
- >>> len(elem[2].getchildren())
- 1
- >>> elem[:] == elem.getchildren()
- True
- >>> child1 = elem[0]
- >>> child2 = elem[2]
- >>> del elem[1:2]
- >>> len(elem.getchildren())
- 2
- >>> child1 == elem[0]
- True
- >>> child2 == elem[1]
- True
- >>> elem[0:2] = [child2, child1]
- >>> child2 == elem[0]
- True
- >>> child1 == elem[1]
- True
- >>> child1 == elem[0]
- False
- >>> elem.clear()
- >>> elem.getchildren()
- []
- """
-
-def writestring():
- """
- >>> elem = ET.XML("<html><body>text</body></html>")
- >>> ET.tostring(elem)
- '<html><body>text</body></html>'
- >>> elem = ET.fromstring("<html><body>text</body></html>")
- >>> ET.tostring(elem)
- '<html><body>text</body></html>'
- """
-
-def check_encoding(encoding):
- """
- >>> check_encoding("ascii")
- >>> check_encoding("us-ascii")
- >>> check_encoding("iso-8859-1")
- >>> check_encoding("iso-8859-15")
- >>> check_encoding("cp437")
- >>> check_encoding("mac-roman")
- >>> check_encoding("gbk")
- Traceback (most recent call last):
- ValueError: multi-byte encodings are not supported
- >>> check_encoding("cp037")
- Traceback (most recent call last):
- ParseError: unknown encoding: line 1, column 30
- """
- ET.XML("<?xml version='1.0' encoding='%s'?><xml />" % encoding)
-
-def encoding():
- r"""
- Test encoding issues.
-
- >>> elem = ET.Element("tag")
- >>> elem.text = u"abc"
- >>> serialize(elem)
- '<tag>abc</tag>'
- >>> serialize(elem, encoding="utf-8")
- '<tag>abc</tag>'
- >>> serialize(elem, encoding="us-ascii")
- '<tag>abc</tag>'
- >>> serialize(elem, encoding="iso-8859-1")
- "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
-
- >>> elem.text = "<&\"\'>"
- >>> serialize(elem)
- '<tag>&lt;&amp;"\'&gt;</tag>'
- >>> serialize(elem, encoding="utf-8")
- '<tag>&lt;&amp;"\'&gt;</tag>'
- >>> serialize(elem, encoding="us-ascii") # cdata characters
- '<tag>&lt;&amp;"\'&gt;</tag>'
- >>> serialize(elem, encoding="iso-8859-1")
- '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag>&lt;&amp;"\'&gt;</tag>'
-
- >>> elem.attrib["key"] = "<&\"\'>"
- >>> elem.text = None
- >>> serialize(elem)
- '<tag key="&lt;&amp;&quot;\'&gt;" />'
- >>> serialize(elem, encoding="utf-8")
- '<tag key="&lt;&amp;&quot;\'&gt;" />'
- >>> serialize(elem, encoding="us-ascii")
- '<tag key="&lt;&amp;&quot;\'&gt;" />'
- >>> serialize(elem, encoding="iso-8859-1")
- '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="&lt;&amp;&quot;\'&gt;" />'
-
- >>> elem.text = u'\xe5\xf6\xf6<>'
- >>> elem.attrib.clear()
- >>> serialize(elem)
- '<tag>&#229;&#246;&#246;&lt;&gt;</tag>'
- >>> serialize(elem, encoding="utf-8")
- '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>'
- >>> serialize(elem, encoding="us-ascii")
- '<tag>&#229;&#246;&#246;&lt;&gt;</tag>'
- >>> serialize(elem, encoding="iso-8859-1")
- "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6&lt;&gt;</tag>"
-
- >>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
- >>> elem.text = None
- >>> serialize(elem)
- '<tag key="&#229;&#246;&#246;&lt;&gt;" />'
- >>> serialize(elem, encoding="utf-8")
- '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />'
- >>> serialize(elem, encoding="us-ascii")
- '<tag key="&#229;&#246;&#246;&lt;&gt;" />'
- >>> serialize(elem, encoding="iso-8859-1")
- '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6&lt;&gt;" />'
- """
-
-def methods():
- r"""
- Test serialization methods.
-
- >>> e = ET.XML("<html><link/><script>1 &lt; 2</script></html>")
- >>> e.tail = "\n"
- >>> serialize(e)
- '<html><link /><script>1 &lt; 2</script></html>\n'
- >>> serialize(e, method=None)
- '<html><link /><script>1 &lt; 2</script></html>\n'
- >>> serialize(e, method="xml")
- '<html><link /><script>1 &lt; 2</script></html>\n'
- >>> serialize(e, method="html")
- '<html><link><script>1 < 2</script></html>\n'
- >>> serialize(e, method="text")
- '1 < 2\n'
- """
-
-def iterators():
- """
- Test iterators.
-
- >>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
- >>> summarize_list(e.iter())
- ['html', 'body', 'i']
- >>> summarize_list(e.find("body").iter())
- ['body', 'i']
- >>> summarize(next(e.iter()))
- 'html'
- >>> "".join(e.itertext())
- 'this is a paragraph...'
- >>> "".join(e.find("body").itertext())
- 'this is a paragraph.'
- >>> next(e.itertext())
- 'this is a '
-
- Method iterparse should return an iterator. See bug 6472.
-
- >>> sourcefile = serialize(e, to_string=False)
- >>> next(ET.iterparse(sourcefile)) # doctest: +ELLIPSIS
- ('end', <Element 'i' at 0x...>)
-
- >>> tree = ET.ElementTree(None)
- >>> tree.iter()
- Traceback (most recent call last):
- AttributeError: 'NoneType' object has no attribute 'iter'
- """
-
-ENTITY_XML = """\
-<!DOCTYPE points [
-<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
-%user-entities;
-]>
-<document>&entity;</document>
-"""
-
-def entity():
- """
- Test entity handling.
-
- 1) good entities
-
- >>> e = ET.XML("<document title='&#x8230;'>test</document>")
- >>> serialize(e)
- '<document title="&#33328;">test</document>'
-
- 2) bad entities
-
- >>> ET.XML("<document>&entity;</document>")
- Traceback (most recent call last):
- ParseError: undefined entity: line 1, column 10
-
- >>> ET.XML(ENTITY_XML)
- Traceback (most recent call last):
- ParseError: undefined entity &entity;: line 5, column 10
-
- 3) custom entity
-
- >>> parser = ET.XMLParser()
- >>> parser.entity["entity"] = "text"
- >>> parser.feed(ENTITY_XML)
- >>> root = parser.close()
- >>> serialize(root)
- '<document>text</document>'
- """
-
-def error(xml):
- """
-
- Test error handling.
-
- >>> issubclass(ET.ParseError, SyntaxError)
- True
- >>> error("foo").position
- (1, 0)
- >>> error("<tag>&foo;</tag>").position
- (1, 5)
- >>> error("foobar<").position
- (1, 6)
+class ElementTreeTest(unittest.TestCase):
+
+ def serialize_check(self, elem, expected):
+ self.assertEqual(serialize(elem), expected)
+
+ def test_interface(self):
+ # Test element tree interface.
+
+ def check_string(string):
+ len(string)
+ for char in string:
+ self.assertEqual(len(char), 1,
+ msg="expected one-character string, got %r" % char)
+ new_string = string + ""
+ new_string = string + " "
+ string[:0]
+
+ def check_mapping(mapping):
+ len(mapping)
+ keys = mapping.keys()
+ items = mapping.items()
+ for key in keys:
+ item = mapping[key]
+ mapping["key"] = "value"
+ self.assertEqual(mapping["key"], "value",
+ msg="expected value string, got %r" % mapping["key"])
+
+ def check_element(element):
+ self.assertTrue(ET.iselement(element), msg="not an element")
+ self.assertTrue(hasattr(element, "tag"), msg="no tag member")
+ self.assertTrue(hasattr(element, "attrib"), msg="no attrib member")
+ self.assertTrue(hasattr(element, "text"), msg="no text member")
+ self.assertTrue(hasattr(element, "tail"), msg="no tail member")
+
+ check_string(element.tag)
+ check_mapping(element.attrib)
+ if element.text is not None:
+ check_string(element.text)
+ if element.tail is not None:
+ check_string(element.tail)
+ for elem in element:
+ check_element(elem)
+
+ element = ET.Element("tag")
+ check_element(element)
+ tree = ET.ElementTree(element)
+ check_element(tree.getroot())
+ element = ET.Element("t\xe4g", key="value")
+ tree = ET.ElementTree(element)
+ self.assertRegexpMatches(repr(element), r"^<Element 't\\xe4g' at 0x.*>$")
+ element = ET.Element("tag", key="value")
+
+ # Make sure all standard element methods exist.
+
+ def check_method(method):
+ self.assertTrue(hasattr(method, '__call__'),
+ msg="%s not callable" % method)
+
+ check_method(element.append)
+ check_method(element.extend)
+ check_method(element.insert)
+ check_method(element.remove)
+ check_method(element.getchildren)
+ check_method(element.find)
+ check_method(element.iterfind)
+ check_method(element.findall)
+ check_method(element.findtext)
+ check_method(element.clear)
+ check_method(element.get)
+ check_method(element.set)
+ check_method(element.keys)
+ check_method(element.items)
+ check_method(element.iter)
+ check_method(element.itertext)
+ check_method(element.getiterator)
+
+ # These methods return an iterable. See bug 6472.
+
+ def check_iter(it):
+ check_method(it.next)
+
+ check_iter(element.iter("tag"))
+ check_iter(element.iterfind("tag"))
+ check_iter(element.iterfind("*"))
+ check_iter(tree.iter("tag"))
+ check_iter(tree.iterfind("tag"))
+ check_iter(tree.iterfind("*"))
+
+ # These aliases are provided:
+
+ self.assertEqual(ET.XML, ET.fromstring)
+ self.assertEqual(ET.PI, ET.ProcessingInstruction)
+ self.assertEqual(ET.XMLParser, ET.XMLTreeBuilder)
+
+ def test_set_attribute(self):
+ element = ET.Element('tag')
+
+ self.assertEqual(element.tag, 'tag')
+ element.tag = 'Tag'
+ self.assertEqual(element.tag, 'Tag')
+ element.tag = 'TAG'
+ self.assertEqual(element.tag, 'TAG')
+
+ self.assertIsNone(element.text)
+ element.text = 'Text'
+ self.assertEqual(element.text, 'Text')
+ element.text = 'TEXT'
+ self.assertEqual(element.text, 'TEXT')
+
+ self.assertIsNone(element.tail)
+ element.tail = 'Tail'
+ self.assertEqual(element.tail, 'Tail')
+ element.tail = 'TAIL'
+ self.assertEqual(element.tail, 'TAIL')
+
+ self.assertEqual(element.attrib, {})
+ element.attrib = {'a': 'b', 'c': 'd'}
+ self.assertEqual(element.attrib, {'a': 'b', 'c': 'd'})
+ element.attrib = {'A': 'B', 'C': 'D'}
+ self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
+
+ def test_simpleops(self):
+ # Basic method sanity checks.
+
+ elem = ET.XML("<body><tag/></body>")
+ self.serialize_check(elem, '<body><tag /></body>')
+ e = ET.Element("tag2")
+ elem.append(e)
+ self.serialize_check(elem, '<body><tag /><tag2 /></body>')
+ elem.remove(e)
+ self.serialize_check(elem, '<body><tag /></body>')
+ elem.insert(0, e)
+ self.serialize_check(elem, '<body><tag2 /><tag /></body>')
+ elem.remove(e)
+ elem.extend([e])
+ self.serialize_check(elem, '<body><tag /><tag2 /></body>')
+ elem.remove(e)
+
+ element = ET.Element("tag", key="value")
+ self.serialize_check(element, '<tag key="value" />') # 1
+ subelement = ET.Element("subtag")
+ element.append(subelement)
+ self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 2
+ element.insert(0, subelement)
+ self.serialize_check(element,
+ '<tag key="value"><subtag /><subtag /></tag>') # 3
+ element.remove(subelement)
+ self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 4
+ element.remove(subelement)
+ self.serialize_check(element, '<tag key="value" />') # 5
+ with self.assertRaises(ValueError) as cm:
+ element.remove(subelement)
+ self.assertTrue(str(cm.exception).startswith('list.remove('))
+ self.serialize_check(element, '<tag key="value" />') # 6
+ element[0:0] = [subelement, subelement, subelement]
+ self.serialize_check(element[1], '<subtag />')
+ self.assertEqual(element[1:9], [element[1], element[2]])
+ self.assertEqual(element[:9:2], [element[0], element[2]])
+ del element[1:2]
+ self.serialize_check(element,
+ '<tag key="value"><subtag /><subtag /></tag>')
+
+ def test_cdata(self):
+ # Test CDATA handling (etc).
+
+ self.serialize_check(ET.XML("<tag>hello</tag>"),
+ '<tag>hello</tag>')
+ self.serialize_check(ET.XML("<tag>&#104;&#101;&#108;&#108;&#111;</tag>"),
+ '<tag>hello</tag>')
+ self.serialize_check(ET.XML("<tag><![CDATA[hello]]></tag>"),
+ '<tag>hello</tag>')
+
+ def test_file_init(self):
+ stringfile = StringIO.StringIO(SAMPLE_XML.encode("utf-8"))
+ tree = ET.ElementTree(file=stringfile)
+ self.assertEqual(tree.find("tag").tag, 'tag')
+ self.assertEqual(tree.find("section/tag").tag, 'tag')
+
+ tree = ET.ElementTree(file=SIMPLE_XMLFILE)
+ self.assertEqual(tree.find("element").tag, 'element')
+ self.assertEqual(tree.find("element/../empty-element").tag,
+ 'empty-element')
+
+ def test_path_cache(self):
+ # Check that the path cache behaves sanely.
+
+ from xml.etree import ElementPath
+
+ elem = ET.XML(SAMPLE_XML)
+ for i in range(10): ET.ElementTree(elem).find('./'+str(i))
+ cache_len_10 = len(ElementPath._cache)
+ for i in range(10): ET.ElementTree(elem).find('./'+str(i))
+ self.assertEqual(len(ElementPath._cache), cache_len_10)
+ for i in range(20): ET.ElementTree(elem).find('./'+str(i))
+ self.assertGreater(len(ElementPath._cache), cache_len_10)
+ for i in range(600): ET.ElementTree(elem).find('./'+str(i))
+ self.assertLess(len(ElementPath._cache), 500)
+
+ def test_copy(self):
+ # Test copy handling (etc).
+
+ import copy
+ e1 = ET.XML("<tag>hello<foo/></tag>")
+ e2 = copy.copy(e1)
+ e3 = copy.deepcopy(e1)
+ e1.find("foo").tag = "bar"
+ self.serialize_check(e1, '<tag>hello<bar /></tag>')
+ self.serialize_check(e2, '<tag>hello<bar /></tag>')
+ self.serialize_check(e3, '<tag>hello<foo /></tag>')
+
+ def test_attrib(self):
+ # Test attribute handling.
+
+ elem = ET.Element("tag")
+ elem.get("key") # 1.1
+ self.assertEqual(elem.get("key", "default"), 'default') # 1.2
+
+ elem.set("key", "value")
+ self.assertEqual(elem.get("key"), 'value') # 1.3
+
+ elem = ET.Element("tag", key="value")
+ self.assertEqual(elem.get("key"), 'value') # 2.1
+ self.assertEqual(elem.attrib, {'key': 'value'}) # 2.2
+
+ attrib = {"key": "value"}
+ elem = ET.Element("tag", attrib)
+ attrib.clear() # check for aliasing issues
+ self.assertEqual(elem.get("key"), 'value') # 3.1
+ self.assertEqual(elem.attrib, {'key': 'value'}) # 3.2
+
+ attrib = {"key": "value"}
+ elem = ET.Element("tag", **attrib)
+ attrib.clear() # check for aliasing issues
+ self.assertEqual(elem.get("key"), 'value') # 4.1
+ self.assertEqual(elem.attrib, {'key': 'value'}) # 4.2
+
+ elem = ET.Element("tag", {"key": "other"}, key="value")
+ self.assertEqual(elem.get("key"), 'value') # 5.1
+ self.assertEqual(elem.attrib, {'key': 'value'}) # 5.2
+
+ elem = ET.Element('test')
+ elem.text = "aa"
+ elem.set('testa', 'testval')
+ elem.set('testb', 'test2')
+ self.assertEqual(ET.tostring(elem),
+ b'<test testa="testval" testb="test2">aa</test>')
+ self.assertEqual(sorted(elem.keys()), ['testa', 'testb'])
+ self.assertEqual(sorted(elem.items()),
+ [('testa', 'testval'), ('testb', 'test2')])
+ self.assertEqual(elem.attrib['testb'], 'test2')
+ elem.attrib['testb'] = 'test1'
+ elem.attrib['testc'] = 'test2'
+ self.assertEqual(ET.tostring(elem),
+ b'<test testa="testval" testb="test1" testc="test2">aa</test>')
+
+ elem = ET.Element('test')
+ elem.set('a', '\r')
+ elem.set('b', '\r\n')
+ elem.set('c', '\t\n\r ')
+ elem.set('d', '\n\n')
+ self.assertEqual(ET.tostring(elem),
+ b'<test a="\r" b="\r&#10;" c="\t&#10;\r " d="&#10;&#10;" />')
+
+ def test_makeelement(self):
+ # Test makeelement handling.
+
+ elem = ET.Element("tag")
+ attrib = {"key": "value"}
+ subelem = elem.makeelement("subtag", attrib)
+ self.assertIsNot(subelem.attrib, attrib, msg="attrib aliasing")
+ elem.append(subelem)
+ self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
+
+ elem.clear()
+ self.serialize_check(elem, '<tag />')
+ elem.append(subelem)
+ self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
+ elem.extend([subelem, subelem])
+ self.serialize_check(elem,
+ '<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>')
+ elem[:] = [subelem]
+ self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
+ elem[:] = tuple([subelem])
+ self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
+
+ def test_parsefile(self):
+ # Test parsing from file.
+
+ tree = ET.parse(SIMPLE_XMLFILE)
+ normalize_crlf(tree)
+ stream = StringIO.StringIO()
+ tree.write(stream)
+ self.assertEqual(stream.getvalue(),
+ '<root>\n'
+ ' <element key="value">text</element>\n'
+ ' <element>text</element>tail\n'
+ ' <empty-element />\n'
+ '</root>')
+ tree = ET.parse(SIMPLE_NS_XMLFILE)
+ normalize_crlf(tree)
+ stream = StringIO.StringIO()
+ tree.write(stream)
+ self.assertEqual(stream.getvalue(),
+ '<ns0:root xmlns:ns0="namespace">\n'
+ ' <ns0:element key="value">text</ns0:element>\n'
+ ' <ns0:element>text</ns0:element>tail\n'
+ ' <ns0:empty-element />\n'
+ '</ns0:root>')
+
+ with open(SIMPLE_XMLFILE) as f:
+ data = f.read()
+
+ parser = ET.XMLParser()
+ self.assertRegexpMatches(parser.version, r'^Expat ')
+ parser.feed(data)
+ self.serialize_check(parser.close(),
+ '<root>\n'
+ ' <element key="value">text</element>\n'
+ ' <element>text</element>tail\n'
+ ' <empty-element />\n'
+ '</root>')
+
+ parser = ET.XMLTreeBuilder() # 1.2 compatibility
+ parser.feed(data)
+ self.serialize_check(parser.close(),
+ '<root>\n'
+ ' <element key="value">text</element>\n'
+ ' <element>text</element>tail\n'
+ ' <empty-element />\n'
+ '</root>')
+
+ target = ET.TreeBuilder()
+ parser = ET.XMLParser(target=target)
+ parser.feed(data)
+ self.serialize_check(parser.close(),
+ '<root>\n'
+ ' <element key="value">text</element>\n'
+ ' <element>text</element>tail\n'
+ ' <empty-element />\n'
+ '</root>')
+
+ def test_parseliteral(self):
+ element = ET.XML("<html><body>text</body></html>")
+ self.assertEqual(ET.tostring(element),
+ '<html><body>text</body></html>')
+ element = ET.fromstring("<html><body>text</body></html>")
+ self.assertEqual(ET.tostring(element),
+ '<html><body>text</body></html>')
+ sequence = ["<html><body>", "text</bo", "dy></html>"]
+ element = ET.fromstringlist(sequence)
+ self.assertEqual(ET.tostring(element),
+ '<html><body>text</body></html>')
+ self.assertEqual("".join(ET.tostringlist(element)),
+ '<html><body>text</body></html>')
+ self.assertEqual(ET.tostring(element, "ascii"),
+ "<?xml version='1.0' encoding='ascii'?>\n"
+ "<html><body>text</body></html>")
+ _, ids = ET.XMLID("<html><body>text</body></html>")
+ self.assertEqual(len(ids), 0)
+ _, ids = ET.XMLID("<html><body id='body'>text</body></html>")
+ self.assertEqual(len(ids), 1)
+ self.assertEqual(ids["body"].tag, 'body')
+
+ def test_iterparse(self):
+ # Test iterparse interface.
+
+ iterparse = ET.iterparse
+
+ context = iterparse(SIMPLE_XMLFILE)
+ action, elem = next(context)
+ self.assertEqual((action, elem.tag), ('end', 'element'))
+ self.assertEqual([(action, elem.tag) for action, elem in context], [
+ ('end', 'element'),
+ ('end', 'empty-element'),
+ ('end', 'root'),
+ ])
+ self.assertEqual(context.root.tag, 'root')
+
+ context = iterparse(SIMPLE_NS_XMLFILE)
+ self.assertEqual([(action, elem.tag) for action, elem in context], [
+ ('end', '{namespace}element'),
+ ('end', '{namespace}element'),
+ ('end', '{namespace}empty-element'),
+ ('end', '{namespace}root'),
+ ])
+
+ events = ()
+ context = iterparse(SIMPLE_XMLFILE, events)
+ self.assertEqual([(action, elem.tag) for action, elem in context], [])
+
+ events = ()
+ context = iterparse(SIMPLE_XMLFILE, events=events)
+ self.assertEqual([(action, elem.tag) for action, elem in context], [])
+
+ events = ("start", "end")
+ context = iterparse(SIMPLE_XMLFILE, events)
+ self.assertEqual([(action, elem.tag) for action, elem in context], [
+ ('start', 'root'),
+ ('start', 'element'),
+ ('end', 'element'),
+ ('start', 'element'),
+ ('end', 'element'),
+ ('start', 'empty-element'),
+ ('end', 'empty-element'),
+ ('end', 'root'),
+ ])
+
+ events = ("start", "end", "start-ns", "end-ns")
+ context = iterparse(SIMPLE_NS_XMLFILE, events)
+ self.assertEqual([(action, elem.tag) if action in ("start", "end")
+ else (action, elem)
+ for action, elem in context], [
+ ('start-ns', ('', 'namespace')),
+ ('start', '{namespace}root'),
+ ('start', '{namespace}element'),
+ ('end', '{namespace}element'),
+ ('start', '{namespace}element'),
+ ('end', '{namespace}element'),
+ ('start', '{namespace}empty-element'),
+ ('end', '{namespace}empty-element'),
+ ('end', '{namespace}root'),
+ ('end-ns', None),
+ ])
+
+ events = ('start-ns', 'end-ns')
+ context = iterparse(StringIO.StringIO(r"<root xmlns=''/>"), events)
+ res = [(action, elem) for action, elem in context]
+ self.assertEqual(res, [('start-ns', ('', '')), ('end-ns', None)])
+
+ events = ("start", "end", "bogus")
+ with open(SIMPLE_XMLFILE, "rb") as f:
+ with self.assertRaises(ValueError) as cm:
+ iterparse(f, events)
+ self.assertFalse(f.closed)
+ self.assertEqual(str(cm.exception), "unknown event 'bogus'")
+
+ source = StringIO.StringIO(
+ "<?xml version='1.0' encoding='iso-8859-1'?>\n"
+ "<body xmlns='http://&#233;ffbot.org/ns'\n"
+ " xmlns:cl\xe9='http://effbot.org/ns'>text</body>\n")
+ events = ("start-ns",)
+ context = iterparse(source, events)
+ self.assertEqual([(action, elem) for action, elem in context], [
+ ('start-ns', ('', u'http://\xe9ffbot.org/ns')),
+ ('start-ns', (u'cl\xe9', 'http://effbot.org/ns')),
+ ])
+
+ source = StringIO.StringIO("<document />junk")
+ it = iterparse(source)
+ action, elem = next(it)
+ self.assertEqual((action, elem.tag), ('end', 'document'))
+ with self.assertRaises(ET.ParseError) as cm:
+ next(it)
+ self.assertEqual(str(cm.exception),
+ 'junk after document element: line 1, column 12')
+
+ def test_writefile(self):
+ elem = ET.Element("tag")
+ elem.text = "text"
+ self.serialize_check(elem, '<tag>text</tag>')
+ ET.SubElement(elem, "subtag").text = "subtext"
+ self.serialize_check(elem, '<tag>text<subtag>subtext</subtag></tag>')
+
+ # Test tag suppression
+ elem.tag = None
+ self.serialize_check(elem, 'text<subtag>subtext</subtag>')
+ elem.insert(0, ET.Comment("comment"))
+ self.serialize_check(elem,
+ 'text<!--comment--><subtag>subtext</subtag>') # assumes 1.3
+
+ elem[0] = ET.PI("key", "value")
+ self.serialize_check(elem, 'text<?key value?><subtag>subtext</subtag>')
+
+ def test_custom_builder(self):
+ # Test parser w. custom builder.
+
+ with open(SIMPLE_XMLFILE) as f:
+ data = f.read()
+ class Builder(list):
+ def start(self, tag, attrib):
+ self.append(("start", tag))
+ def end(self, tag):
+ self.append(("end", tag))
+ def data(self, text):
+ pass
+ builder = Builder()
+ parser = ET.XMLParser(target=builder)
+ parser.feed(data)
+ self.assertEqual(builder, [
+ ('start', 'root'),
+ ('start', 'element'),
+ ('end', 'element'),
+ ('start', 'element'),
+ ('end', 'element'),
+ ('start', 'empty-element'),
+ ('end', 'empty-element'),
+ ('end', 'root'),
+ ])
+
+ with open(SIMPLE_NS_XMLFILE) as f:
+ data = f.read()
+ class Builder(list):
+ def start(self, tag, attrib):
+ self.append(("start", tag))
+ def end(self, tag):
+ self.append(("end", tag))
+ def data(self, text):
+ pass
+ def pi(self, target, data):
+ self.append(("pi", target, data))
+ def comment(self, data):
+ self.append(("comment", data))
+ builder = Builder()
+ parser = ET.XMLParser(target=builder)
+ parser.feed(data)
+ self.assertEqual(builder, [
+ ('pi', 'pi', 'data'),
+ ('comment', ' comment '),
+ ('start', '{namespace}root'),
+ ('start', '{namespace}element'),
+ ('end', '{namespace}element'),
+ ('start', '{namespace}element'),
+ ('end', '{namespace}element'),
+ ('start', '{namespace}empty-element'),
+ ('end', '{namespace}empty-element'),
+ ('end', '{namespace}root'),
+ ])
+
+
+ # Element.getchildren() and ElementTree.getiterator() are deprecated.
+ @checkwarnings(("This method will be removed in future versions. "
+ "Use .+ instead.",
+ (DeprecationWarning, PendingDeprecationWarning)))
+ def test_getchildren(self):
+ # Test Element.getchildren()
+
+ with open(SIMPLE_XMLFILE, "r") as f:
+ tree = ET.parse(f)
+ self.assertEqual([summarize_list(elem.getchildren())
+ for elem in tree.getroot().iter()], [
+ ['element', 'element', 'empty-element'],
+ [],
+ [],
+ [],
+ ])
+ self.assertEqual([summarize_list(elem.getchildren())
+ for elem in tree.getiterator()], [
+ ['element', 'element', 'empty-element'],
+ [],
+ [],
+ [],
+ ])
+
+ elem = ET.XML(SAMPLE_XML)
+ self.assertEqual(len(elem.getchildren()), 3)
+ self.assertEqual(len(elem[2].getchildren()), 1)
+ self.assertEqual(elem[:], elem.getchildren())
+ child1 = elem[0]
+ child2 = elem[2]
+ del elem[1:2]
+ self.assertEqual(len(elem.getchildren()), 2)
+ self.assertEqual(child1, elem[0])
+ self.assertEqual(child2, elem[1])
+ elem[0:2] = [child2, child1]
+ self.assertEqual(child2, elem[0])
+ self.assertEqual(child1, elem[1])
+ self.assertNotEqual(child1, elem[0])
+ elem.clear()
+ self.assertEqual(elem.getchildren(), [])
+
+ def test_writestring(self):
+ elem = ET.XML("<html><body>text</body></html>")
+ self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
+ elem = ET.fromstring("<html><body>text</body></html>")
+ self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
+
+ def test_encoding(self):
+ def check(encoding, body=''):
+ xml = ("<?xml version='1.0' encoding='%s'?><xml>%s</xml>" %
+ (encoding, body))
+ self.assertEqual(ET.XML(xml.encode(encoding)).text, body)
+ check("ascii", 'a')
+ check("us-ascii", 'a')
+ check("iso-8859-1", u'\xbd')
+ check("iso-8859-15", u'\u20ac')
+ check("cp437", u'\u221a')
+ check("mac-roman", u'\u02da')
+
+ def xml(encoding):
+ return "<?xml version='1.0' encoding='%s'?><xml />" % encoding
+ def bxml(encoding):
+ return xml(encoding).encode(encoding)
+ supported_encodings = [
+ 'ascii', 'utf-8', 'utf-8-sig', 'utf-16', 'utf-16be', 'utf-16le',
+ 'iso8859-1', 'iso8859-2', 'iso8859-3', 'iso8859-4', 'iso8859-5',
+ 'iso8859-6', 'iso8859-7', 'iso8859-8', 'iso8859-9', 'iso8859-10',
+ 'iso8859-13', 'iso8859-14', 'iso8859-15', 'iso8859-16',
+ 'cp437', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852',
+ 'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861', 'cp862',
+ 'cp863', 'cp865', 'cp866', 'cp869', 'cp874', 'cp1006',
+ 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
+ 'cp1256', 'cp1257', 'cp1258',
+ 'mac-cyrillic', 'mac-greek', 'mac-iceland', 'mac-latin2',
+ 'mac-roman', 'mac-turkish',
+ 'iso2022-jp', 'iso2022-jp-1', 'iso2022-jp-2', 'iso2022-jp-2004',
+ 'iso2022-jp-3', 'iso2022-jp-ext',
+ 'koi8-r', 'koi8-u',
+ 'ptcp154',
+ ]
+ for encoding in supported_encodings:
+ self.assertEqual(ET.tostring(ET.XML(bxml(encoding))), b'<xml />')
+
+ unsupported_ascii_compatible_encodings = [
+ 'big5', 'big5hkscs',
+ 'cp932', 'cp949', 'cp950',
+ 'euc-jp', 'euc-jis-2004', 'euc-jisx0213', 'euc-kr',
+ 'gb2312', 'gbk', 'gb18030',
+ 'iso2022-kr', 'johab', 'hz',
+ 'shift-jis', 'shift-jis-2004', 'shift-jisx0213',
+ 'utf-7',
+ ]
+ for encoding in unsupported_ascii_compatible_encodings:
+ self.assertRaises(ValueError, ET.XML, bxml(encoding))
+
+ unsupported_ascii_incompatible_encodings = [
+ 'cp037', 'cp424', 'cp500', 'cp864', 'cp875', 'cp1026', 'cp1140',
+ 'utf_32', 'utf_32_be', 'utf_32_le',
+ ]
+ for encoding in unsupported_ascii_incompatible_encodings:
+ self.assertRaises(ET.ParseError, ET.XML, bxml(encoding))
+
+ self.assertRaises(ValueError, ET.XML, xml('undefined').encode('ascii'))
+ self.assertRaises(LookupError, ET.XML, xml('xxx').encode('ascii'))
+
+ def test_methods(self):
+ # Test serialization methods.
+
+ e = ET.XML("<html><link/><script>1 &lt; 2</script></html>")
+ e.tail = "\n"
+ self.assertEqual(serialize(e),
+ '<html><link /><script>1 &lt; 2</script></html>\n')
+ self.assertEqual(serialize(e, method=None),
+ '<html><link /><script>1 &lt; 2</script></html>\n')
+ self.assertEqual(serialize(e, method="xml"),
+ '<html><link /><script>1 &lt; 2</script></html>\n')
+ self.assertEqual(serialize(e, method="html"),
+ '<html><link><script>1 < 2</script></html>\n')
+ self.assertEqual(serialize(e, method="text"), '1 < 2\n')
+
+ def test_issue18347(self):
+ e = ET.XML('<html><CamelCase>text</CamelCase></html>')
+ self.assertEqual(serialize(e),
+ '<html><CamelCase>text</CamelCase></html>')
+ self.assertEqual(serialize(e, method="html"),
+ '<html><CamelCase>text</CamelCase></html>')
+
+ def test_entity(self):
+ # Test entity handling.
+
+ # 1) good entities
+
+ e = ET.XML("<document title='&#x8230;'>test</document>")
+ self.assertEqual(serialize(e, encoding="us-ascii"),
+ '<document title="&#33328;">test</document>')
+ self.serialize_check(e, '<document title="&#33328;">test</document>')
+
+ # 2) bad entities
+
+ with self.assertRaises(ET.ParseError) as cm:
+ ET.XML("<document>&entity;</document>")
+ self.assertEqual(str(cm.exception),
+ 'undefined entity: line 1, column 10')
+
+ with self.assertRaises(ET.ParseError) as cm:
+ ET.XML(ENTITY_XML)
+ self.assertEqual(str(cm.exception),
+ 'undefined entity &entity;: line 5, column 10')
+
+ # 3) custom entity
+
+ parser = ET.XMLParser()
+ parser.entity["entity"] = "text"
+ parser.feed(ENTITY_XML)
+ root = parser.close()
+ self.serialize_check(root, '<document>text</document>')
+
+ def test_namespace(self):
+ # Test namespace issues.
+
+ # 1) xml namespace
+
+ elem = ET.XML("<tag xml:lang='en' />")
+ self.serialize_check(elem, '<tag xml:lang="en" />') # 1.1
+
+ # 2) other "well-known" namespaces
+
+ elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
+ self.serialize_check(elem,
+ '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />') # 2.1
+
+ elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
+ self.serialize_check(elem,
+ '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />') # 2.2
+
+ elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
+ self.serialize_check(elem,
+ '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />') # 2.3
+
+ # 3) unknown namespaces
+ elem = ET.XML(SAMPLE_XML_NS)
+ self.serialize_check(elem,
+ '<ns0:body xmlns:ns0="http://effbot.org/ns">\n'
+ ' <ns0:tag>text</ns0:tag>\n'
+ ' <ns0:tag />\n'
+ ' <ns0:section>\n'
+ ' <ns0:tag>subtext</ns0:tag>\n'
+ ' </ns0:section>\n'
+ '</ns0:body>')
+
+ def test_qname(self):
+ # Test QName handling.
+
+ # 1) decorated tags
+
+ elem = ET.Element("{uri}tag")
+ self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.1
+ elem = ET.Element(ET.QName("{uri}tag"))
+ self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.2
+ elem = ET.Element(ET.QName("uri", "tag"))
+ self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.3
+ elem = ET.Element(ET.QName("uri", "tag"))
+ subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
+ subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
+ self.serialize_check(elem,
+ '<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>') # 1.4
+
+ # 2) decorated attributes
+
+ elem.clear()
+ elem.attrib["{uri}key"] = "value"
+ self.serialize_check(elem,
+ '<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.1
+
+ elem.clear()
+ elem.attrib[ET.QName("{uri}key")] = "value"
+ self.serialize_check(elem,
+ '<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.2
+
+ # 3) decorated values are not converted by default, but the
+ # QName wrapper can be used for values
+
+ elem.clear()
+ elem.attrib["{uri}key"] = "{uri}value"
+ self.serialize_check(elem,
+ '<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />') # 3.1
+
+ elem.clear()
+ elem.attrib["{uri}key"] = ET.QName("{uri}value")
+ self.serialize_check(elem,
+ '<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />') # 3.2
+
+ elem.clear()
+ subelem = ET.Element("tag")
+ subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
+ elem.append(subelem)
+ elem.append(subelem)
+ self.serialize_check(elem,
+ '<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2">'
+ '<tag ns1:key="ns2:value" />'
+ '<tag ns1:key="ns2:value" />'
+ '</ns0:tag>') # 3.3
+
+ # 4) Direct QName tests
+
+ self.assertEqual(str(ET.QName('ns', 'tag')), '{ns}tag')
+ self.assertEqual(str(ET.QName('{ns}tag')), '{ns}tag')
+ q1 = ET.QName('ns', 'tag')
+ q2 = ET.QName('ns', 'tag')
+ self.assertEqual(q1, q2)
+ q2 = ET.QName('ns', 'other-tag')
+ self.assertNotEqual(q1, q2)
+ self.assertNotEqual(q1, 'ns:tag')
+ self.assertEqual(q1, '{ns}tag')
+
+ def test_doctype_public(self):
+ # Test PUBLIC doctype.
+
+ elem = ET.XML('<!DOCTYPE html PUBLIC'
+ ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
+ ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
+ '<html>text</html>')
+
+ def test_xpath_tokenizer(self):
+ # Test the XPath tokenizer.
+ from xml.etree import ElementPath
+ def check(p, expected):
+ self.assertEqual([op or tag
+ for op, tag in ElementPath.xpath_tokenizer(p)],
+ expected)
+
+ # tests from the xml specification
+ check("*", ['*'])
+ check("text()", ['text', '()'])
+ check("@name", ['@', 'name'])
+ check("@*", ['@', '*'])
+ check("para[1]", ['para', '[', '1', ']'])
+ check("para[last()]", ['para', '[', 'last', '()', ']'])
+ check("*/para", ['*', '/', 'para'])
+ check("/doc/chapter[5]/section[2]",
+ ['/', 'doc', '/', 'chapter', '[', '5', ']',
+ '/', 'section', '[', '2', ']'])
+ check("chapter//para", ['chapter', '//', 'para'])
+ check("//para", ['//', 'para'])
+ check("//olist/item", ['//', 'olist', '/', 'item'])
+ check(".", ['.'])
+ check(".//para", ['.', '//', 'para'])
+ check("..", ['..'])
+ check("../@lang", ['..', '/', '@', 'lang'])
+ check("chapter[title]", ['chapter', '[', 'title', ']'])
+ check("employee[@secretary and @assistant]", ['employee',
+ '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'])
+
+ # additional tests
+ check("{http://spam}egg", ['{http://spam}egg'])
+ check("./spam.egg", ['.', '/', 'spam.egg'])
+ check(".//{http://spam}egg", ['.', '//', '{http://spam}egg'])
+
+ def test_processinginstruction(self):
+ # Test ProcessingInstruction directly
+
+ self.assertEqual(ET.tostring(ET.ProcessingInstruction('test', 'instruction')),
+ '<?test instruction?>')
+ self.assertEqual(ET.tostring(ET.PI('test', 'instruction')),
+ '<?test instruction?>')
+
+ # Issue #2746
+
+ self.assertEqual(ET.tostring(ET.PI('test', '<testing&>')),
+ '<?test <testing&>?>')
+ self.assertEqual(ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1'),
+ "<?xml version='1.0' encoding='latin1'?>\n"
+ "<?test <testing&>\xe3?>")
+
+ def test_html_empty_elems_serialization(self):
+ # issue 15970
+ # from http://www.w3.org/TR/html401/index/elements.html
+ for element in ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
+ 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']:
+ for elem in [element, element.lower()]:
+ expected = '<%s>' % elem
+ serialized = serialize(ET.XML('<%s />' % elem), method='html')
+ self.assertEqual(serialized, expected)
+ serialized = serialize(ET.XML('<%s></%s>' % (elem,elem)),
+ method='html')
+ self.assertEqual(serialized, expected)
- """
- try:
- ET.XML(xml)
- except ET.ParseError:
- return sys.exc_value
-
-def namespace():
- """
- Test namespace issues.
-
- 1) xml namespace
-
- >>> elem = ET.XML("<tag xml:lang='en' />")
- >>> serialize(elem) # 1.1
- '<tag xml:lang="en" />'
-
- 2) other "well-known" namespaces
-
- >>> elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
- >>> serialize(elem) # 2.1
- '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />'
-
- >>> elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
- >>> serialize(elem) # 2.2
- '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />'
-
- >>> elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
- >>> serialize(elem) # 2.3
- '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />'
-
- 3) unknown namespaces
- >>> elem = ET.XML(SAMPLE_XML_NS)
- >>> print serialize(elem)
- <ns0:body xmlns:ns0="http://effbot.org/ns">
- <ns0:tag>text</ns0:tag>
- <ns0:tag />
- <ns0:section>
- <ns0:tag>subtext</ns0:tag>
- </ns0:section>
- </ns0:body>
- """
-
-def qname():
- """
- Test QName handling.
-
- 1) decorated tags
-
- >>> elem = ET.Element("{uri}tag")
- >>> serialize(elem) # 1.1
- '<ns0:tag xmlns:ns0="uri" />'
- >>> elem = ET.Element(ET.QName("{uri}tag"))
- >>> serialize(elem) # 1.2
- '<ns0:tag xmlns:ns0="uri" />'
- >>> elem = ET.Element(ET.QName("uri", "tag"))
- >>> serialize(elem) # 1.3
- '<ns0:tag xmlns:ns0="uri" />'
- >>> elem = ET.Element(ET.QName("uri", "tag"))
- >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
- >>> subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
- >>> serialize(elem) # 1.4
- '<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>'
-
- 2) decorated attributes
-
- >>> elem.clear()
- >>> elem.attrib["{uri}key"] = "value"
- >>> serialize(elem) # 2.1
- '<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
-
- >>> elem.clear()
- >>> elem.attrib[ET.QName("{uri}key")] = "value"
- >>> serialize(elem) # 2.2
- '<ns0:tag xmlns:ns0="uri" ns0:key="value" />'
-
- 3) decorated values are not converted by default, but the
- QName wrapper can be used for values
-
- >>> elem.clear()
- >>> elem.attrib["{uri}key"] = "{uri}value"
- >>> serialize(elem) # 3.1
- '<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />'
-
- >>> elem.clear()
- >>> elem.attrib["{uri}key"] = ET.QName("{uri}value")
- >>> serialize(elem) # 3.2
- '<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />'
-
- >>> elem.clear()
- >>> subelem = ET.Element("tag")
- >>> subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
- >>> elem.append(subelem)
- >>> elem.append(subelem)
- >>> serialize(elem) # 3.3
- '<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2"><tag ns1:key="ns2:value" /><tag ns1:key="ns2:value" /></ns0:tag>'
-
- 4) Direct QName tests
-
- >>> str(ET.QName('ns', 'tag'))
- '{ns}tag'
- >>> str(ET.QName('{ns}tag'))
- '{ns}tag'
- >>> q1 = ET.QName('ns', 'tag')
- >>> q2 = ET.QName('ns', 'tag')
- >>> q1 == q2
- True
- >>> q2 = ET.QName('ns', 'other-tag')
- >>> q1 == q2
- False
- >>> q1 == 'ns:tag'
- False
- >>> q1 == '{ns}tag'
- True
- """
-
-def doctype_public():
- """
- Test PUBLIC doctype.
-
- >>> elem = ET.XML('<!DOCTYPE html PUBLIC'
- ... ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
- ... ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
- ... '<html>text</html>')
-
- """
-
-def xpath_tokenizer(p):
- """
- Test the XPath tokenizer.
-
- >>> # tests from the xml specification
- >>> xpath_tokenizer("*")
- ['*']
- >>> xpath_tokenizer("text()")
- ['text', '()']
- >>> xpath_tokenizer("@name")
- ['@', 'name']
- >>> xpath_tokenizer("@*")
- ['@', '*']
- >>> xpath_tokenizer("para[1]")
- ['para', '[', '1', ']']
- >>> xpath_tokenizer("para[last()]")
- ['para', '[', 'last', '()', ']']
- >>> xpath_tokenizer("*/para")
- ['*', '/', 'para']
- >>> xpath_tokenizer("/doc/chapter[5]/section[2]")
- ['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
- >>> xpath_tokenizer("chapter//para")
- ['chapter', '//', 'para']
- >>> xpath_tokenizer("//para")
- ['//', 'para']
- >>> xpath_tokenizer("//olist/item")
- ['//', 'olist', '/', 'item']
- >>> xpath_tokenizer(".")
- ['.']
- >>> xpath_tokenizer(".//para")
- ['.', '//', 'para']
- >>> xpath_tokenizer("..")
- ['..']
- >>> xpath_tokenizer("../@lang")
- ['..', '/', '@', 'lang']
- >>> xpath_tokenizer("chapter[title]")
- ['chapter', '[', 'title', ']']
- >>> xpath_tokenizer("employee[@secretary and @assistant]")
- ['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
-
- >>> # additional tests
- >>> xpath_tokenizer("{http://spam}egg")
- ['{http://spam}egg']
- >>> xpath_tokenizer("./spam.egg")
- ['.', '/', 'spam.egg']
- >>> xpath_tokenizer(".//{http://spam}egg")
- ['.', '//', '{http://spam}egg']
- """
- from xml.etree import ElementPath
- out = []
- for op, tag in ElementPath.xpath_tokenizer(p):
- out.append(op or tag)
- return out
-
-def processinginstruction():
- """
- Test ProcessingInstruction directly
-
- >>> ET.tostring(ET.ProcessingInstruction('test', 'instruction'))
- '<?test instruction?>'
- >>> ET.tostring(ET.PI('test', 'instruction'))
- '<?test instruction?>'
-
- Issue #2746
-
- >>> ET.tostring(ET.PI('test', '<testing&>'))
- '<?test <testing&>?>'
- >>> ET.tostring(ET.PI('test', u'<testing&>\xe3'), 'latin1')
- "<?xml version='1.0' encoding='latin1'?>\\n<?test <testing&>\\xe3?>"
- """
#
# xinclude tests (samples from appendix C of the xinclude specification)
@@ -1331,96 +1079,6 @@ XINCLUDE["default.xml"] = """\
</document>
""".format(cgi.escape(SIMPLE_XMLFILE, True))
-def xinclude_loader(href, parse="xml", encoding=None):
- try:
- data = XINCLUDE[href]
- except KeyError:
- raise IOError("resource not found")
- if parse == "xml":
- from xml.etree.ElementTree import XML
- return XML(data)
- return data
-
-def xinclude():
- r"""
- Basic inclusion example (XInclude C.1)
-
- >>> from xml.etree import ElementTree as ET
- >>> from xml.etree import ElementInclude
-
- >>> document = xinclude_loader("C1.xml")
- >>> ElementInclude.include(document, xinclude_loader)
- >>> print serialize(document) # C1
- <document>
- <p>120 Mz is adequate for an average home user.</p>
- <disclaimer>
- <p>The opinions represented herein represent those of the individual
- and should not be interpreted as official policy endorsed by this
- organization.</p>
- </disclaimer>
- </document>
-
- Textual inclusion example (XInclude C.2)
-
- >>> document = xinclude_loader("C2.xml")
- >>> ElementInclude.include(document, xinclude_loader)
- >>> print serialize(document) # C2
- <document>
- <p>This document has been accessed
- 324387 times.</p>
- </document>
-
- Textual inclusion after sibling element (based on modified XInclude C.2)
-
- >>> document = xinclude_loader("C2b.xml")
- >>> ElementInclude.include(document, xinclude_loader)
- >>> print(serialize(document)) # C2b
- <document>
- <p>This document has been <em>accessed</em>
- 324387 times.</p>
- </document>
-
- Textual inclusion of XML example (XInclude C.3)
-
- >>> document = xinclude_loader("C3.xml")
- >>> ElementInclude.include(document, xinclude_loader)
- >>> print serialize(document) # C3
- <document>
- <p>The following is the source of the "data.xml" resource:</p>
- <example>&lt;?xml version='1.0'?&gt;
- &lt;data&gt;
- &lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt;
- &lt;/data&gt;
- </example>
- </document>
-
- Fallback example (XInclude C.5)
- Note! Fallback support is not yet implemented
-
- >>> document = xinclude_loader("C5.xml")
- >>> ElementInclude.include(document, xinclude_loader)
- Traceback (most recent call last):
- IOError: resource not found
- >>> # print serialize(document) # C5
- """
-
-def xinclude_default():
- """
- >>> from xml.etree import ElementInclude
-
- >>> document = xinclude_loader("default.xml")
- >>> ElementInclude.include(document)
- >>> print serialize(document) # default
- <document>
- <p>Example.</p>
- <root>
- <element key="value">text</element>
- <element>text</element>tail
- <empty-element />
- </root>
- </document>
- """
-
#
# badly formatted xi:include tags
@@ -1441,490 +1099,1659 @@ XINCLUDE_BAD["B2.xml"] = """\
</div>
"""
-def xinclude_failures():
- r"""
- Test failure to locate included XML file.
-
- >>> from xml.etree import ElementInclude
-
- >>> def none_loader(href, parser, encoding=None):
- ... return None
-
- >>> document = ET.XML(XINCLUDE["C1.xml"])
- >>> ElementInclude.include(document, loader=none_loader)
- Traceback (most recent call last):
- FatalIncludeError: cannot load 'disclaimer.xml' as 'xml'
-
- Test failure to locate included text file.
-
- >>> document = ET.XML(XINCLUDE["C2.xml"])
- >>> ElementInclude.include(document, loader=none_loader)
- Traceback (most recent call last):
- FatalIncludeError: cannot load 'count.txt' as 'text'
-
- Test bad parse type.
-
- >>> document = ET.XML(XINCLUDE_BAD["B1.xml"])
- >>> ElementInclude.include(document, loader=none_loader)
- Traceback (most recent call last):
- FatalIncludeError: unknown parse type in xi:include tag ('BAD_TYPE')
-
- Test xi:fallback outside xi:include.
-
- >>> document = ET.XML(XINCLUDE_BAD["B2.xml"])
- >>> ElementInclude.include(document, loader=none_loader)
- Traceback (most recent call last):
- FatalIncludeError: xi:fallback tag must be child of xi:include ('{http://www.w3.org/2001/XInclude}fallback')
- """
+class XIncludeTest(unittest.TestCase):
+
+ def xinclude_loader(self, href, parse="xml", encoding=None):
+ try:
+ data = XINCLUDE[href]
+ except KeyError:
+ raise IOError("resource not found")
+ if parse == "xml":
+ data = ET.XML(data)
+ return data
+
+ def none_loader(self, href, parser, encoding=None):
+ return None
+
+ def test_xinclude_default(self):
+ from xml.etree import ElementInclude
+ doc = self.xinclude_loader('default.xml')
+ ElementInclude.include(doc)
+ self.assertEqual(serialize(doc),
+ '<document>\n'
+ ' <p>Example.</p>\n'
+ ' <root>\n'
+ ' <element key="value">text</element>\n'
+ ' <element>text</element>tail\n'
+ ' <empty-element />\n'
+ '</root>\n'
+ '</document>')
+
+ def test_xinclude(self):
+ from xml.etree import ElementInclude
+
+ # Basic inclusion example (XInclude C.1)
+ document = self.xinclude_loader("C1.xml")
+ ElementInclude.include(document, self.xinclude_loader)
+ self.assertEqual(serialize(document),
+ '<document>\n'
+ ' <p>120 Mz is adequate for an average home user.</p>\n'
+ ' <disclaimer>\n'
+ ' <p>The opinions represented herein represent those of the individual\n'
+ ' and should not be interpreted as official policy endorsed by this\n'
+ ' organization.</p>\n'
+ '</disclaimer>\n'
+ '</document>') # C1
+
+ # Textual inclusion example (XInclude C.2)
+ document = self.xinclude_loader("C2.xml")
+ ElementInclude.include(document, self.xinclude_loader)
+ self.assertEqual(serialize(document),
+ '<document>\n'
+ ' <p>This document has been accessed\n'
+ ' 324387 times.</p>\n'
+ '</document>') # C2
+
+ # Textual inclusion after sibling element (based on modified XInclude C.2)
+ document = self.xinclude_loader("C2b.xml")
+ ElementInclude.include(document, self.xinclude_loader)
+ self.assertEqual(serialize(document),
+ '<document>\n'
+ ' <p>This document has been <em>accessed</em>\n'
+ ' 324387 times.</p>\n'
+ '</document>') # C2b
+
+ # Textual inclusion of XML example (XInclude C.3)
+ document = self.xinclude_loader("C3.xml")
+ ElementInclude.include(document, self.xinclude_loader)
+ self.assertEqual(serialize(document),
+ '<document>\n'
+ ' <p>The following is the source of the "data.xml" resource:</p>\n'
+ " <example>&lt;?xml version='1.0'?&gt;\n"
+ '&lt;data&gt;\n'
+ ' &lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt;\n'
+ '&lt;/data&gt;\n'
+ '</example>\n'
+ '</document>') # C3
+
+ # Fallback example (XInclude C.5)
+ # Note! Fallback support is not yet implemented
+ document = self.xinclude_loader("C5.xml")
+ with self.assertRaises(IOError) as cm:
+ ElementInclude.include(document, self.xinclude_loader)
+ self.assertEqual(str(cm.exception), 'resource not found')
+ self.assertEqual(serialize(document),
+ '<div xmlns:ns0="http://www.w3.org/2001/XInclude">\n'
+ ' <ns0:include href="example.txt" parse="text">\n'
+ ' <ns0:fallback>\n'
+ ' <ns0:include href="fallback-example.txt" parse="text">\n'
+ ' <ns0:fallback><a href="mailto:bob@example.org">Report error</a></ns0:fallback>\n'
+ ' </ns0:include>\n'
+ ' </ns0:fallback>\n'
+ ' </ns0:include>\n'
+ '</div>') # C5
+
+ def test_xinclude_failures(self):
+ from xml.etree import ElementInclude
+
+ # Test failure to locate included XML file.
+ document = ET.XML(XINCLUDE["C1.xml"])
+ with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
+ ElementInclude.include(document, loader=self.none_loader)
+ self.assertEqual(str(cm.exception),
+ "cannot load 'disclaimer.xml' as 'xml'")
+
+ # Test failure to locate included text file.
+ document = ET.XML(XINCLUDE["C2.xml"])
+ with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
+ ElementInclude.include(document, loader=self.none_loader)
+ self.assertEqual(str(cm.exception),
+ "cannot load 'count.txt' as 'text'")
+
+ # Test bad parse type.
+ document = ET.XML(XINCLUDE_BAD["B1.xml"])
+ with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
+ ElementInclude.include(document, loader=self.none_loader)
+ self.assertEqual(str(cm.exception),
+ "unknown parse type in xi:include tag ('BAD_TYPE')")
+
+ # Test xi:fallback outside xi:include.
+ document = ET.XML(XINCLUDE_BAD["B2.xml"])
+ with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
+ ElementInclude.include(document, loader=self.none_loader)
+ self.assertEqual(str(cm.exception),
+ "xi:fallback tag must be child of xi:include "
+ "('{http://www.w3.org/2001/XInclude}fallback')")
# --------------------------------------------------------------------
# reported bugs
-def bug_xmltoolkit21():
- """
-
- marshaller gives obscure errors for non-string values
-
- >>> elem = ET.Element(123)
- >>> serialize(elem) # tag
- Traceback (most recent call last):
- TypeError: cannot serialize 123 (type int)
- >>> elem = ET.Element("elem")
- >>> elem.text = 123
- >>> serialize(elem) # text
- Traceback (most recent call last):
- TypeError: cannot serialize 123 (type int)
- >>> elem = ET.Element("elem")
- >>> elem.tail = 123
- >>> serialize(elem) # tail
- Traceback (most recent call last):
- TypeError: cannot serialize 123 (type int)
- >>> elem = ET.Element("elem")
- >>> elem.set(123, "123")
- >>> serialize(elem) # attribute key
- Traceback (most recent call last):
- TypeError: cannot serialize 123 (type int)
- >>> elem = ET.Element("elem")
- >>> elem.set("123", 123)
- >>> serialize(elem) # attribute value
- Traceback (most recent call last):
- TypeError: cannot serialize 123 (type int)
-
- """
-
-def bug_xmltoolkit25():
- """
+class BugsTest(unittest.TestCase):
- typo in ElementTree.findtext
+ def test_bug_xmltoolkit21(self):
+ # marshaller gives obscure errors for non-string values
- >>> elem = ET.XML(SAMPLE_XML)
- >>> tree = ET.ElementTree(elem)
- >>> tree.findtext("tag")
- 'text'
- >>> tree.findtext("section/tag")
- 'subtext'
+ def check(elem):
+ with self.assertRaises(TypeError) as cm:
+ serialize(elem)
+ self.assertEqual(str(cm.exception),
+ 'cannot serialize 123 (type int)')
- """
+ elem = ET.Element(123)
+ check(elem) # tag
-def bug_xmltoolkit28():
- """
+ elem = ET.Element("elem")
+ elem.text = 123
+ check(elem) # text
- .//tag causes exceptions
+ elem = ET.Element("elem")
+ elem.tail = 123
+ check(elem) # tail
- >>> tree = ET.XML("<doc><table><tbody/></table></doc>")
- >>> summarize_list(tree.findall(".//thead"))
- []
- >>> summarize_list(tree.findall(".//tbody"))
- ['tbody']
+ elem = ET.Element("elem")
+ elem.set(123, "123")
+ check(elem) # attribute key
- """
+ elem = ET.Element("elem")
+ elem.set("123", 123)
+ check(elem) # attribute value
-def bug_xmltoolkitX1():
- """
+ def test_bug_xmltoolkit25(self):
+ # typo in ElementTree.findtext
- dump() doesn't flush the output buffer
+ elem = ET.XML(SAMPLE_XML)
+ tree = ET.ElementTree(elem)
+ self.assertEqual(tree.findtext("tag"), 'text')
+ self.assertEqual(tree.findtext("section/tag"), 'subtext')
- >>> tree = ET.XML("<doc><table><tbody/></table></doc>")
- >>> ET.dump(tree); sys.stdout.write("tail")
- <doc><table><tbody /></table></doc>
- tail
+ def test_bug_xmltoolkit28(self):
+ # .//tag causes exceptions
- """
+ tree = ET.XML("<doc><table><tbody/></table></doc>")
+ self.assertEqual(summarize_list(tree.findall(".//thead")), [])
+ self.assertEqual(summarize_list(tree.findall(".//tbody")), ['tbody'])
-def bug_xmltoolkit39():
- """
+ def test_bug_xmltoolkitX1(self):
+ # dump() doesn't flush the output buffer
- non-ascii element and attribute names doesn't work
+ tree = ET.XML("<doc><table><tbody/></table></doc>")
+ with support.captured_stdout() as stdout:
+ ET.dump(tree)
+ self.assertEqual(stdout.getvalue(), '<doc><table><tbody /></table></doc>\n')
- >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
- >>> ET.tostring(tree, "utf-8")
- '<t\\xc3\\xa4g />'
+ def test_bug_xmltoolkit39(self):
+ # non-ascii element and attribute names doesn't work
- >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag \xe4ttr='v&#228;lue' />")
- >>> tree.attrib
- {u'\\xe4ttr': u'v\\xe4lue'}
- >>> ET.tostring(tree, "utf-8")
- '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
+ tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
+ self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
- >>> tree = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g>text</t\xe4g>")
- >>> ET.tostring(tree, "utf-8")
- '<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'
+ tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
+ b"<tag \xe4ttr='v&#228;lue' />")
+ self.assertEqual(tree.attrib, {u'\xe4ttr': u'v\xe4lue'})
+ self.assertEqual(ET.tostring(tree, "utf-8"),
+ b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
- >>> tree = ET.Element(u"t\u00e4g")
- >>> ET.tostring(tree, "utf-8")
- '<t\\xc3\\xa4g />'
+ tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
+ b'<t\xe4g>text</t\xe4g>')
+ self.assertEqual(ET.tostring(tree, "utf-8"),
+ b'<t\xc3\xa4g>text</t\xc3\xa4g>')
- >>> tree = ET.Element("tag")
- >>> tree.set(u"\u00e4ttr", u"v\u00e4lue")
- >>> ET.tostring(tree, "utf-8")
- '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
+ tree = ET.Element(u"t\u00e4g")
+ self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
- """
+ tree = ET.Element("tag")
+ tree.set(u"\u00e4ttr", u"v\u00e4lue")
+ self.assertEqual(ET.tostring(tree, "utf-8"),
+ b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
-def bug_xmltoolkit54():
- """
+ def test_bug_xmltoolkit54(self):
+ # problems handling internally defined entities
- problems handling internally defined entities
+ e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]>"
+ '<doc>&ldots;</doc>')
+ self.assertEqual(serialize(e), '<doc>&#33328;</doc>')
- >>> e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]><doc>&ldots;</doc>")
- >>> serialize(e)
- '<doc>&#33328;</doc>'
+ def test_bug_xmltoolkit55(self):
+ # make sure we're reporting the first error, not the last
- """
+ with self.assertRaises(ET.ParseError) as cm:
+ ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'>"
+ '<doc>&ldots;&ndots;&rdots;</doc>')
+ self.assertEqual(str(cm.exception),
+ 'undefined entity &ldots;: line 1, column 36')
-def bug_xmltoolkit55():
- """
+ def test_bug_xmltoolkit60(self):
+ # Handle crash in stream source.
- make sure we're reporting the first error, not the last
+ class ExceptionFile:
+ def read(self, x):
+ raise IOError
- >>> e = ET.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
- Traceback (most recent call last):
- ParseError: undefined entity &ldots;: line 1, column 36
+ self.assertRaises(IOError, ET.parse, ExceptionFile())
- """
+ def test_bug_xmltoolkit62(self):
+ # Don't crash when using custom entities.
-class ExceptionFile:
- def read(self, x):
- raise IOError
-
-def xmltoolkit60():
- """
-
- Handle crash in stream source.
- >>> tree = ET.parse(ExceptionFile())
- Traceback (most recent call last):
- IOError
-
- """
-
-XMLTOOLKIT62_DOC = """<?xml version="1.0" encoding="UTF-8"?>
+ ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'}
+ parser = ET.XMLTreeBuilder()
+ parser.entity.update(ENTITIES)
+ parser.feed("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named &lsquo;BCT9801BEG&rsquo;.</paragraph>
</subdoc-abstract>
-</patent-application-publication>"""
-
-
-def xmltoolkit62():
- """
+</patent-application-publication>""")
+ t = parser.close()
+ self.assertEqual(t.find('.//paragraph').text,
+ u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.')
+
+ @unittest.skipIf(sys.gettrace(), "Skips under coverage.")
+ @support.impl_detail("reference counting")
+ def test_bug_xmltoolkit63(self):
+ # Check reference leak.
+ def xmltoolkit63():
+ tree = ET.TreeBuilder()
+ tree.start("tag", {})
+ tree.data("text")
+ tree.end("tag")
+
+ xmltoolkit63()
+ count = sys.getrefcount(None)
+ for i in range(1000):
+ xmltoolkit63()
+ self.assertEqual(sys.getrefcount(None), count)
+
+ def test_bug_200708_newline(self):
+ # Preserve newlines in attributes.
+
+ e = ET.Element('SomeTag', text="def _f():\n return 3\n")
+ self.assertEqual(ET.tostring(e),
+ b'<SomeTag text="def _f():&#10; return 3&#10;" />')
+ self.assertEqual(ET.XML(ET.tostring(e)).get("text"),
+ 'def _f():\n return 3\n')
+ self.assertEqual(ET.tostring(ET.XML(ET.tostring(e))),
+ b'<SomeTag text="def _f():&#10; return 3&#10;" />')
+
+ def test_bug_200708_close(self):
+ # Test default builder.
+ parser = ET.XMLParser() # default
+ parser.feed("<element>some text</element>")
+ self.assertEqual(parser.close().tag, 'element')
+
+ # Test custom builder.
+ class EchoTarget:
+ def start(self, tag, attrib):
+ pass
+ def end(self, tag):
+ pass
+ def data(self, text):
+ pass
+ def close(self):
+ return ET.Element("element") # simulate root
+ parser = ET.XMLParser(target=EchoTarget())
+ parser.feed("<element>some text</element>")
+ self.assertEqual(parser.close().tag, 'element')
+
+ def test_bug_200709_default_namespace(self):
+ e = ET.Element("{default}elem")
+ s = ET.SubElement(e, "{default}elem")
+ self.assertEqual(serialize(e, default_namespace="default"), # 1
+ '<elem xmlns="default"><elem /></elem>')
+
+ e = ET.Element("{default}elem")
+ s = ET.SubElement(e, "{default}elem")
+ s = ET.SubElement(e, "{not-default}elem")
+ self.assertEqual(serialize(e, default_namespace="default"), # 2
+ '<elem xmlns="default" xmlns:ns1="not-default">'
+ '<elem />'
+ '<ns1:elem />'
+ '</elem>')
+
+ e = ET.Element("{default}elem")
+ s = ET.SubElement(e, "{default}elem")
+ s = ET.SubElement(e, "elem") # unprefixed name
+ with self.assertRaises(ValueError) as cm:
+ serialize(e, default_namespace="default") # 3
+ self.assertEqual(str(cm.exception),
+ 'cannot use non-qualified names with default_namespace option')
+
+ def test_bug_200709_register_namespace(self):
+ e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
+ self.assertEqual(ET.tostring(e),
+ '<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />')
+ ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
+ e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
+ self.assertEqual(ET.tostring(e),
+ '<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />')
+
+ # And the Dublin Core namespace is in the default list:
+
+ e = ET.Element("{http://purl.org/dc/elements/1.1/}title")
+ self.assertEqual(ET.tostring(e),
+ '<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />')
+
+ def test_bug_200709_element_comment(self):
+ # Not sure if this can be fixed, really (since the serializer needs
+ # ET.Comment, not cET.comment).
+
+ a = ET.Element('a')
+ a.append(ET.Comment('foo'))
+ self.assertEqual(a[0].tag, ET.Comment)
+
+ a = ET.Element('a')
+ a.append(ET.PI('foo'))
+ self.assertEqual(a[0].tag, ET.PI)
+
+ def test_bug_200709_element_insert(self):
+ a = ET.Element('a')
+ b = ET.SubElement(a, 'b')
+ c = ET.SubElement(a, 'c')
+ d = ET.Element('d')
+ a.insert(0, d)
+ self.assertEqual(summarize_list(a), ['d', 'b', 'c'])
+ a.insert(-1, d)
+ self.assertEqual(summarize_list(a), ['d', 'b', 'd', 'c'])
+
+ def test_bug_200709_iter_comment(self):
+ a = ET.Element('a')
+ b = ET.SubElement(a, 'b')
+ comment_b = ET.Comment("TEST-b")
+ b.append(comment_b)
+ self.assertEqual(summarize_list(a.iter(ET.Comment)), [ET.Comment])
+
+ # --------------------------------------------------------------------
+ # reported on bugs.python.org
+
+ def test_bug_1534630(self):
+ bob = ET.TreeBuilder()
+ e = bob.data("data")
+ e = bob.start("tag", {})
+ e = bob.end("tag")
+ e = bob.close()
+ self.assertEqual(serialize(e), '<tag />')
+
+ def test_issue6233(self):
+ e = ET.XML(b"<?xml version='1.0' encoding='utf-8'?>"
+ b'<body>t\xc3\xa3g</body>')
+ self.assertEqual(ET.tostring(e, 'ascii'),
+ b"<?xml version='1.0' encoding='ascii'?>\n"
+ b'<body>t&#227;g</body>')
+ e = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
+ b'<body>t\xe3g</body>')
+ self.assertEqual(ET.tostring(e, 'ascii'),
+ b"<?xml version='1.0' encoding='ascii'?>\n"
+ b'<body>t&#227;g</body>')
+
+ def test_issue3151(self):
+ e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
+ self.assertEqual(e.tag, '{${stuff}}localname')
+ t = ET.ElementTree(e)
+ self.assertEqual(ET.tostring(e), b'<ns0:localname xmlns:ns0="${stuff}" />')
+
+ def test_issue6565(self):
+ elem = ET.XML("<body><tag/></body>")
+ self.assertEqual(summarize_list(elem), ['tag'])
+ newelem = ET.XML(SAMPLE_XML)
+ elem[:] = newelem[:]
+ self.assertEqual(summarize_list(elem), ['tag', 'tag', 'section'])
+
+ def test_issue10777(self):
+ # Registering a namespace twice caused a "dictionary changed size during
+ # iteration" bug.
+
+ ET.register_namespace('test10777', 'http://myuri/')
+ ET.register_namespace('test10777', 'http://myuri/')
+
+ def check_expat224_utf8_bug(self, text):
+ xml = b'<a b="%s"/>' % text
+ root = ET.XML(xml)
+ self.assertEqual(root.get('b'), text.decode('utf-8'))
+
+ def test_expat224_utf8_bug(self):
+ # bpo-31170: Expat 2.2.3 had a bug in its UTF-8 decoder.
+ # Check that Expat 2.2.4 fixed the bug.
+ #
+ # Test buffer bounds at odd and even positions.
+
+ text = b'\xc3\xa0' * 1024
+ self.check_expat224_utf8_bug(text)
+
+ text = b'x' + b'\xc3\xa0' * 1024
+ self.check_expat224_utf8_bug(text)
+
+ def test_expat224_utf8_bug_file(self):
+ with open(UTF8_BUG_XMLFILE, 'rb') as fp:
+ raw = fp.read()
+ root = ET.fromstring(raw)
+ xmlattr = root.get('b')
+
+ # "Parse" manually the XML file to extract the value of the 'b'
+ # attribute of the <a b='xxx' /> XML element
+ text = raw.decode('utf-8').strip()
+ text = text.replace('\r\n', ' ')
+ text = text[6:-4]
+ self.assertEqual(root.get('b'), text)
- Don't crash when using custom entities.
-
- >>> xmltoolkit62()
- u'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.'
-
- """
- ENTITIES = {u'rsquo': u'\u2019', u'lsquo': u'\u2018'}
- parser = ET.XMLTreeBuilder()
- parser.entity.update(ENTITIES)
- parser.feed(XMLTOOLKIT62_DOC)
- t = parser.close()
- return t.find('.//paragraph').text
-
-def xmltoolkit63():
- """
-
- Check reference leak.
- >>> xmltoolkit63()
- >>> count = sys.getrefcount(None) #doctest: +SKIP
- >>> for i in range(1000):
- ... xmltoolkit63()
- >>> sys.getrefcount(None) - count #doctest: +SKIP
- 0
-
- """
- tree = ET.TreeBuilder()
- tree.start("tag", {})
- tree.data("text")
- tree.end("tag")
# --------------------------------------------------------------------
-def bug_200708_newline():
- r"""
-
- Preserve newlines in attributes.
-
- >>> e = ET.Element('SomeTag', text="def _f():\n return 3\n")
- >>> ET.tostring(e)
- '<SomeTag text="def _f():&#10; return 3&#10;" />'
- >>> ET.XML(ET.tostring(e)).get("text")
- 'def _f():\n return 3\n'
- >>> ET.tostring(ET.XML(ET.tostring(e)))
- '<SomeTag text="def _f():&#10; return 3&#10;" />'
-
- """
-
-def bug_200708_close():
- """
-
- Test default builder.
- >>> parser = ET.XMLParser() # default
- >>> parser.feed("<element>some text</element>")
- >>> summarize(parser.close())
- 'element'
-
- Test custom builder.
- >>> class EchoTarget:
- ... def close(self):
- ... return ET.Element("element") # simulate root
- >>> parser = ET.XMLParser(EchoTarget())
- >>> parser.feed("<element>some text</element>")
- >>> summarize(parser.close())
- 'element'
-
- """
-
-def bug_200709_default_namespace():
- """
+class BasicElementTest(unittest.TestCase):
+ @python_only
+ def test_cyclic_gc(self):
+ class Dummy:
+ pass
+
+ # Test the shortest cycle: d->element->d
+ d = Dummy()
+ d.dummyref = ET.Element('joe', attr=d)
+ wref = weakref.ref(d)
+ del d
+ gc_collect()
+ self.assertIsNone(wref())
+
+ # A longer cycle: d->e->e2->d
+ e = ET.Element('joe')
+ d = Dummy()
+ d.dummyref = e
+ wref = weakref.ref(d)
+ e2 = ET.SubElement(e, 'foo', attr=d)
+ del d, e, e2
+ gc_collect()
+ self.assertIsNone(wref())
+
+ # A cycle between Element objects as children of one another
+ # e1->e2->e3->e1
+ e1 = ET.Element('e1')
+ e2 = ET.Element('e2')
+ e3 = ET.Element('e3')
+ e1.append(e2)
+ e2.append(e2)
+ e3.append(e1)
+ wref = weakref.ref(e1)
+ del e1, e2, e3
+ gc_collect()
+ self.assertIsNone(wref())
+
+ @python_only
+ def test_weakref(self):
+ flag = []
+ def wref_cb(w):
+ flag.append(True)
+ e = ET.Element('e')
+ wref = weakref.ref(e, wref_cb)
+ self.assertEqual(wref().tag, 'e')
+ del e
+ gc_collect()
+ self.assertEqual(flag, [True])
+ self.assertEqual(wref(), None)
+
+ @python_only
+ def test_get_keyword_args(self):
+ e1 = ET.Element('foo' , x=1, y=2, z=3)
+ self.assertEqual(e1.get('x', default=7), 1)
+ self.assertEqual(e1.get('w', default=7), 7)
+
+
+class BadElementTest(unittest.TestCase):
+ def test_extend_mutable_list(self):
+ class X(object):
+ @property
+ def __class__(self):
+ L[:] = [ET.Element('baz')]
+ return ET.Element
+ L = [X()]
+ e = ET.Element('foo')
+ try:
+ e.extend(L)
+ except TypeError:
+ pass
+
+ if ET is pyET:
+ class Y(X, ET.Element):
+ pass
+ L = [Y('x')]
+ e = ET.Element('foo')
+ e.extend(L)
+
+ def test_extend_mutable_list2(self):
+ class X(object):
+ @property
+ def __class__(self):
+ del L[:]
+ return ET.Element
+ L = [X(), ET.Element('baz')]
+ e = ET.Element('foo')
+ try:
+ e.extend(L)
+ except TypeError:
+ pass
+
+ if ET is pyET:
+ class Y(X, ET.Element):
+ pass
+ L = [Y('bar'), ET.Element('baz')]
+ e = ET.Element('foo')
+ e.extend(L)
+
+ @python_only
+ def test_remove_with_mutating(self):
+ class X(ET.Element):
+ def __eq__(self, o):
+ del e[:]
+ return False
+ __hash__ = object.__hash__
+ e = ET.Element('foo')
+ e.extend([X('bar')])
+ self.assertRaises(ValueError, e.remove, ET.Element('baz'))
+
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ self.assertRaises(ValueError, e.remove, X('baz'))
+
+ def test_recursive_repr(self):
+ # Issue #25455
+ e = ET.Element('foo')
+ with swap_attr(e, 'tag', e):
+ with self.assertRaises(RuntimeError):
+ repr(e) # Should not crash
+
+ def test_element_get_text(self):
+ # Issue #27863
+ class X(str):
+ def __del__(self):
+ try:
+ elem.text
+ except NameError:
+ pass
+
+ b = ET.TreeBuilder()
+ b.start('tag', {})
+ b.data('ABCD')
+ b.data(X('EFGH'))
+ b.data('IJKL')
+ b.end('tag')
+
+ elem = b.close()
+ self.assertEqual(elem.text, 'ABCDEFGHIJKL')
+
+ def test_element_get_tail(self):
+ # Issue #27863
+ class X(str):
+ def __del__(self):
+ try:
+ elem[0].tail
+ except NameError:
+ pass
+
+ b = ET.TreeBuilder()
+ b.start('root', {})
+ b.start('tag', {})
+ b.end('tag')
+ b.data('ABCD')
+ b.data(X('EFGH'))
+ b.data('IJKL')
+ b.end('root')
+
+ elem = b.close()
+ self.assertEqual(elem[0].tail, 'ABCDEFGHIJKL')
+
+ def test_element_iter(self):
+ # Issue #27863
+ e = ET.Element('tag')
+ e.extend([None]) # non-Element
+
+ it = e.iter()
+ self.assertIs(next(it), e)
+ self.assertRaises((AttributeError, TypeError), list, it)
+
+ def test_subscr(self):
+ # Issue #27863
+ class X:
+ def __index__(self):
+ del e[:]
+ return 1
+
+ e = ET.Element('elem')
+ e.append(ET.Element('child'))
+ e[:X()] # shouldn't crash
+
+ e.append(ET.Element('child'))
+ e[0:10:X()] # shouldn't crash
+
+ def test_ass_subscr(self):
+ # Issue #27863
+ class X:
+ def __index__(self):
+ e[:] = []
+ return 1
+
+ e = ET.Element('elem')
+ for _ in range(10):
+ e.insert(0, ET.Element('child'))
+
+ e[0:10:X()] = [] # shouldn't crash
+
+
+class MutatingElementPath(str):
+ def __new__(cls, elem, *args):
+ self = str.__new__(cls, *args)
+ self.elem = elem
+ return self
+ def __eq__(self, o):
+ del self.elem[:]
+ return True
+ __hash__ = str.__hash__
+
+class BadElementPath(str):
+ def __eq__(self, o):
+ raise 1.0/0.0
+ __hash__ = str.__hash__
+
+class BadElementPathTest(unittest.TestCase):
+ def setUp(self):
+ super(BadElementPathTest, self).setUp()
+ from xml.etree import ElementPath
+ self.path_cache = ElementPath._cache
+ ElementPath._cache = {}
+
+ def tearDown(self):
+ from xml.etree import ElementPath
+ ElementPath._cache = self.path_cache
+ super(BadElementPathTest, self).tearDown()
+
+ def test_find_with_mutating(self):
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ e.find(MutatingElementPath(e, 'x'))
+
+ def test_find_with_error(self):
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ try:
+ e.find(BadElementPath('x'))
+ except ZeroDivisionError:
+ pass
+
+ def test_findtext_with_mutating(self):
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ e.findtext(MutatingElementPath(e, 'x'))
+
+ def test_findtext_with_error(self):
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ try:
+ e.findtext(BadElementPath('x'))
+ except ZeroDivisionError:
+ pass
+
+ def test_findall_with_mutating(self):
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ e.findall(MutatingElementPath(e, 'x'))
+
+ def test_findall_with_error(self):
+ e = ET.Element('foo')
+ e.extend([ET.Element('bar')])
+ try:
+ e.findall(BadElementPath('x'))
+ except ZeroDivisionError:
+ pass
+
+
+class ElementTreeTypeTest(unittest.TestCase):
+ def test_istype(self):
+ self.assertIsInstance(ET.ParseError, type)
+ self.assertIsInstance(ET.QName, type)
+ self.assertIsInstance(ET.ElementTree, type)
+ if ET is pyET:
+ self.assertIsInstance(ET.Element, type)
+ self.assertIsInstance(ET.TreeBuilder, type)
+ self.assertIsInstance(ET.XMLParser, type)
+
+ @python_only
+ def test_Element_subclass_trivial(self):
+ class MyElement(ET.Element):
+ pass
+
+ mye = MyElement('foo')
+ self.assertIsInstance(mye, ET.Element)
+ self.assertIsInstance(mye, MyElement)
+ self.assertEqual(mye.tag, 'foo')
+
+ # test that attribute assignment works (issue 14849)
+ mye.text = "joe"
+ self.assertEqual(mye.text, "joe")
+
+ @python_only
+ def test_Element_subclass_constructor(self):
+ class MyElement(ET.Element):
+ def __init__(self, tag, attrib={}, **extra):
+ super(MyElement, self).__init__(tag + '__', attrib, **extra)
+
+ mye = MyElement('foo', {'a': 1, 'b': 2}, c=3, d=4)
+ self.assertEqual(mye.tag, 'foo__')
+ self.assertEqual(sorted(mye.items()),
+ [('a', 1), ('b', 2), ('c', 3), ('d', 4)])
+
+ @python_only
+ def test_Element_subclass_new_method(self):
+ class MyElement(ET.Element):
+ def newmethod(self):
+ return self.tag
+
+ mye = MyElement('joe')
+ self.assertEqual(mye.newmethod(), 'joe')
+
+
+class ElementFindTest(unittest.TestCase):
+ @python_only
+ def test_simplefind(self):
+ ET.ElementPath
+ with swap_attr(ET, 'ElementPath', ET._SimpleElementPath()):
+ e = ET.XML(SAMPLE_XML)
+ self.assertEqual(e.find('tag').tag, 'tag')
+ self.assertEqual(ET.ElementTree(e).find('tag').tag, 'tag')
+ self.assertEqual(e.findtext('tag'), 'text')
+ self.assertIsNone(e.findtext('tog'))
+ self.assertEqual(e.findtext('tog', 'default'), 'default')
+ self.assertEqual(ET.ElementTree(e).findtext('tag'), 'text')
+ self.assertEqual(summarize_list(e.findall('tag')), ['tag', 'tag'])
+ self.assertEqual(summarize_list(e.findall('.//tag')), ['tag', 'tag', 'tag'])
+
+ # Path syntax doesn't work in this case.
+ self.assertIsNone(e.find('section/tag'))
+ self.assertIsNone(e.findtext('section/tag'))
+ self.assertEqual(summarize_list(e.findall('section/tag')), [])
+
+ def test_find_simple(self):
+ e = ET.XML(SAMPLE_XML)
+ self.assertEqual(e.find('tag').tag, 'tag')
+ self.assertEqual(e.find('section/tag').tag, 'tag')
+ self.assertEqual(e.find('./tag').tag, 'tag')
+
+ e[2] = ET.XML(SAMPLE_SECTION)
+ self.assertEqual(e.find('section/nexttag').tag, 'nexttag')
+
+ self.assertEqual(e.findtext('./tag'), 'text')
+ self.assertEqual(e.findtext('section/tag'), 'subtext')
+
+ # section/nexttag is found but has no text
+ self.assertEqual(e.findtext('section/nexttag'), '')
+ self.assertEqual(e.findtext('section/nexttag', 'default'), '')
+
+ # tog doesn't exist and 'default' kicks in
+ self.assertIsNone(e.findtext('tog'))
+ self.assertEqual(e.findtext('tog', 'default'), 'default')
+
+ # Issue #16922
+ self.assertEqual(ET.XML('<tag><empty /></tag>').findtext('empty'), '')
+
+ def test_find_xpath(self):
+ LINEAR_XML = '''
+ <body>
+ <tag class='a'/>
+ <tag class='b'/>
+ <tag class='c'/>
+ <tag class='d'/>
+ </body>'''
+ e = ET.XML(LINEAR_XML)
+
+ # Test for numeric indexing and last()
+ self.assertEqual(e.find('./tag[1]').attrib['class'], 'a')
+ self.assertEqual(e.find('./tag[2]').attrib['class'], 'b')
+ self.assertEqual(e.find('./tag[last()]').attrib['class'], 'd')
+ self.assertEqual(e.find('./tag[last()-1]').attrib['class'], 'c')
+ self.assertEqual(e.find('./tag[last()-2]').attrib['class'], 'b')
+
+ def test_findall(self):
+ e = ET.XML(SAMPLE_XML)
+ e[2] = ET.XML(SAMPLE_SECTION)
+ self.assertEqual(summarize_list(e.findall('.')), ['body'])
+ self.assertEqual(summarize_list(e.findall('tag')), ['tag', 'tag'])
+ self.assertEqual(summarize_list(e.findall('tog')), [])
+ self.assertEqual(summarize_list(e.findall('tog/foo')), [])
+ self.assertEqual(summarize_list(e.findall('*')),
+ ['tag', 'tag', 'section'])
+ self.assertEqual(summarize_list(e.findall('.//tag')),
+ ['tag'] * 4)
+ self.assertEqual(summarize_list(e.findall('section/tag')), ['tag'])
+ self.assertEqual(summarize_list(e.findall('section//tag')), ['tag'] * 2)
+ self.assertEqual(summarize_list(e.findall('section/*')),
+ ['tag', 'nexttag', 'nextsection'])
+ self.assertEqual(summarize_list(e.findall('section//*')),
+ ['tag', 'nexttag', 'nextsection', 'tag'])
+ self.assertEqual(summarize_list(e.findall('section/.//*')),
+ ['tag', 'nexttag', 'nextsection', 'tag'])
+ self.assertEqual(summarize_list(e.findall('*/*')),
+ ['tag', 'nexttag', 'nextsection'])
+ self.assertEqual(summarize_list(e.findall('*//*')),
+ ['tag', 'nexttag', 'nextsection', 'tag'])
+ self.assertEqual(summarize_list(e.findall('*/tag')), ['tag'])
+ self.assertEqual(summarize_list(e.findall('*/./tag')), ['tag'])
+ self.assertEqual(summarize_list(e.findall('./tag')), ['tag'] * 2)
+ self.assertEqual(summarize_list(e.findall('././tag')), ['tag'] * 2)
+
+ self.assertEqual(summarize_list(e.findall('.//tag[@class]')),
+ ['tag'] * 3)
+ self.assertEqual(summarize_list(e.findall('.//tag[@class="a"]')),
+ ['tag'])
+ self.assertEqual(summarize_list(e.findall('.//tag[@class="b"]')),
+ ['tag'] * 2)
+ self.assertEqual(summarize_list(e.findall('.//tag[@id]')),
+ ['tag'])
+ self.assertEqual(summarize_list(e.findall('.//section[tag]')),
+ ['section'])
+ self.assertEqual(summarize_list(e.findall('.//section[element]')), [])
+ self.assertEqual(summarize_list(e.findall('../tag')), [])
+ self.assertEqual(summarize_list(e.findall('section/../tag')),
+ ['tag'] * 2)
+ self.assertEqual(e.findall('section//'), e.findall('section//*'))
+
+ def test_test_find_with_ns(self):
+ e = ET.XML(SAMPLE_XML_NS)
+ self.assertEqual(summarize_list(e.findall('tag')), [])
+ self.assertEqual(
+ summarize_list(e.findall("{http://effbot.org/ns}tag")),
+ ['{http://effbot.org/ns}tag'] * 2)
+ self.assertEqual(
+ summarize_list(e.findall(".//{http://effbot.org/ns}tag")),
+ ['{http://effbot.org/ns}tag'] * 3)
+
+ def test_bad_find(self):
+ e = ET.XML(SAMPLE_XML)
+ with self.assertRaisesRegexp(SyntaxError,
+ 'cannot use absolute path on element'):
+ e.findall('/tag')
+
+ def test_find_through_ElementTree(self):
+ e = ET.XML(SAMPLE_XML)
+ self.assertEqual(ET.ElementTree(e).find('tag').tag, 'tag')
+ self.assertEqual(ET.ElementTree(e).find('./tag').tag, 'tag')
+ # this produces a warning
+ msg = ("This search is broken in 1.3 and earlier, and will be fixed "
+ "in a future version. If you rely on the current behaviour, "
+ "change it to '.+'")
+ with support.check_warnings((msg, FutureWarning)):
+ self.assertEqual(ET.ElementTree(e).find('/tag').tag, 'tag')
+ e[2] = ET.XML(SAMPLE_SECTION)
+ self.assertEqual(ET.ElementTree(e).find('section/tag').tag, 'tag')
+ self.assertIsNone(ET.ElementTree(e).find('tog'))
+ self.assertIsNone(ET.ElementTree(e).find('tog/foo'))
+
+ self.assertEqual(ET.ElementTree(e).findtext('tag'), 'text')
+ self.assertIsNone(ET.ElementTree(e).findtext('tog/foo'))
+ self.assertEqual(ET.ElementTree(e).findtext('tog/foo', 'default'),
+ 'default')
+ self.assertEqual(ET.ElementTree(e).findtext('./tag'), 'text')
+ with support.check_warnings((msg, FutureWarning)):
+ self.assertEqual(ET.ElementTree(e).findtext('/tag'), 'text')
+ self.assertEqual(ET.ElementTree(e).findtext('section/tag'), 'subtext')
+
+ self.assertEqual(summarize_list(ET.ElementTree(e).findall('./tag')),
+ ['tag'] * 2)
+ with support.check_warnings((msg, FutureWarning)):
+ it = ET.ElementTree(e).findall('/tag')
+ self.assertEqual(summarize_list(it), ['tag'] * 2)
+
+
+class ElementIterTest(unittest.TestCase):
+ def _ilist(self, elem, tag=None):
+ return summarize_list(elem.iter(tag))
+
+ def test_basic(self):
+ doc = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
+ self.assertEqual(self._ilist(doc), ['html', 'body', 'i'])
+ self.assertEqual(self._ilist(doc.find('body')), ['body', 'i'])
+ self.assertEqual(next(doc.iter()).tag, 'html')
+ self.assertEqual(''.join(doc.itertext()), 'this is a paragraph...')
+ self.assertEqual(''.join(doc.find('body').itertext()),
+ 'this is a paragraph.')
+ self.assertEqual(next(doc.itertext()), 'this is a ')
+
+ # Method iterparse should return an iterator. See bug 6472.
+ sourcefile = serialize(doc, to_string=False)
+ self.assertEqual(next(ET.iterparse(sourcefile))[0], 'end')
+
+ if ET is pyET:
+ # With an explitit parser too (issue #9708)
+ sourcefile = serialize(doc, to_string=False)
+ parser = ET.XMLParser(target=ET.TreeBuilder())
+ self.assertEqual(next(ET.iterparse(sourcefile, parser=parser))[0],
+ 'end')
+
+ tree = ET.ElementTree(None)
+ self.assertRaises(AttributeError, tree.iter)
+
+ # Issue #16913
+ doc = ET.XML("<root>a&amp;<sub>b&amp;</sub>c&amp;</root>")
+ self.assertEqual(''.join(doc.itertext()), 'a&b&c&')
+
+ def test_corners(self):
+ # single root, no subelements
+ a = ET.Element('a')
+ self.assertEqual(self._ilist(a), ['a'])
+
+ # one child
+ b = ET.SubElement(a, 'b')
+ self.assertEqual(self._ilist(a), ['a', 'b'])
+
+ # one child and one grandchild
+ c = ET.SubElement(b, 'c')
+ self.assertEqual(self._ilist(a), ['a', 'b', 'c'])
+
+ # two children, only first with grandchild
+ d = ET.SubElement(a, 'd')
+ self.assertEqual(self._ilist(a), ['a', 'b', 'c', 'd'])
+
+ # replace first child by second
+ a[0] = a[1]
+ del a[1]
+ self.assertEqual(self._ilist(a), ['a', 'd'])
+
+ def test_iter_by_tag(self):
+ doc = ET.XML('''
+ <document>
+ <house>
+ <room>bedroom1</room>
+ <room>bedroom2</room>
+ </house>
+ <shed>nothing here
+ </shed>
+ <house>
+ <room>bedroom8</room>
+ </house>
+ </document>''')
+
+ self.assertEqual(self._ilist(doc, 'room'), ['room'] * 3)
+ self.assertEqual(self._ilist(doc, 'house'), ['house'] * 2)
+
+ if ET is pyET:
+ # test that iter also accepts 'tag' as a keyword arg
+ self.assertEqual(
+ summarize_list(doc.iter(tag='room')),
+ ['room'] * 3)
+
+ # make sure both tag=None and tag='*' return all tags
+ all_tags = ['document', 'house', 'room', 'room',
+ 'shed', 'house', 'room']
+ self.assertEqual(summarize_list(doc.iter()), all_tags)
+ self.assertEqual(self._ilist(doc), all_tags)
+ self.assertEqual(self._ilist(doc, '*'), all_tags)
+
+ def test_getiterator(self):
+ # Element.getiterator() is deprecated.
+ if sys.py3kwarning or ET is pyET:
+ with support.check_warnings(("This method will be removed in future versions. "
+ "Use .+ instead.", PendingDeprecationWarning)):
+ self._test_getiterator()
+ else:
+ self._test_getiterator()
+
+ def _test_getiterator(self):
+ doc = ET.XML('''
+ <document>
+ <house>
+ <room>bedroom1</room>
+ <room>bedroom2</room>
+ </house>
+ <shed>nothing here
+ </shed>
+ <house>
+ <room>bedroom8</room>
+ </house>
+ </document>''')
+
+ self.assertEqual(summarize_list(doc.getiterator('room')),
+ ['room'] * 3)
+ self.assertEqual(summarize_list(doc.getiterator('house')),
+ ['house'] * 2)
+
+ if ET is pyET:
+ # test that getiterator also accepts 'tag' as a keyword arg
+ self.assertEqual(
+ summarize_list(doc.getiterator(tag='room')),
+ ['room'] * 3)
+
+ # make sure both tag=None and tag='*' return all tags
+ all_tags = ['document', 'house', 'room', 'room',
+ 'shed', 'house', 'room']
+ self.assertEqual(summarize_list(doc.getiterator()), all_tags)
+ self.assertEqual(summarize_list(doc.getiterator(None)), all_tags)
+ self.assertEqual(summarize_list(doc.getiterator('*')), all_tags)
+
+ @support.impl_detail("unpickleable iteraters")
+ def test_copy(self):
+ a = ET.Element('a')
+ it = a.iter()
+ with self.assertRaises(TypeError):
+ copy.copy(it)
+
+ @support.impl_detail("unpickleable iteraters")
+ def test_pickle(self):
+ a = ET.Element('a')
+ it = a.iter()
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ with self.assertRaises((TypeError, pickle.PicklingError)):
+ pickle.dumps(it, proto)
+
+
+class TreeBuilderTest(unittest.TestCase):
+ sample1 = ('<!DOCTYPE html PUBLIC'
+ ' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
+ ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
+ '<html>text<div>subtext</div>tail</html>')
+
+ sample2 = '''<toplevel>sometext</toplevel>'''
+
+ def _check_sample1_element(self, e):
+ self.assertEqual(e.tag, 'html')
+ self.assertEqual(e.text, 'text')
+ self.assertEqual(e.tail, None)
+ self.assertEqual(e.attrib, {})
+ children = list(e)
+ self.assertEqual(len(children), 1)
+ child = children[0]
+ self.assertEqual(child.tag, 'div')
+ self.assertEqual(child.text, 'subtext')
+ self.assertEqual(child.tail, 'tail')
+ self.assertEqual(child.attrib, {})
+
+ def test_dummy_builder(self):
+ class DummyBuilder:
+ data = start = end = lambda *a: None
+
+ def close(self):
+ return 42
+
+ parser = ET.XMLParser(target=DummyBuilder())
+ parser.feed(self.sample1)
+ self.assertEqual(parser.close(), 42)
+
+ @python_only
+ def test_treebuilder_elementfactory_none(self):
+ parser = ET.XMLParser(target=ET.TreeBuilder(element_factory=None))
+ parser.feed(self.sample1)
+ e = parser.close()
+ self._check_sample1_element(e)
+
+ @python_only
+ def test_subclass(self):
+ class MyTreeBuilder(ET.TreeBuilder):
+ def foobar(self, x):
+ return x * 2
+
+ tb = MyTreeBuilder()
+ self.assertEqual(tb.foobar(10), 20)
+
+ parser = ET.XMLParser(target=tb)
+ parser.feed(self.sample1)
+
+ e = parser.close()
+ self._check_sample1_element(e)
+
+ @python_only
+ def test_element_factory(self):
+ lst = []
+ def myfactory(tag, attrib):
+ lst.append(tag)
+ return ET.Element(tag, attrib)
+
+ tb = ET.TreeBuilder(element_factory=myfactory)
+ parser = ET.XMLParser(target=tb)
+ parser.feed(self.sample2)
+ parser.close()
+
+ self.assertEqual(lst, ['toplevel'])
+
+ @python_only
+ def test_element_factory_subclass(self):
+ class MyElement(ET.Element):
+ pass
+
+ tb = ET.TreeBuilder(element_factory=MyElement)
+
+ parser = ET.XMLParser(target=tb)
+ parser.feed(self.sample1)
+ e = parser.close()
+ self.assertIsInstance(e, MyElement)
+ self._check_sample1_element(e)
+
+
+ @python_only
+ def test_doctype(self):
+ class DoctypeParser:
+ _doctype = None
+
+ def doctype(self, name, pubid, system):
+ self._doctype = (name, pubid, system)
+
+ data = start = end = lambda *a: None
+
+ def close(self):
+ return self._doctype
+
+ parser = ET.XMLParser(target=DoctypeParser())
+ parser.feed(self.sample1)
+
+ self.assertEqual(parser.close(),
+ ('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
+ 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
+
+ @cet_only # PyET does not look up the attributes in XMLParser().__init__()
+ def test_builder_lookup_errors(self):
+ class RaisingBuilder(object):
+ def __init__(self, raise_in=None, what=ValueError):
+ self.raise_in = raise_in
+ self.what = what
+
+ def __getattr__(self, name):
+ if name == self.raise_in:
+ raise self.what(self.raise_in)
+ def handle(*args):
+ pass
+ return handle
+
+ ET.XMLParser(target=RaisingBuilder())
+ # cET also checks for 'close' and 'doctype', PyET does it only at need
+ for event in ('start', 'data', 'end', 'comment', 'pi'):
+ with self.assertRaises(ValueError):
+ ET.XMLParser(target=RaisingBuilder(event))
+
+ ET.XMLParser(target=RaisingBuilder(what=AttributeError))
+ for event in ('start', 'data', 'end', 'comment', 'pi'):
+ parser = ET.XMLParser(target=RaisingBuilder(event, what=AttributeError))
+ parser.feed(self.sample1)
+ self.assertIsNone(parser.close())
+
+
+class XMLParserTest(unittest.TestCase):
+ sample1 = b'<file><line>22</line></file>'
+ sample2 = (b'<!DOCTYPE html PUBLIC'
+ b' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
+ b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
+ b'<html>text</html>')
+
+ def _check_sample_element(self, e):
+ self.assertEqual(e.tag, 'file')
+ self.assertEqual(e[0].tag, 'line')
+ self.assertEqual(e[0].text, '22')
+
+ @python_only
+ def test_constructor_args(self):
+ # Positional args. The first (html) is not supported, but should be
+ # nevertheless correctly accepted.
+ with support.check_py3k_warnings((r'.*\bhtml\b', DeprecationWarning)):
+ parser = ET.XMLParser(None, ET.TreeBuilder(), 'utf-8')
+ parser.feed(self.sample1)
+ self._check_sample_element(parser.close())
+
+ # Now as keyword args.
+ parser2 = ET.XMLParser(encoding='utf-8',
+ target=ET.TreeBuilder())
+ parser2.feed(self.sample1)
+ self._check_sample_element(parser2.close())
+
+ @python_only
+ def test_subclass(self):
+ class MyParser(ET.XMLParser):
+ pass
+ parser = MyParser()
+ parser.feed(self.sample1)
+ self._check_sample_element(parser.close())
+
+ @python_only
+ def test_doctype_warning(self):
+ parser = ET.XMLParser()
+ with support.check_warnings(('', DeprecationWarning)):
+ parser.doctype('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
+ 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd')
+ parser.feed('<html/>')
+ parser.close()
+
+ @python_only
+ def test_subclass_doctype(self):
+ _doctype = []
+ class MyParserWithDoctype(ET.XMLParser):
+ def doctype(self, name, pubid, system):
+ _doctype.append((name, pubid, system))
+
+ parser = MyParserWithDoctype()
+ with support.check_warnings(('', DeprecationWarning)):
+ parser.feed(self.sample2)
+ parser.close()
+ self.assertEqual(_doctype,
+ [('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
+ 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd')])
+
+ _doctype = []
+ _doctype2 = []
+ with warnings.catch_warnings():
+ warnings.simplefilter('error', DeprecationWarning)
+ class DoctypeParser:
+ data = start = end = close = lambda *a: None
+
+ def doctype(self, name, pubid, system):
+ _doctype2.append((name, pubid, system))
+
+ parser = MyParserWithDoctype(target=DoctypeParser())
+ parser.feed(self.sample2)
+ parser.close()
+ self.assertEqual(_doctype, [])
+ self.assertEqual(_doctype2,
+ [('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
+ 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd')])
+
+
+class NamespaceParseTest(unittest.TestCase):
+ def test_find_with_namespace(self):
+ nsmap = {'h': 'hello', 'f': 'foo'}
+ doc = ET.fromstring(SAMPLE_XML_NS_ELEMS)
+
+ self.assertEqual(len(doc.findall('{hello}table', nsmap)), 1)
+ self.assertEqual(len(doc.findall('.//{hello}td', nsmap)), 2)
+ self.assertEqual(len(doc.findall('.//{foo}name', nsmap)), 1)
+
+
+class ElementSlicingTest(unittest.TestCase):
+ def _elem_tags(self, elemlist):
+ return [e.tag for e in elemlist]
+
+ def _subelem_tags(self, elem):
+ return self._elem_tags(list(elem))
+
+ def _make_elem_with_children(self, numchildren):
+ """Create an Element with a tag 'a', with the given amount of children
+ named 'a0', 'a1' ... and so on.
+
+ """
+ e = ET.Element('a')
+ for i in range(numchildren):
+ ET.SubElement(e, 'a%s' % i)
+ return e
+
+ def test_getslice_single_index(self):
+ e = self._make_elem_with_children(10)
+
+ self.assertEqual(e[1].tag, 'a1')
+ self.assertEqual(e[-2].tag, 'a8')
+
+ self.assertRaises(IndexError, lambda: e[12])
+ self.assertRaises(IndexError, lambda: e[-12])
+
+ def test_getslice_range(self):
+ e = self._make_elem_with_children(6)
+
+ self.assertEqual(self._elem_tags(e[3:]), ['a3', 'a4', 'a5'])
+ self.assertEqual(self._elem_tags(e[3:6]), ['a3', 'a4', 'a5'])
+ self.assertEqual(self._elem_tags(e[3:16]), ['a3', 'a4', 'a5'])
+ self.assertEqual(self._elem_tags(e[3:5]), ['a3', 'a4'])
+ self.assertEqual(self._elem_tags(e[3:-1]), ['a3', 'a4'])
+ self.assertEqual(self._elem_tags(e[:2]), ['a0', 'a1'])
+
+ def test_getslice_steps(self):
+ e = self._make_elem_with_children(10)
+
+ self.assertEqual(self._elem_tags(e[8:10:1]), ['a8', 'a9'])
+ self.assertEqual(self._elem_tags(e[::3]), ['a0', 'a3', 'a6', 'a9'])
+ self.assertEqual(self._elem_tags(e[::8]), ['a0', 'a8'])
+ self.assertEqual(self._elem_tags(e[1::8]), ['a1', 'a9'])
+ self.assertEqual(self._elem_tags(e[3::sys.maxsize]), ['a3'])
+ self.assertEqual(self._elem_tags(e[3::sys.maxsize<<64]), ['a3'])
+
+ def test_getslice_negative_steps(self):
+ e = self._make_elem_with_children(4)
+
+ self.assertEqual(self._elem_tags(e[::-1]), ['a3', 'a2', 'a1', 'a0'])
+ self.assertEqual(self._elem_tags(e[::-2]), ['a3', 'a1'])
+ self.assertEqual(self._elem_tags(e[3::-sys.maxsize]), ['a3'])
+ self.assertEqual(self._elem_tags(e[3::-sys.maxsize-1]), ['a3'])
+ self.assertEqual(self._elem_tags(e[3::-sys.maxsize<<64]), ['a3'])
+
+ def test_delslice(self):
+ e = self._make_elem_with_children(4)
+ del e[0:2]
+ self.assertEqual(self._subelem_tags(e), ['a2', 'a3'])
+
+ e = self._make_elem_with_children(4)
+ del e[0:]
+ self.assertEqual(self._subelem_tags(e), [])
+
+ if ET is pyET:
+ e = self._make_elem_with_children(4)
+ del e[::-1]
+ self.assertEqual(self._subelem_tags(e), [])
+
+ e = self._make_elem_with_children(4)
+ del e[::-2]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
+
+ e = self._make_elem_with_children(4)
+ del e[1::2]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
+
+ e = self._make_elem_with_children(2)
+ del e[::2]
+ self.assertEqual(self._subelem_tags(e), ['a1'])
+
+ def test_setslice_single_index(self):
+ e = self._make_elem_with_children(4)
+ e[1] = ET.Element('b')
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
+
+ e[-2] = ET.Element('c')
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'c', 'a3'])
+
+ with self.assertRaises(IndexError):
+ e[5] = ET.Element('d')
+ with self.assertRaises(IndexError):
+ e[-5] = ET.Element('d')
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'c', 'a3'])
+
+ def test_setslice_range(self):
+ e = self._make_elem_with_children(4)
+ e[1:3] = [ET.Element('b%s' % i) for i in range(2)]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'b1', 'a3'])
+
+ e = self._make_elem_with_children(4)
+ e[1:3] = [ET.Element('b')]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a3'])
+
+ e = self._make_elem_with_children(4)
+ e[1:3] = [ET.Element('b%s' % i) for i in range(3)]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'b1', 'b2', 'a3'])
+
+ def test_setslice_steps(self):
+ e = self._make_elem_with_children(6)
+ e[1:5:2] = [ET.Element('b%s' % i) for i in range(2)]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b0', 'a2', 'b1', 'a4', 'a5'])
+
+ e = self._make_elem_with_children(6)
+ with self.assertRaises(ValueError):
+ e[1:5:2] = [ET.Element('b')]
+ with self.assertRaises(ValueError):
+ e[1:5:2] = [ET.Element('b%s' % i) for i in range(3)]
+ with self.assertRaises(ValueError):
+ e[1:5:2] = []
+ self.assertEqual(self._subelem_tags(e), ['a0', 'a1', 'a2', 'a3', 'a4', 'a5'])
+
+ e = self._make_elem_with_children(4)
+ e[1::sys.maxsize] = [ET.Element('b')]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
+ e[1::sys.maxsize<<64] = [ET.Element('c')]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'c', 'a2', 'a3'])
+
+ def test_setslice_negative_steps(self):
+ e = self._make_elem_with_children(4)
+ e[2:0:-1] = [ET.Element('b%s' % i) for i in range(2)]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b1', 'b0', 'a3'])
+
+ e = self._make_elem_with_children(4)
+ with self.assertRaises(ValueError):
+ e[2:0:-1] = [ET.Element('b')]
+ with self.assertRaises(ValueError):
+ e[2:0:-1] = [ET.Element('b%s' % i) for i in range(3)]
+ with self.assertRaises(ValueError):
+ e[2:0:-1] = []
+ self.assertEqual(self._subelem_tags(e), ['a0', 'a1', 'a2', 'a3'])
+
+ e = self._make_elem_with_children(4)
+ e[1::-sys.maxsize] = [ET.Element('b')]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'b', 'a2', 'a3'])
+ e[1::-sys.maxsize-1] = [ET.Element('c')]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'c', 'a2', 'a3'])
+ e[1::-sys.maxsize<<64] = [ET.Element('d')]
+ self.assertEqual(self._subelem_tags(e), ['a0', 'd', 'a2', 'a3'])
+
+
+class IOTest(unittest.TestCase):
+ def tearDown(self):
+ support.unlink(TESTFN)
+
+ def test_encoding(self):
+ # Test encoding issues.
+ elem = ET.Element("tag")
+ elem.text = u"abc"
+ self.assertEqual(serialize(elem), '<tag>abc</tag>')
+ self.assertEqual(serialize(elem, encoding="utf-8"),
+ '<tag>abc</tag>')
+ self.assertEqual(serialize(elem, encoding="us-ascii"),
+ '<tag>abc</tag>')
+ self.assertEqual(serialize(elem, encoding="iso-8859-1"),
+ "<?xml version='1.0' encoding='iso-8859-1'?>\n"
+ "<tag>abc</tag>")
+
+ elem = ET.Element("tag")
+ elem.text = "<&\"\'>"
+ self.assertEqual(serialize(elem), '<tag>&lt;&amp;"\'&gt;</tag>')
+ self.assertEqual(serialize(elem, encoding="utf-8"),
+ b'<tag>&lt;&amp;"\'&gt;</tag>')
+ self.assertEqual(serialize(elem, encoding="us-ascii"),
+ b'<tag>&lt;&amp;"\'&gt;</tag>')
+ self.assertEqual(serialize(elem, encoding="iso-8859-1"),
+ "<?xml version='1.0' encoding='iso-8859-1'?>\n"
+ "<tag>&lt;&amp;\"'&gt;</tag>")
+
+ elem = ET.Element("tag")
+ elem.attrib["key"] = "<&\"\'>"
+ self.assertEqual(serialize(elem), '<tag key="&lt;&amp;&quot;\'&gt;" />')
+ self.assertEqual(serialize(elem, encoding="utf-8"),
+ b'<tag key="&lt;&amp;&quot;\'&gt;" />')
+ self.assertEqual(serialize(elem, encoding="us-ascii"),
+ b'<tag key="&lt;&amp;&quot;\'&gt;" />')
+ self.assertEqual(serialize(elem, encoding="iso-8859-1"),
+ "<?xml version='1.0' encoding='iso-8859-1'?>\n"
+ "<tag key=\"&lt;&amp;&quot;'&gt;\" />")
+
+ elem = ET.Element("tag")
+ elem.text = u'\xe5\xf6\xf6<>'
+ self.assertEqual(serialize(elem),
+ '<tag>&#229;&#246;&#246;&lt;&gt;</tag>')
+ self.assertEqual(serialize(elem, encoding="utf-8"),
+ '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>')
+ self.assertEqual(serialize(elem, encoding="us-ascii"),
+ '<tag>&#229;&#246;&#246;&lt;&gt;</tag>')
+ self.assertEqual(serialize(elem, encoding="iso-8859-1"),
+ "<?xml version='1.0' encoding='iso-8859-1'?>\n"
+ "<tag>\xe5\xf6\xf6&lt;&gt;</tag>")
+
+ elem = ET.Element("tag")
+ elem.attrib["key"] = u'\xe5\xf6\xf6<>'
+ self.assertEqual(serialize(elem),
+ '<tag key="&#229;&#246;&#246;&lt;&gt;" />')
+ self.assertEqual(serialize(elem, encoding="utf-8"),
+ '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />')
+ self.assertEqual(serialize(elem, encoding="us-ascii"),
+ '<tag key="&#229;&#246;&#246;&lt;&gt;" />')
+ self.assertEqual(serialize(elem, encoding="iso-8859-1"),
+ "<?xml version='1.0' encoding='iso-8859-1'?>\n"
+ "<tag key=\"\xe5\xf6\xf6&lt;&gt;\" />")
+
+ def test_write_to_filename(self):
+ tree = ET.ElementTree(ET.XML('''<site />'''))
+ tree.write(TESTFN)
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), b'''<site />''')
+
+ def test_write_to_file(self):
+ tree = ET.ElementTree(ET.XML('''<site />'''))
+ with open(TESTFN, 'wb') as f:
+ tree.write(f)
+ self.assertFalse(f.closed)
+ with open(TESTFN, 'rb') as f:
+ self.assertEqual(f.read(), b'''<site />''')
+
+ def test_read_from_stringio(self):
+ tree = ET.ElementTree()
+ stream = StringIO.StringIO('''<?xml version="1.0"?><site></site>''')
+ tree.parse(stream)
+ self.assertEqual(tree.getroot().tag, 'site')
+
+ def test_write_to_stringio(self):
+ tree = ET.ElementTree(ET.XML('''<site />'''))
+ stream = StringIO.StringIO()
+ tree.write(stream)
+ self.assertEqual(stream.getvalue(), '''<site />''')
+
+ class dummy:
+ pass
+
+ def test_read_from_user_reader(self):
+ stream = StringIO.StringIO('''<?xml version="1.0"?><site></site>''')
+ reader = self.dummy()
+ reader.read = stream.read
+ tree = ET.ElementTree()
+ tree.parse(reader)
+ self.assertEqual(tree.getroot().tag, 'site')
+
+ def test_write_to_user_writer(self):
+ tree = ET.ElementTree(ET.XML('''<site />'''))
+ stream = StringIO.StringIO()
+ writer = self.dummy()
+ writer.write = stream.write
+ tree.write(writer)
+ self.assertEqual(stream.getvalue(), '''<site />''')
+
+ def test_tostringlist_invariant(self):
+ root = ET.fromstring('<tag>foo</tag>')
+ self.assertEqual(
+ ET.tostring(root),
+ ''.join(ET.tostringlist(root)))
+ self.assertEqual(
+ ET.tostring(root, 'utf-16'),
+ b''.join(ET.tostringlist(root, 'utf-16')))
+
+
+class ParseErrorTest(unittest.TestCase):
+ def test_subclass(self):
+ self.assertIsInstance(ET.ParseError(), SyntaxError)
+
+ def _get_error(self, s):
+ try:
+ ET.fromstring(s)
+ except ET.ParseError as e:
+ return e
+
+ def test_error_position(self):
+ self.assertEqual(self._get_error('foo').position, (1, 0))
+ self.assertEqual(self._get_error('<tag>&foo;</tag>').position, (1, 5))
+ self.assertEqual(self._get_error('foobar<').position, (1, 6))
+
+ @python_only
+ def test_error_code(self):
+ from xml.parsers import expat
+ self.assertEqual(expat.ErrorString(self._get_error('foo').code),
+ expat.errors.XML_ERROR_SYNTAX)
+
+
+class KeywordArgsTest(unittest.TestCase):
+ # Test various issues with keyword arguments passed to ET.Element
+ # constructor and methods
+ def test_issue14818(self):
+ x = ET.XML("<a>foo</a>")
+ self.assertEqual(x.find('a', None),
+ x.find(path='a', namespaces=None))
+ self.assertEqual(x.findtext('a', None, None),
+ x.findtext(path='a', default=None, namespaces=None))
+ self.assertEqual(x.findall('a', None),
+ x.findall(path='a', namespaces=None))
+ self.assertEqual(list(x.iterfind('a', None)),
+ list(x.iterfind(path='a', namespaces=None)))
+
+ self.assertEqual(ET.Element('a').attrib, {})
+ elements = [
+ ET.Element('a', dict(href="#", id="foo")),
+ ET.Element('a', attrib=dict(href="#", id="foo")),
+ ET.Element('a', dict(href="#"), id="foo"),
+ ET.Element('a', href="#", id="foo"),
+ ET.Element('a', dict(href="#", id="foo"), href="#", id="foo"),
+ ]
+ for e in elements:
+ self.assertEqual(e.tag, 'a')
+ self.assertEqual(e.attrib, dict(href="#", id="foo"))
+
+ e2 = ET.SubElement(elements[0], 'foobar', attrib={'key1': 'value1'})
+ self.assertEqual(e2.attrib['key1'], 'value1')
+
+ with self.assertRaisesRegexp(TypeError, 'must be dict, not str'):
+ ET.Element('a', "I'm not a dict")
+ with self.assertRaisesRegexp(TypeError, 'must be dict, not str'):
+ ET.Element('a', attrib="I'm not a dict")
- >>> e = ET.Element("{default}elem")
- >>> s = ET.SubElement(e, "{default}elem")
- >>> serialize(e, default_namespace="default") # 1
- '<elem xmlns="default"><elem /></elem>'
-
- >>> e = ET.Element("{default}elem")
- >>> s = ET.SubElement(e, "{default}elem")
- >>> s = ET.SubElement(e, "{not-default}elem")
- >>> serialize(e, default_namespace="default") # 2
- '<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>'
-
- >>> e = ET.Element("{default}elem")
- >>> s = ET.SubElement(e, "{default}elem")
- >>> s = ET.SubElement(e, "elem") # unprefixed name
- >>> serialize(e, default_namespace="default") # 3
- Traceback (most recent call last):
- ValueError: cannot use non-qualified names with default_namespace option
-
- """
-
-def bug_200709_register_namespace():
- """
-
- >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
- '<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />'
- >>> ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
- >>> ET.tostring(ET.Element("{http://namespace.invalid/does/not/exist/}title"))
- '<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />'
-
- And the Dublin Core namespace is in the default list:
-
- >>> ET.tostring(ET.Element("{http://purl.org/dc/elements/1.1/}title"))
- '<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />'
-
- """
-
-def bug_200709_element_comment():
- """
-
- Not sure if this can be fixed, really (since the serializer needs
- ET.Comment, not cET.comment).
-
- >>> a = ET.Element('a')
- >>> a.append(ET.Comment('foo'))
- >>> a[0].tag == ET.Comment
- True
-
- >>> a = ET.Element('a')
- >>> a.append(ET.PI('foo'))
- >>> a[0].tag == ET.PI
- True
-
- """
-
-def bug_200709_element_insert():
- """
+# --------------------------------------------------------------------
- >>> a = ET.Element('a')
- >>> b = ET.SubElement(a, 'b')
- >>> c = ET.SubElement(a, 'c')
- >>> d = ET.Element('d')
- >>> a.insert(0, d)
- >>> summarize_list(a)
- ['d', 'b', 'c']
- >>> a.insert(-1, d)
- >>> summarize_list(a)
- ['d', 'b', 'd', 'c']
-
- """
-
-def bug_200709_iter_comment():
- """
-
- >>> a = ET.Element('a')
- >>> b = ET.SubElement(a, 'b')
- >>> comment_b = ET.Comment("TEST-b")
- >>> b.append(comment_b)
- >>> summarize_list(a.iter(ET.Comment))
- ['<Comment>']
-
- """
-
-def bug_18347():
- """
-
- >>> e = ET.XML('<html><CamelCase>text</CamelCase></html>')
- >>> serialize(e)
- '<html><CamelCase>text</CamelCase></html>'
- >>> serialize(e, method="html")
- '<html><CamelCase>text</CamelCase></html>'
- """
+class NoAcceleratorTest(unittest.TestCase):
+ def setUp(self):
+ if ET is not pyET:
+ raise unittest.SkipTest('only for the Python version')
-# --------------------------------------------------------------------
-# reported on bugs.python.org
-
-def bug_1534630():
- """
-
- >>> bob = ET.TreeBuilder()
- >>> e = bob.data("data")
- >>> e = bob.start("tag", {})
- >>> e = bob.end("tag")
- >>> e = bob.close()
- >>> serialize(e)
- '<tag />'
-
- """
-
-def check_issue6233():
- """
-
- >>> e = ET.XML("<?xml version='1.0' encoding='utf-8'?><body>t\\xc3\\xa3g</body>")
- >>> ET.tostring(e, 'ascii')
- "<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>"
- >>> e = ET.XML("<?xml version='1.0' encoding='iso-8859-1'?><body>t\\xe3g</body>")
- >>> ET.tostring(e, 'ascii')
- "<?xml version='1.0' encoding='ascii'?>\\n<body>t&#227;g</body>"
-
- """
-
-def check_issue3151():
- """
-
- >>> e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
- >>> e.tag
- '{${stuff}}localname'
- >>> t = ET.ElementTree(e)
- >>> ET.tostring(e)
- '<ns0:localname xmlns:ns0="${stuff}" />'
-
- """
-
-def check_issue6565():
- """
-
- >>> elem = ET.XML("<body><tag/></body>")
- >>> summarize_list(elem)
- ['tag']
- >>> newelem = ET.XML(SAMPLE_XML)
- >>> elem[:] = newelem[:]
- >>> summarize_list(elem)
- ['tag', 'tag', 'section']
-
- """
-
-def check_html_empty_elems_serialization(self):
- # issue 15970
- # from http://www.w3.org/TR/html401/index/elements.html
- """
-
- >>> empty_elems = ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
- ... 'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']
- >>> elems = ''.join('<%s />' % elem for elem in empty_elems)
- >>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
- '<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
- >>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
- '<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
- >>> elems = ''.join('<%s></%s>' % (elem, elem) for elem in empty_elems)
- >>> serialize(ET.XML('<html>%s</html>' % elems), method='html')
- '<html><AREA><BASE><BASEFONT><BR><COL><FRAME><HR><IMG><INPUT><ISINDEX><LINK><META><PARAM></html>'
- >>> serialize(ET.XML('<html>%s</html>' % elems.lower()), method='html')
- '<html><area><base><basefont><br><col><frame><hr><img><input><isindex><link><meta><param></html>'
-
- """
+ # Test that the C accelerator was not imported for pyET
+ def test_correct_import_pyET(self):
+ # The type of methods defined in Python code is types.FunctionType,
+ # while the type of methods defined inside _elementtree is
+ # <class 'wrapper_descriptor'>
+ self.assertIsInstance(pyET.Element.__init__, types.FunctionType)
+ self.assertIsInstance(pyET.XMLParser.__init__, types.FunctionType)
# --------------------------------------------------------------------
-class CleanContext(object):
- """Provide default namespace mapping and path cache."""
- checkwarnings = None
-
- def __init__(self, quiet=False):
- if sys.flags.optimize >= 2:
- # under -OO, doctests cannot be run and therefore not all warnings
- # will be emitted
- quiet = True
- deprecations = (
- # Search behaviour is broken if search path starts with "/".
- ("This search is broken in 1.3 and earlier, and will be fixed "
- "in a future version. If you rely on the current behaviour, "
- "change it to '.+'", FutureWarning),
- # Element.getchildren() and Element.getiterator() are deprecated.
- ("This method will be removed in future versions. "
- "Use .+ instead.", DeprecationWarning),
- ("This method will be removed in future versions. "
- "Use .+ instead.", PendingDeprecationWarning),
- # XMLParser.doctype() is deprecated.
- ("This method of XMLParser is deprecated. Define doctype.. "
- "method on the TreeBuilder target.", DeprecationWarning))
- self.checkwarnings = test_support.check_warnings(*deprecations,
- quiet=quiet)
-
- def __enter__(self):
- from xml.etree import ElementTree
- self._nsmap = ElementTree._namespace_map
- self._path_cache = ElementTree.ElementPath._cache
- # Copy the default namespace mapping
- ElementTree._namespace_map = self._nsmap.copy()
- # Copy the path cache (should be empty)
- ElementTree.ElementPath._cache = self._path_cache.copy()
- self.checkwarnings.__enter__()
-
- def __exit__(self, *args):
- from xml.etree import ElementTree
+def test_main(module=None):
+ # When invoked without a module, runs the Python ET tests by loading pyET.
+ # Otherwise, uses the given module as the ET.
+ if module is None:
+ module = pyET
+
+ global ET
+ ET = module
+
+ test_classes = [
+ ModuleTest,
+ ElementSlicingTest,
+ BasicElementTest,
+ BadElementTest,
+ BadElementPathTest,
+ ElementTreeTest,
+ IOTest,
+ ParseErrorTest,
+ XIncludeTest,
+ ElementTreeTypeTest,
+ ElementFindTest,
+ ElementIterTest,
+ TreeBuilderTest,
+ XMLParserTest,
+ BugsTest,
+ ]
+
+ # These tests will only run for the pure-Python version that doesn't import
+ # _elementtree. We can't use skipUnless here, because pyET is filled in only
+ # after the module is loaded.
+ if pyET is not ET:
+ test_classes.extend([
+ NoAcceleratorTest,
+ ])
+
+ # Provide default namespace mapping and path cache.
+ from xml.etree import ElementPath
+ nsmap = pyET._namespace_map
+ # Copy the default namespace mapping
+ nsmap_copy = nsmap.copy()
+ # Copy the path cache (should be empty)
+ path_cache = ElementPath._cache
+ ElementPath._cache = path_cache.copy()
+ try:
+ support.run_unittest(*test_classes)
+ finally:
+ from xml.etree import ElementPath
# Restore mapping and path cache
- ElementTree._namespace_map = self._nsmap
- ElementTree.ElementPath._cache = self._path_cache
- self.checkwarnings.__exit__(*args)
-
-
-def test_main(module_name='xml.etree.ElementTree'):
- from test import test_xml_etree
-
- use_py_module = (module_name == 'xml.etree.ElementTree')
-
- # The same doctests are used for both the Python and the C implementations
- assert test_xml_etree.ET.__name__ == module_name
-
- # XXX the C module should give the same warnings as the Python module
- with CleanContext(quiet=not use_py_module):
- test_support.run_doctest(test_xml_etree, verbosity=True)
+ nsmap.clear()
+ nsmap.update(nsmap_copy)
+ ElementPath._cache = path_cache
+ # don't interfere with subsequent tests
+ ET = None
- # The module should not be changed by the tests
- assert test_xml_etree.ET.__name__ == module_name
if __name__ == '__main__':
test_main()
diff --git a/lib-python/2.7/test/test_xml_etree_c.py b/lib-python/2.7/test/test_xml_etree_c.py
index 98410c55a6..e8a22e240b 100644
--- a/lib-python/2.7/test/test_xml_etree_c.py
+++ b/lib-python/2.7/test/test_xml_etree_c.py
@@ -4,19 +4,11 @@ from test import test_support
from test.test_support import precisionbigmemtest, _2G
import unittest
-cET = test_support.import_module('xml.etree.cElementTree')
-
-
-# cElementTree specific tests
-
-def sanity():
- """
- Import sanity.
-
- >>> from xml.etree import cElementTree
- """
+# PyPy: was: cET = test_support.import_module('xml.etree.cElementTree')
+cET = None
+@unittest.skipUnless(cET, 'requires _elementtree')
class MiscTests(unittest.TestCase):
# Issue #8651.
@precisionbigmemtest(size=_2G + 100, memuse=1)
@@ -62,26 +54,41 @@ class MiscTests(unittest.TestCase):
del element.attrib
self.assertEqual(element.attrib, {'A': 'B', 'C': 'D'})
+ def test_bpo_31728(self):
+ # A crash shouldn't happen in case garbage collection triggers a call
+ # to clear() or a reading of text or tail, while a setter or clear()
+ # is already running.
+ elem = cET.Element('elem')
+ class X:
+ def __del__(self):
+ elem.text
+ elem.tail
+ elem.clear()
+
+ elem.text = X()
+ elem.clear() # shouldn't crash
+
+ elem.tail = X()
+ elem.clear() # shouldn't crash
+
+ elem.text = X()
+ elem.text = X() # shouldn't crash
+ elem.clear()
+
+ elem.tail = X()
+ elem.tail = X() # shouldn't crash
+ elem.clear()
+
def test_main():
from test import test_xml_etree, test_xml_etree_c
# Run the tests specific to the C implementation
- test_support.run_doctest(test_xml_etree_c, verbosity=True)
-
- # Assign the C implementation before running the doctests
- # Patch the __name__, to prevent confusion with the pure Python test
- pyET = test_xml_etree.ET
- py__name__ = test_xml_etree.__name__
- test_xml_etree.ET = cET
- if __name__ != '__main__':
- test_xml_etree.__name__ = __name__
- try:
- # Run the same test suite as xml.etree.ElementTree
- test_xml_etree.test_main(module_name='xml.etree.cElementTree')
- finally:
- test_xml_etree.ET = pyET
- test_xml_etree.__name__ = py__name__
+ test_support.run_unittest(MiscTests)
+
+ # Run the same test suite as the Python module
+ test_xml_etree.test_main(module=cET)
+
if __name__ == '__main__':
test_main()
diff --git a/lib-python/2.7/test/test_zipfile.py b/lib-python/2.7/test/test_zipfile.py
index 9c63aebbbe..4e545f140a 100644
--- a/lib-python/2.7/test/test_zipfile.py
+++ b/lib-python/2.7/test/test_zipfile.py
@@ -812,6 +812,20 @@ class TestZip64InSmallFiles(unittest.TestCase):
self.assertEqual(content, "%d" % (i**3 % 57))
zipf2.close()
+ def test_append(self):
+ # Test that appending to the Zip64 archive doesn't change
+ # extra fields of existing entries.
+ with zipfile.ZipFile(TESTFN2, "w", allowZip64=True) as zipfp:
+ zipfp.writestr("strfile", self.data)
+ with zipfile.ZipFile(TESTFN2, "r", allowZip64=True) as zipfp:
+ zinfo = zipfp.getinfo("strfile")
+ extra = zinfo.extra
+ with zipfile.ZipFile(TESTFN2, "a", allowZip64=True) as zipfp:
+ zipfp.writestr("strfile2", self.data)
+ with zipfile.ZipFile(TESTFN2, "r", allowZip64=True) as zipfp:
+ zinfo = zipfp.getinfo("strfile")
+ self.assertEqual(zinfo.extra, extra)
+
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
zipfile.ZIP_FILECOUNT_LIMIT = self._filecount_limit
diff --git a/lib-python/2.7/test/wrongcert.pem b/lib-python/2.7/test/wrongcert.pem
deleted file mode 100644
index 5f92f9bce7..0000000000
--- a/lib-python/2.7/test/wrongcert.pem
+++ /dev/null
@@ -1,32 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIICXAIBAAKBgQC89ZNxjTgWgq7Z1g0tJ65w+k7lNAj5IgjLb155UkUrz0XsHDnH
-FlbsVUg2Xtk6+bo2UEYIzN7cIm5ImpmyW/2z0J1IDVDlvR2xJ659xrE0v5c2cB6T
-f9lnNTwpSoeK24Nd7Jwq4j9vk95fLrdqsBq0/KVlsCXeixS/CaqqduXfvwIDAQAB
-AoGAQFko4uyCgzfxr4Ezb4Mp5pN3Npqny5+Jey3r8EjSAX9Ogn+CNYgoBcdtFgbq
-1yif/0sK7ohGBJU9FUCAwrqNBI9ZHB6rcy7dx+gULOmRBGckln1o5S1+smVdmOsW
-7zUVLBVByKuNWqTYFlzfVd6s4iiXtAE2iHn3GCyYdlICwrECQQDhMQVxHd3EFbzg
-SFmJBTARlZ2GKA3c1g/h9/XbkEPQ9/RwI3vnjJ2RaSnjlfoLl8TOcf0uOGbOEyFe
-19RvCLXjAkEA1s+UE5ziF+YVkW3WolDCQ2kQ5WG9+ccfNebfh6b67B7Ln5iG0Sbg
-ky9cjsO3jbMJQtlzAQnH1850oRD5Gi51dQJAIbHCDLDZU9Ok1TI+I2BhVuA6F666
-lEZ7TeZaJSYq34OaUYUdrwG9OdqwZ9sy9LUav4ESzu2lhEQchCJrKMn23QJAReqs
-ZLHUeTjfXkVk7dHhWPWSlUZ6AhmIlA/AQ7Payg2/8wM/JkZEJEPvGVykms9iPUrv
-frADRr+hAGe43IewnQJBAJWKZllPgKuEBPwoEldHNS8nRu61D7HzxEzQ2xnfj+Nk
-2fgf1MAzzTRsikfGENhVsVWeqOcijWb6g5gsyCmlRpc=
------END RSA PRIVATE KEY-----
------BEGIN CERTIFICATE-----
-MIICsDCCAhmgAwIBAgIJAOqYOYFJfEEoMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
-BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
-aWRnaXRzIFB0eSBMdGQwHhcNMDgwNjI2MTgxNTUyWhcNMDkwNjI2MTgxNTUyWjBF
-MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
-ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
-gQC89ZNxjTgWgq7Z1g0tJ65w+k7lNAj5IgjLb155UkUrz0XsHDnHFlbsVUg2Xtk6
-+bo2UEYIzN7cIm5ImpmyW/2z0J1IDVDlvR2xJ659xrE0v5c2cB6Tf9lnNTwpSoeK
-24Nd7Jwq4j9vk95fLrdqsBq0/KVlsCXeixS/CaqqduXfvwIDAQABo4GnMIGkMB0G
-A1UdDgQWBBTctMtI3EO9OjLI0x9Zo2ifkwIiNjB1BgNVHSMEbjBsgBTctMtI3EO9
-OjLI0x9Zo2ifkwIiNqFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUt
-U3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJAOqYOYFJ
-fEEoMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAQwa7jya/DfhaDn7E
-usPkpgIX8WCL2B1SqnRTXEZfBPPVq/cUmFGyEVRVATySRuMwi8PXbVcOhXXuocA+
-43W+iIsD9pXapCZhhOerCq18TC1dWK98vLUsoK8PMjB6e5H/O8bqojv0EeC+fyCw
-eSHj5jpC8iZKjCHBn+mAi4cQ514=
------END CERTIFICATE-----
diff --git a/lib-python/2.7/test/xmltestdata/expat224_utf8_bug.xml b/lib-python/2.7/test/xmltestdata/expat224_utf8_bug.xml
new file mode 100644
index 0000000000..d66a8e6b50
--- /dev/null
+++ b/lib-python/2.7/test/xmltestdata/expat224_utf8_bug.xml
@@ -0,0 +1,2 @@
+<a b='01234567890123456古人咏雪抽幽思骋妍辞竞险韵偶得一编奇绝辄擅美当时流声后代是以北门之风南山之雅梁园之简黄台之赋至今为作家称述尚矣及至洛阳之卧剡溪之兴灞桥之思亦皆传为故事钱塘沈履德先生隐居西湖两峰间孤高贞洁与雪同调方大雪满天皴肤粟背之际先生乃鹿中豹舄端居闭门或扶童曳杖踏遍六桥三竺时取古人诗讽咏之合唐宋元诸名家集句成诗得二百四十章联络通穿如出一人如呵一气气立于言表格备于篇中略无掇拾补凑之形非胸次包罗壮阔笔底驱走鲍谢欧苏诸公不能为此世称王荆公为集句擅长观其在钟山对雪仅题数篇未见有此噫嘻奇矣哉亦富矣哉予慕先生有袁安之节愧不能为慧可之立乃取新集命工传写使海内同好者知先生为博古传述之士而一新世人之耳目他日必有慕潜德阐幽光而剞劂以传者余实为之执殳矣
+弘治戊午仲冬望日慈溪杨子器衵于海虞官舍序毕诗部' />
diff --git a/lib-python/2.7/textwrap.py b/lib-python/2.7/textwrap.py
index 5c2e4fa523..8d91ffa081 100644
--- a/lib-python/2.7/textwrap.py
+++ b/lib-python/2.7/textwrap.py
@@ -383,6 +383,8 @@ def dedent(text):
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
+
+ Entirely blank lines are normalized to a newline character.
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py
index c2405de4a7..41207724da 100644
--- a/lib-python/2.7/threading.py
+++ b/lib-python/2.7/threading.py
@@ -1024,8 +1024,7 @@ class Thread(_Verbose):
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
- The entire Python program exits when no alive non-daemon threads are
- left.
+ The entire Python program exits when only daemon threads are left.
"""
assert self.__initialized, "Thread.__init__() not called"
diff --git a/lib-python/2.7/trace.py b/lib-python/2.7/trace.py
index e22b4beaea..aa7b841524 100755
--- a/lib-python/2.7/trace.py
+++ b/lib-python/2.7/trace.py
@@ -360,7 +360,7 @@ class CoverageResults:
try:
outfile = open(path, "w")
except IOError, err:
- print >> sys.stderr, ("trace: Could not open %r for writing: %s"
+ print >> sys.stderr, ("trace: Could not open %r for writing: %s "
"- skipping" % (path, err))
return 0, 0
diff --git a/lib-python/2.7/unittest/case.py b/lib-python/2.7/unittest/case.py
index 8f4610145a..a3f75af527 100644
--- a/lib-python/2.7/unittest/case.py
+++ b/lib-python/2.7/unittest/case.py
@@ -526,7 +526,8 @@ class TestCase(object):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
- between the two objects is more than the given delta.
+ difference between the two objects is more than the given
+ delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
@@ -564,7 +565,7 @@ class TestCase(object):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
- between the two objects is less than the given delta.
+ difference between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
as significant digits (measured from the most significant digit).
diff --git a/lib-python/2.7/unittest/loader.py b/lib-python/2.7/unittest/loader.py
index 9163a1a00d..3c161b5bd9 100644
--- a/lib-python/2.7/unittest/loader.py
+++ b/lib-python/2.7/unittest/loader.py
@@ -46,7 +46,7 @@ class TestLoader(object):
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
- """Return a suite of all tests cases contained in testCaseClass"""
+ """Return a suite of all test cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite." \
" Maybe you meant to derive from TestCase?")
@@ -57,7 +57,7 @@ class TestLoader(object):
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
- """Return a suite of all tests cases contained in the given module"""
+ """Return a suite of all test cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
@@ -75,7 +75,7 @@ class TestLoader(object):
return tests
def loadTestsFromName(self, name, module=None):
- """Return a suite of all tests cases given a string specifier.
+ """Return a suite of all test cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
@@ -124,7 +124,7 @@ class TestLoader(object):
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
- """Return a suite of all tests cases found using the given sequence
+ """Return a suite of all test cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
diff --git a/lib-python/2.7/unittest/signals.py b/lib-python/2.7/unittest/signals.py
index e6a5fc5243..9fbcc9ff85 100644
--- a/lib-python/2.7/unittest/signals.py
+++ b/lib-python/2.7/unittest/signals.py
@@ -10,7 +10,7 @@ class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
self.original_handler = default_handler
- if isinstance(default_handler, int):
+ if isinstance(default_handler, (int, long)):
if default_handler == signal.SIG_DFL:
# Pretend it's signal.default_int_handler instead.
default_handler = signal.default_int_handler
diff --git a/lib-python/2.7/unittest/test/test_loader.py b/lib-python/2.7/unittest/test/test_loader.py
index 68e871c698..d46ddc8cfc 100644
--- a/lib-python/2.7/unittest/test/test_loader.py
+++ b/lib-python/2.7/unittest/test/test_loader.py
@@ -10,7 +10,7 @@ class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
- # "Return a suite of all tests cases contained in the TestCase-derived
+ # "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
@@ -23,7 +23,7 @@ class Test_TestLoader(unittest.TestCase):
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
- # "Return a suite of all tests cases contained in the TestCase-derived
+ # "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
@@ -36,7 +36,7 @@ class Test_TestLoader(unittest.TestCase):
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
- # "Return a suite of all tests cases contained in the TestCase-derived
+ # "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
@@ -57,7 +57,7 @@ class Test_TestLoader(unittest.TestCase):
else:
self.fail('Should raise TypeError')
- # "Return a suite of all tests cases contained in the TestCase-derived
+ # "Return a suite of all test cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
diff --git a/lib-python/2.7/urlparse.py b/lib-python/2.7/urlparse.py
index f6d44c8dd3..ae6310feb6 100644
--- a/lib-python/2.7/urlparse.py
+++ b/lib-python/2.7/urlparse.py
@@ -165,6 +165,25 @@ def _splitnetloc(url, start=0):
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
+def _checknetloc(netloc):
+ if not netloc or not isinstance(netloc, unicode):
+ return
+ # looking for characters like \u2100 that expand to 'a/c'
+ # IDNA uses NFKC equivalence, so normalize for this check
+ import unicodedata
+ n = netloc.replace(u'@', u'') # ignore characters already included
+ n = n.replace(u':', u'') # but not the surrounding text
+ n = n.replace(u'#', u'')
+ n = n.replace(u'?', u'')
+ netloc2 = unicodedata.normalize('NFKC', n)
+ if n == netloc2:
+ return
+ for c in '/?#@:':
+ if c in netloc2:
+ raise ValueError("netloc %r contains invalid characters "
+ "under NFKC normalization"
+ % netloc)
+
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
@@ -193,6 +212,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
@@ -216,6 +236,7 @@ def urlsplit(url, scheme='', allow_fragments=True):
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
+ _checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
@@ -362,7 +383,7 @@ def unquote(s):
append(item)
return ''.join(res)
-def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
+def parse_qs(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
"""Parse a query given as a string argument.
Arguments:
@@ -379,16 +400,20 @@ def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
+
+ max_num_fields: int. If set, then throws a ValueError if there
+ are more than n fields read by parse_qsl().
"""
dict = {}
- for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
+ for name, value in parse_qsl(qs, keep_blank_values, strict_parsing,
+ max_num_fields):
if name in dict:
dict[name].append(value)
else:
dict[name] = [value]
return dict
-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
+def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
"""Parse a query given as a string argument.
Arguments:
@@ -405,8 +430,19 @@ def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
+ max_num_fields: int. If set, then throws a ValueError if there
+ are more than n fields read by parse_qsl().
+
Returns a list, as G-d intended.
"""
+ # If max_num_fields is defined then check that the number of fields
+ # is less than max_num_fields. This prevents a memory exhaustion DOS
+ # attack via post bodies with many fields.
+ if max_num_fields is not None:
+ num_fields = 1 + qs.count('&') + qs.count(';')
+ if max_num_fields < num_fields:
+ raise ValueError('Max number of fields exceeded')
+
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
diff --git a/lib-python/2.7/uu.py b/lib-python/2.7/uu.py
index f8fa4c4757..8eaea5960d 100755
--- a/lib-python/2.7/uu.py
+++ b/lib-python/2.7/uu.py
@@ -73,6 +73,13 @@ def encode(in_file, out_file, name=None, mode=None):
name = '-'
if mode is None:
mode = 0666
+
+ #
+ # Remove newline chars from name
+ #
+ name = name.replace('\n','\\n')
+ name = name.replace('\r','\\r')
+
#
# Write the data
#
diff --git a/lib-python/2.7/uuid.py b/lib-python/2.7/uuid.py
index ca256a4360..8bd37c89e8 100644
--- a/lib-python/2.7/uuid.py
+++ b/lib-python/2.7/uuid.py
@@ -357,8 +357,9 @@ def _find_mac(command, args, hw_identifiers, get_index):
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
+ keywords = ('hwaddr', 'ether', 'address:', 'lladdr')
for args in ('', '-a', '-av'):
- mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
+ mac = _find_mac('ifconfig', args, keywords, lambda i: i+1)
if mac:
return mac
@@ -371,7 +372,20 @@ def _arp_getnode():
return None
# Try getting the MAC addr from arp based on our IP address (Solaris).
- return _find_mac('arp', '-an', [ip_addr], lambda i: -1)
+ mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
+ if mac:
+ return mac
+
+ # This works on OpenBSD
+ mac = _find_mac('arp', '-an', [ip_addr], lambda i: i+1)
+ if mac:
+ return mac
+
+ # This works on Linux, FreeBSD and NetBSD
+ mac = _find_mac('arp', '-an', ['(%s)' % ip_addr],
+ lambda i: i+2)
+ if mac:
+ return mac
def _lanscan_getnode():
"""Get the hardware address on Unix by running lanscan."""
@@ -423,7 +437,7 @@ def _ipconfig_getnode():
with pipe:
for line in pipe:
value = line.split(':')[-1].strip().lower()
- if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
+ if re.match('(?:[0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]$', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
@@ -531,6 +545,11 @@ def _random_getnode():
_node = None
+_NODE_GETTERS_WIN32 = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
+
+_NODE_GETTERS_UNIX = [_unixdll_getnode, _ifconfig_getnode, _arp_getnode,
+ _lanscan_getnode, _netstat_getnode]
+
def getnode():
"""Get the hardware address as a 48-bit positive integer.
@@ -546,18 +565,19 @@ def getnode():
import sys
if sys.platform == 'win32':
- getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
+ getters = _NODE_GETTERS_WIN32
else:
- getters = [_unixdll_getnode, _ifconfig_getnode, _arp_getnode,
- _lanscan_getnode, _netstat_getnode]
+ getters = _NODE_GETTERS_UNIX
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
- if _node is not None:
+ if (_node is not None) and (0 <= _node < (1 << 48)):
return _node
+ assert False, '_random_getnode() returned invalid value: {}'.format(_node)
+
_last_timestamp = None
diff --git a/lib-python/2.7/warnings.py b/lib-python/2.7/warnings.py
index 5643c428fe..1f6e1b3c18 100644
--- a/lib-python/2.7/warnings.py
+++ b/lib-python/2.7/warnings.py
@@ -85,10 +85,10 @@ def filterwarnings(action, message="", category=Warning, module="", lineno=0,
"category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, basestring), "module must be a string"
- assert isinstance(lineno, int) and lineno >= 0, \
+ assert isinstance(lineno, (int, long)) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
- re.compile(module), lineno)
+ re.compile(module), int(lineno))
if append:
filters.append(item)
else:
@@ -106,9 +106,9 @@ def simplefilter(action, category=Warning, lineno=0, append=0):
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
- assert isinstance(lineno, int) and lineno >= 0, \
+ assert isinstance(lineno, (int, long)) and lineno >= 0, \
"lineno must be an int >= 0"
- item = (action, None, category, None, lineno)
+ item = (action, None, category, None, int(lineno))
if append:
filters.append(item)
else:
diff --git a/lib-python/2.7/weakref.py b/lib-python/2.7/weakref.py
index d3f9997194..fa881b3663 100644
--- a/lib-python/2.7/weakref.py
+++ b/lib-python/2.7/weakref.py
@@ -42,6 +42,7 @@ except ImportError:
except KeyError:
pass
+
def _remove_dead_weakref(d, key):
try:
wr = d[key]
@@ -72,7 +73,7 @@ class WeakValueDictionary(UserDict.UserDict):
args = args[1:]
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
- def remove(wr, selfref=ref(self)):
+ def remove(wr, selfref=ref(self), _atomic_removal=_remove_dead_weakref):
self = selfref()
if self is not None:
if self._iterating:
@@ -80,7 +81,7 @@ class WeakValueDictionary(UserDict.UserDict):
else:
# Atomic removal is necessary since this function
# can be called asynchronously by the GC
- _delitem_if_value_is(self.data, wr.key, wr)
+ _atomic_removal(self.data, wr.key)
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
@@ -97,6 +98,8 @@ class WeakValueDictionary(UserDict.UserDict):
_remove_dead_weakref(d, key)
def __getitem__(self, key):
+ if self._pending_removals:
+ self._commit_removals()
o = self.data[key]()
if o is None:
raise KeyError, key
@@ -109,6 +112,8 @@ class WeakValueDictionary(UserDict.UserDict):
del self.data[key]
def __contains__(self, key):
+ if self._pending_removals:
+ self._commit_removals()
try:
o = self.data[key]()
except KeyError:
@@ -116,6 +121,8 @@ class WeakValueDictionary(UserDict.UserDict):
return o is not None
def has_key(self, key):
+ if self._pending_removals:
+ self._commit_removals()
try:
o = self.data[key]()
except KeyError:
@@ -136,6 +143,8 @@ class WeakValueDictionary(UserDict.UserDict):
self.data.clear()
def copy(self):
+ if self._pending_removals:
+ self._commit_removals()
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
@@ -147,6 +156,8 @@ class WeakValueDictionary(UserDict.UserDict):
def __deepcopy__(self, memo):
from copy import deepcopy
+ if self._pending_removals:
+ self._commit_removals()
new = self.__class__()
for key, wr in self.data.items():
o = wr()
@@ -155,6 +166,8 @@ class WeakValueDictionary(UserDict.UserDict):
return new
def get(self, key, default=None):
+ if self._pending_removals:
+ self._commit_removals()
try:
wr = self.data[key]
except KeyError:
@@ -168,6 +181,8 @@ class WeakValueDictionary(UserDict.UserDict):
return o
def items(self):
+ if self._pending_removals:
+ self._commit_removals()
L = []
for key, wr in self.data.items():
o = wr()
@@ -176,6 +191,8 @@ class WeakValueDictionary(UserDict.UserDict):
return L
def iteritems(self):
+ if self._pending_removals:
+ self._commit_removals()
with _IterationGuard(self):
for wr in self.data.itervalues():
value = wr()
@@ -183,6 +200,8 @@ class WeakValueDictionary(UserDict.UserDict):
yield wr.key, value
def iterkeys(self):
+ if self._pending_removals:
+ self._commit_removals()
with _IterationGuard(self):
for k in self.data.iterkeys():
yield k
@@ -199,11 +218,15 @@ class WeakValueDictionary(UserDict.UserDict):
keep the values around longer than needed.
"""
+ if self._pending_removals:
+ self._commit_removals()
with _IterationGuard(self):
for wr in self.data.itervalues():
yield wr
def itervalues(self):
+ if self._pending_removals:
+ self._commit_removals()
with _IterationGuard(self):
for wr in self.data.itervalues():
obj = wr()
@@ -235,13 +258,13 @@ class WeakValueDictionary(UserDict.UserDict):
return o
def setdefault(self, key, default=None):
+ if self._pending_removals:
+ self._commit_removals()
try:
o = self.data[key]()
except KeyError:
o = None
if o is None:
- if self._pending_removals:
- self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
@@ -277,9 +300,13 @@ class WeakValueDictionary(UserDict.UserDict):
keep the values around longer than needed.
"""
+ if self._pending_removals:
+ self._commit_removals()
return self.data.values()
def values(self):
+ if self._pending_removals:
+ self._commit_removals()
L = []
for wr in self.data.values():
o = wr()
diff --git a/lib-python/2.7/webbrowser.py b/lib-python/2.7/webbrowser.py
index 23891796e3..15eeb660e2 100755
--- a/lib-python/2.7/webbrowser.py
+++ b/lib-python/2.7/webbrowser.py
@@ -319,11 +319,10 @@ Chromium = Chrome
class Opera(UnixBrowser):
"Launcher class for Opera browser."
- raise_opts = ["-noraise", ""]
- remote_args = ['-remote', 'openURL(%s%action)']
+ remote_args = ['%action', '%s']
remote_action = ""
- remote_action_newwin = ",new-window"
- remote_action_newtab = ",new-page"
+ remote_action_newwin = "--new-window"
+ remote_action_newtab = ""
background = True
diff --git a/lib-python/2.7/xml/dom/domreg.py b/lib-python/2.7/xml/dom/domreg.py
index ec3acdf9c1..0835280106 100644
--- a/lib-python/2.7/xml/dom/domreg.py
+++ b/lib-python/2.7/xml/dom/domreg.py
@@ -8,6 +8,8 @@ from xml.dom.minicompat import * # isinstance, StringTypes
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
+import sys
+
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
@@ -57,7 +59,7 @@ def getDOMImplementation(name = None, features = ()):
return mod.getDOMImplementation()
elif name:
return registered[name]()
- elif "PYTHON_DOM" in os.environ:
+ elif not sys.flags.ignore_environment and "PYTHON_DOM" in os.environ:
return getDOMImplementation(name = os.environ["PYTHON_DOM"])
# User did not specify a name, try implementations in arbitrary
diff --git a/lib-python/2.7/xml/dom/minidom.py b/lib-python/2.7/xml/dom/minidom.py
index c30e2462ee..05649d620f 100644
--- a/lib-python/2.7/xml/dom/minidom.py
+++ b/lib-python/2.7/xml/dom/minidom.py
@@ -1273,7 +1273,7 @@ class DocumentType(Identified, Childless, Node):
entity.encoding = e.encoding
entity.version = e.version
clone.entities._seq.append(entity)
- e._call_user_data_handler(operation, n, entity)
+ e._call_user_data_handler(operation, e, entity)
self._call_user_data_handler(operation, self, clone)
return clone
else:
@@ -1876,7 +1876,7 @@ def _clone_node(node, deep, newOwnerDocument):
entity.ownerDocument = newOwnerDocument
clone.entities._seq.append(entity)
if hasattr(e, '_call_user_data_handler'):
- e._call_user_data_handler(operation, n, entity)
+ e._call_user_data_handler(operation, e, entity)
else:
# Note the cloning of Document and DocumentType nodes is
# implementation specific. minidom handles those cases
diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py
index 7a1ec560dc..dca69106d1 100644
--- a/lib-python/2.7/xml/etree/ElementTree.py
+++ b/lib-python/2.7/xml/etree/ElementTree.py
@@ -1450,6 +1450,8 @@ class TreeBuilder(object):
self._tail = 1
return self._last
+_sentinel = ['sentinel']
+
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
@@ -1465,7 +1467,11 @@ class TreeBuilder(object):
class XMLParser(object):
- def __init__(self, html=0, target=None, encoding=None):
+ def __init__(self, html=_sentinel, target=None, encoding=None):
+ if html is not _sentinel:
+ warnings.warnpy3k(
+ "The html argument of XMLParser() is deprecated",
+ DeprecationWarning, stacklevel=2)
try:
from xml.parsers import expat
except ImportError:
@@ -1617,17 +1623,7 @@ class XMLParser(object):
pubid = pubid[1:-1]
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
- elif 1: # XXX PyPy fix, used to be
- # elif self.doctype is not self._XMLParser__doctype:
- # but that condition is always True on CPython, as far
- # as I can tell: self._XMLParser__doctype always
- # returns a fresh unbound method object.
- # On PyPy, unbound and bound methods have stronger
- # unicity guarantees: self._XMLParser__doctype
- # can return the same unbound method object, in
- # some cases making the test above incorrectly False.
- # (My guess would be that the line above is a backport
- # from Python 3.)
+ elif self.doctype != self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
diff --git a/lib-python/2.7/xml/sax/__init__.py b/lib-python/2.7/xml/sax/__init__.py
index 005b66e38a..d2e33913c0 100644
--- a/lib-python/2.7/xml/sax/__init__.py
+++ b/lib-python/2.7/xml/sax/__init__.py
@@ -59,7 +59,7 @@ if _false:
import xml.sax.expatreader
import os, sys
-if "PY_SAX_PARSER" in os.environ:
+if not sys.flags.ignore_environment and "PY_SAX_PARSER" in os.environ:
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
diff --git a/lib-python/2.7/xml/sax/expatreader.py b/lib-python/2.7/xml/sax/expatreader.py
index 21c9db91e9..bae663bdd9 100644
--- a/lib-python/2.7/xml/sax/expatreader.py
+++ b/lib-python/2.7/xml/sax/expatreader.py
@@ -105,9 +105,16 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
source = saxutils.prepare_input_source(source)
self._source = source
- self.reset()
- self._cont_handler.setDocumentLocator(ExpatLocator(self))
- xmlreader.IncrementalParser.parse(self, source)
+ try:
+ self.reset()
+ self._cont_handler.setDocumentLocator(ExpatLocator(self))
+ xmlreader.IncrementalParser.parse(self, source)
+ except:
+ # bpo-30264: Close the source on error to not leak resources:
+ # xml.sax.parse() doesn't give access to the underlying parser
+ # to the caller
+ self._close_source()
+ raise
def prepareParser(self, source):
if source.getSystemId() is not None:
@@ -216,6 +223,17 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
+ def _close_source(self):
+ source = self._source
+ try:
+ file = source.getCharacterStream()
+ if file is not None:
+ file.close()
+ finally:
+ file = source.getByteStream()
+ if file is not None:
+ file.close()
+
def close(self):
if (self._entity_stack or self._parser is None or
isinstance(self._parser, _ClosedParser)):
@@ -235,6 +253,7 @@ class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
parser.ErrorColumnNumber = self._parser.ErrorColumnNumber
parser.ErrorLineNumber = self._parser.ErrorLineNumber
self._parser = parser
+ self._close_source()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
diff --git a/lib-python/2.7/zipfile.py b/lib-python/2.7/zipfile.py
index a16d86038d..87b1fe1d7d 100644
--- a/lib-python/2.7/zipfile.py
+++ b/lib-python/2.7/zipfile.py
@@ -131,6 +131,29 @@ _CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
+_DD_SIGNATURE = 0x08074b50
+
+_EXTRA_FIELD_STRUCT = struct.Struct('<HH')
+
+def _strip_extra(extra, xids):
+ # Remove Extra Fields with specified IDs.
+ unpack = _EXTRA_FIELD_STRUCT.unpack
+ modified = False
+ buffer = []
+ start = i = 0
+ while i + 4 <= len(extra):
+ xid, xlen = unpack(extra[i : i + 4])
+ j = i + 4 + xlen
+ if xid in xids:
+ if i != start:
+ buffer.append(extra[start : i])
+ start = j
+ modified = True
+ i = j
+ if not modified:
+ return extra
+ return b''.join(buffer)
+
def _check_zipfile(fp):
try:
if _EndRecData(fp):
@@ -776,7 +799,6 @@ class ZipFile(object):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
- self._start_disk = self.fp.tell()
elif key == 'a':
try:
# See if file is a zip file
@@ -790,7 +812,6 @@ class ZipFile(object):
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
- self._start_disk = self.fp.tell()
else:
raise RuntimeError('Mode must be "r", "w" or "a"')
except:
@@ -821,18 +842,17 @@ class ZipFile(object):
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
- # self._start_disk: Position of the start of ZIP archive
- # It is zero, unless ZIP was concatenated to another file
- self._start_disk = endrec[_ECD_LOCATION] - size_cd - offset_cd
+ # "concat" is zero, unless zip was concatenated to another file
+ concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
- self._start_disk -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
+ concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
- inferred = self._start_disk + offset_cd
- print "given, inferred, offset", offset_cd, inferred, self._start_disk
+ inferred = concat + offset_cd
+ print "given, inferred, offset", offset_cd, inferred, concat
# self.start_dir: Position of start of central directory
- self.start_dir = offset_cd + self._start_disk
+ self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = cStringIO.StringIO(data)
@@ -862,7 +882,7 @@ class ZipFile(object):
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
- x.header_offset = x.header_offset + self._start_disk
+ x.header_offset = x.header_offset + concat
x.filename = x._decodeFilename()
self.filelist.append(x)
self.NameToInfo[x.filename] = x
@@ -1206,7 +1226,7 @@ class ZipFile(object):
raise RuntimeError('Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
- position = self.fp.tell() # Preserve current position in file
+ position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
@@ -1257,9 +1277,9 @@ class ZipFile(object):
self.fp.write(bytes)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
- fmt = '<LQQ' if zip64 else '<LLL'
- self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
- zinfo.file_size))
+ fmt = '<LLQQ' if zip64 else '<LLLL'
+ self.fp.write(struct.pack(fmt, _DD_SIGNATURE, zinfo.CRC,
+ zinfo.compress_size, zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
@@ -1292,14 +1312,16 @@ class ZipFile(object):
file_size = zinfo.file_size
compress_size = zinfo.compress_size
- header_offset = zinfo.header_offset - self._start_disk
- if header_offset > ZIP64_LIMIT:
- extra.append(header_offset)
+ if zinfo.header_offset > ZIP64_LIMIT:
+ extra.append(zinfo.header_offset)
header_offset = 0xffffffffL
+ else:
+ header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
+ extra_data = _strip_extra(extra_data, (1,))
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
@@ -1339,7 +1361,7 @@ class ZipFile(object):
# Write end-of-zip-archive record
centDirCount = len(self.filelist)
centDirSize = pos2 - pos1
- centDirOffset = pos1 - self._start_disk
+ centDirOffset = pos1
requires_zip64 = None
if centDirCount > ZIP_FILECOUNT_LIMIT:
requires_zip64 = "Files count"
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
index f5d5da98ae..3fdcd35a9f 100644
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -115,6 +115,7 @@ testmap = [
RegrTest('test_augassign.py', core=True),
RegrTest('test_base64.py', usemodules='struct'),
RegrTest('test_bastion.py'),
+ RegrTest('test_bdb.py', usemodules='bdb'),
RegrTest('test_bigaddrspace.py'),
RegrTest('test_bigmem.py'),
RegrTest('test_binascii.py', usemodules='binascii'),
@@ -271,6 +272,7 @@ testmap = [
RegrTest('test_import.py', core=True),
RegrTest('test_importhooks.py', core=True),
RegrTest('test_importlib.py'),
+ RegrTest('test_import_magic.py'),
RegrTest('test_index.py'),
RegrTest('test_inspect.py'),
RegrTest('test_int.py', core=True),
@@ -369,6 +371,7 @@ testmap = [
RegrTest('test_random.py'),
RegrTest('test_re.py', core=True),
RegrTest('test_readline.py'),
+ RegrTest('test_regrtest.py'),
RegrTest('test_repr.py', core=True),
RegrTest('test_resource.py'),
RegrTest('test_rfc822.py'),
@@ -429,6 +432,7 @@ testmap = [
RegrTest('test_tcl.py'),
RegrTest('test_telnetlib.py'),
RegrTest('test_tempfile.py'),
+ RegrTest('test_test_support.py'),
RegrTest('test_textwrap.py'),
RegrTest('test_thread.py', usemodules="thread", core=True),
RegrTest('test_threaded_import.py', usemodules="thread", core=True),
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
index 2f50aae344..19d1143feb 100644
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,6 +5,16 @@ Process for upgrading the stdlib to a new cpython version
overly detailed
+The idea is to
+- exactly copy cpython's stdlib to a "vendor" branch on top of the previous
+ version
+- clean up the hg history for moved files so the merge from pypy will work
+- branch off that with a new integration branch
+- merge default or py3 into that branch, which will update with all the
+ modifications pypy made to the stdlib
+
+And in more detail:
+
0. make sure your working dir is clean
1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
or create branch vendor/stdlib-3-*
@@ -13,7 +23,7 @@ Process for upgrading the stdlib to a new cpython version
2b. copy the files from the cpython repo
2c. hg add lib-python/2.7/ or lib-python/3/
2d. hg remove --after
- 2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+ 2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'` or `git diff --compact-summary v<old>..v<new> Lib` and search for `=>`
2f. fix copies / renames manually by running `hg copy --after <from> <to>` for each copied file
3. update stdlib-version.txt with the output of hg -id from the cpython repo
4. commit
diff --git a/lib-python/stdlib-version.txt b/lib-python/stdlib-version.txt
index 4163009081..64c64a077b 100644
--- a/lib-python/stdlib-version.txt
+++ b/lib-python/stdlib-version.txt
@@ -1,9 +1,12 @@
in here there are copies of the Lib/ directory of the CPython HG repository
at http://hg.python.org/cpython/
-the outputs for hg id of each are:
+2.7 is using Gentoo fork of CPython 2.7 git repository with additional
+security fixes at https://gitweb.gentoo.org/fork/cpython.git
+
+the outputs for hg id or `git describe HEAD` of each are:
2.7::
- a06454b1afa1 (2.7) v2.7.13
+ v2.7.18-3-g138e2caeb4
3::
cef745775b65 (3.2) v3.2.5
diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py b/lib_pypy/_cffi_ssl/_stdssl/__init__.py
index 54cad963c0..c444fc3b66 100644
--- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py
+++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py
@@ -744,7 +744,7 @@ def _fs_decode(name):
return name.decode(sys.getfilesystemencoding())
def _fs_converter(name):
""" name must not be None """
- if isinstance(name, str):
+ if isinstance(name, unicode):
return name.encode(sys.getfilesystemencoding())
return bytes(name)
@@ -908,7 +908,7 @@ class _SSLContext(object):
# Minimal security flags for server and client side context.
# Client sockets ignore server-side parameters.
options |= lib.SSL_OP_NO_COMPRESSION
- # options |= lib.SSL_OP_CIPHER_SERVER_PREFERENCE
+ options |= lib.SSL_OP_CIPHER_SERVER_PREFERENCE
options |= lib.SSL_OP_SINGLE_DH_USE
options |= lib.SSL_OP_SINGLE_ECDH_USE
lib.SSL_CTX_set_options(self.ctx, options)
diff --git a/lib_pypy/_cffi_ssl/_stdssl/certificate.py b/lib_pypy/_cffi_ssl/_stdssl/certificate.py
index 6b49e88b14..fe660bde81 100644
--- a/lib_pypy/_cffi_ssl/_stdssl/certificate.py
+++ b/lib_pypy/_cffi_ssl/_stdssl/certificate.py
@@ -265,6 +265,9 @@ def _get_crl_dp(certificate):
count = lib.sk_DIST_POINT_num(dps)
for i in range(count):
dp = lib.sk_DIST_POINT_value(dps, i);
+ if not dp.distpoint:
+ # Ignore empty DP value, CVE-2019-5010
+ continue
gns = dp.distpoint.name.fullname;
jcount = lib.sk_GENERAL_NAME_num(gns)
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py
index e972a83ba0..df64e3509a 100644
--- a/lib_pypy/_ctypes/array.py
+++ b/lib_pypy/_ctypes/array.py
@@ -109,19 +109,22 @@ class ArrayMeta(_CDataMeta):
# array accepts very strange parameters as part of structure
# or function argument...
from ctypes import c_char, c_wchar
- if issubclass(self._type_, (c_char, c_wchar)):
- if isinstance(value, basestring):
- if len(value) > self._length_:
- raise ValueError("Invalid length")
- value = self(*value)
- elif not isinstance(value, self):
- raise TypeError("expected string or Unicode object, %s found"
- % (value.__class__.__name__,))
- else:
- if isinstance(value, tuple):
- if len(value) > self._length_:
- raise RuntimeError("Invalid length")
- value = self(*value)
+ if isinstance(value, self):
+ return value
+ if hasattr(self, '_type_'):
+ if issubclass(self._type_, (c_char, c_wchar)):
+ if isinstance(value, basestring):
+ if len(value) > self._length_:
+ raise ValueError("Invalid length")
+ value = self(*value)
+ elif not isinstance(value, self):
+ raise TypeError(
+ "expected string or Unicode object, %s found"
+ % (value.__class__.__name__,))
+ if isinstance(value, tuple):
+ if len(value) > self._length_:
+ raise RuntimeError("Invalid length")
+ value = self(*value)
return _CDataMeta.from_param(self, value)
def _build_ffiargtype(self):
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
index d8f91caa29..c0f49b157d 100644
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -46,6 +46,9 @@ class COMError(Exception):
self.details = details
class _CDataMeta(type):
+ def _is_abstract(self):
+ return getattr(self, '_type_', 'abstract') == 'abstract'
+
def from_param(self, value):
if isinstance(value, self):
return value
@@ -96,6 +99,8 @@ class _CDataMeta(type):
return self.from_address(dll.__pypy_dll__.getaddressindll(name))
def from_buffer(self, obj, offset=0):
+ if self._is_abstract():
+ raise TypeError('abstract class')
size = self._sizeofinstances()
if isinstance(obj, (str, unicode)):
# hack, buffer(str) will always return a readonly buffer.
@@ -122,6 +127,8 @@ class _CDataMeta(type):
return result
def from_buffer_copy(self, obj, offset=0):
+ if self._is_abstract():
+ raise TypeError('abstract class')
size = self._sizeofinstances()
buf = buffer(obj, offset, size)
if len(buf) < size:
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
index 0894788d81..f865f8870f 100644
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -40,14 +40,17 @@ class PointerType(_CDataMeta):
def from_param(self, value):
if value is None:
return self(None)
- # If we expect POINTER(<type>), but receive a <type> instance, accept
- # it by calling byref(<type>).
- if isinstance(value, self._type_):
- return byref(value)
- # Array instances are also pointers when the item types are the same.
- if isinstance(value, (_Pointer, Array)):
- if issubclass(type(value)._type_, self._type_):
- return value
+ if isinstance(value, self):
+ return value
+ if hasattr(self, '_type_'):
+ # If we expect POINTER(<type>), but receive a <type> instance, accept
+ # it by calling byref(<type>).
+ if isinstance(value, self._type_):
+ return byref(value)
+ # Array instances are also pointers when the item types are the same.
+ if isinstance(value, (_Pointer, Array)):
+ if issubclass(type(value)._type_, self._type_):
+ return value
return _CDataMeta.from_param(self, value)
def _sizeofinstances(self):
@@ -60,6 +63,8 @@ class PointerType(_CDataMeta):
return True
def set_type(self, TP):
+ if self._is_abstract():
+ raise TypeError('abstract class')
ffiarray = _rawffi.Array('P')
def __init__(self, value=None):
if not hasattr(self, '_buffer'):
@@ -179,6 +184,7 @@ def POINTER(cls):
klass = type(_Pointer)("LP_%s" % cls,
(_Pointer,),
{})
+ klass._type_ = 'P'
_pointer_type_cache[id(klass)] = klass
return klass
else:
diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py
index 84e7f4364c..54e3ecd2a7 100644
--- a/lib_pypy/_ctypes/primitive.py
+++ b/lib_pypy/_ctypes/primitive.py
@@ -146,6 +146,14 @@ FROM_PARAM_BY_TYPE = {
'P': from_param_void_p,
}
+CTYPES_TO_PEP3118_TABLE = {
+ 'i': {2: 'h', 4: 'i', 8: 'q'},
+ 'I': {2: 'H', 4: 'I', 8: 'Q'},
+ 'l': {4: 'l', 8: 'q'},
+ 'L': {4: 'L', 8: 'Q'},
+ '?': {1: '?', 2: 'h', 4: 'l', 8: 'q'},
+}
+
class SimpleType(_CDataMeta):
def __new__(self, name, bases, dct):
try:
@@ -157,6 +165,8 @@ class SimpleType(_CDataMeta):
break
else:
raise AttributeError("cannot find _type_ attribute")
+ if tp == 'abstract':
+ tp = 'i'
if (not isinstance(tp, str) or
not len(tp) == 1 or
tp not in SIMPLE_TYPE_CHARS):
@@ -168,7 +178,11 @@ class SimpleType(_CDataMeta):
result._ffishape_ = tp
result._fficompositesize_ = None
result._ffiarray = ffiarray
- result._format = byteorder[sys.byteorder] + tp
+ if tp in CTYPES_TO_PEP3118_TABLE:
+ pep_code = CTYPES_TO_PEP3118_TABLE[tp][_rawffi.sizeof(tp)]
+ else:
+ pep_code = tp
+ result._format = byteorder[sys.byteorder] + pep_code
if tp == 'z':
# c_char_p
def _getvalue(self):
@@ -328,7 +342,7 @@ class SimpleType(_CDataMeta):
result.__ctype_be__ = result
swapped.__ctype_be__ = result
swapped.__ctype_le__ = swapped
- swapped._format = '<' + tp
+ swapped._format = '<' + pep_code
else:
name += '_be'
swapped = self.__new__(self, name, bases, dct)
@@ -336,7 +350,7 @@ class SimpleType(_CDataMeta):
result.__ctype_le__ = result
swapped.__ctype_le__ = result
swapped.__ctype_be__ = swapped
- swapped._format = '>' + tp
+ swapped._format = '>' + pep_code
from _ctypes import sizeof
def _getval(self):
return swap_bytes(self._buffer[0], sizeof(self), name, 'get')
@@ -353,7 +367,8 @@ class SimpleType(_CDataMeta):
def from_param(self, value):
if isinstance(value, self):
return value
-
+ if self._type_ == 'abstract':
+ raise TypeError('abstract class')
from_param_f = FROM_PARAM_BY_TYPE.get(self._type_)
if from_param_f:
res = from_param_f(self, value)
@@ -387,7 +402,7 @@ class SimpleType(_CDataMeta):
class _SimpleCData(_CData):
__metaclass__ = SimpleType
- _type_ = 'i'
+ _type_ = 'abstract'
def __init__(self, value=DEFAULT_VALUE):
if not hasattr(self, '_buffer'):
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
index 5324717c33..cc0c373221 100644
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -119,6 +119,8 @@ class Field(object):
if self.is_bitfield:
# bitfield member, use direct access
return obj._buffer.__getattr__(self.name)
+ elif not isinstance(obj, _CData):
+ raise(TypeError, 'not a ctype instance')
else:
fieldtype = self.ctype
offset = self.num
@@ -142,6 +144,8 @@ class Field(object):
from ctypes import memmove
dest = obj._buffer.fieldaddress(self.name)
memmove(dest, arg, fieldtype._fficompositesize_)
+ elif not isinstance(obj, _CData):
+ raise(TypeError, 'not a ctype instance')
else:
obj._buffer.__setattr__(self.name, arg)
@@ -209,6 +213,9 @@ class StructOrUnionMeta(_CDataMeta):
__setattr__ = struct_setattr
+ def _is_abstract(self):
+ return False
+
def from_address(self, address):
instance = StructOrUnion.__new__(self)
if isinstance(address, _rawffi.StructureInstance):
@@ -317,7 +324,9 @@ class StructOrUnion(_CData):
memmove(addr, origin, self._fficompositesize_)
def _to_ffi_param(self):
- return self._buffer
+ newparam = StructOrUnion.__new__(type(self))
+ self._copy_to(newparam._buffer.buffer)
+ return newparam._buffer
def __buffer__(self, flags):
fmt = type(self)._getformat()
diff --git a/lib_pypy/_ctypes_test.c b/lib_pypy/_ctypes_test.c
index 0815aba391..f38292b460 100644
--- a/lib_pypy/_ctypes_test.c
+++ b/lib_pypy/_ctypes_test.c
@@ -54,6 +54,19 @@ _testfunc_cbk_large_struct(Test in, void (*func)(Test))
func(in);
}
+/*
+ * See issue 29565. Update a structure passed by value;
+ * the caller should not see any change.
+ */
+
+EXPORT(void)
+_testfunc_large_struct_update_value(Test in)
+{
+ in.first = 0x0badf00d;
+ in.second = 0x0badf00d;
+ in.third = 0x0badf00d;
+}
+
EXPORT(void)testfunc_array(int values[4])
{
printf("testfunc_array %d %d %d %d\n",
@@ -645,6 +658,200 @@ EXPORT(void) TwoOutArgs(int a, int *pi, int b, int *pj)
}
#ifdef MS_WIN32
+
+typedef struct {
+ char f1;
+} Size1;
+
+typedef struct {
+ char f1;
+ char f2;
+} Size2;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+} Size3;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+} Size4;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+ char f5;
+} Size5;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+ char f5;
+ char f6;
+} Size6;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+ char f5;
+ char f6;
+ char f7;
+} Size7;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+ char f5;
+ char f6;
+ char f7;
+ char f8;
+} Size8;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+ char f5;
+ char f6;
+ char f7;
+ char f8;
+ char f9;
+} Size9;
+
+typedef struct {
+ char f1;
+ char f2;
+ char f3;
+ char f4;
+ char f5;
+ char f6;
+ char f7;
+ char f8;
+ char f9;
+ char f10;
+} Size10;
+
+EXPORT(Size1) TestSize1() {
+ Size1 f;
+ f.f1 = 'a';
+ return f;
+}
+
+EXPORT(Size2) TestSize2() {
+ Size2 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ return f;
+}
+
+EXPORT(Size3) TestSize3() {
+ Size3 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ return f;
+}
+
+EXPORT(Size4) TestSize4() {
+ Size4 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ return f;
+}
+
+EXPORT(Size5) TestSize5() {
+ Size5 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ f.f5 = 'e';
+ return f;
+}
+
+EXPORT(Size6) TestSize6() {
+ Size6 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ f.f5 = 'e';
+ f.f6 = 'f';
+ return f;
+}
+
+EXPORT(Size7) TestSize7() {
+ Size7 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ f.f5 = 'e';
+ f.f6 = 'f';
+ f.f7 = 'g';
+ return f;
+}
+
+EXPORT(Size8) TestSize8() {
+ Size8 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ f.f5 = 'e';
+ f.f6 = 'f';
+ f.f7 = 'g';
+ f.f8 = 'h';
+ return f;
+}
+
+EXPORT(Size9) TestSize9() {
+ Size9 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ f.f5 = 'e';
+ f.f6 = 'f';
+ f.f7 = 'g';
+ f.f8 = 'h';
+ f.f9 = 'i';
+ return f;
+}
+
+EXPORT(Size10) TestSize10() {
+ Size10 f;
+ f.f1 = 'a';
+ f.f2 = 'b';
+ f.f3 = 'c';
+ f.f4 = 'd';
+ f.f5 = 'e';
+ f.f6 = 'f';
+ f.f7 = 'g';
+ f.f8 = 'h';
+ f.f9 = 'i';
+ f.f10 = 'j';
+ return f;
+}
+
+#endif
+
+#ifdef MS_WIN32
EXPORT(S2H) __stdcall s_ret_2h_func(S2H inp) { return ret_2h_func(inp); }
EXPORT(S8I) __stdcall s_ret_8i_func(S8I inp) { return ret_8i_func(inp); }
#endif
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
index 61eba030e6..a0e6d8e1b8 100644
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -705,6 +705,8 @@ class Cursor(object):
self.__initialized = True
def close(self):
+ if not self.__initialized:
+ raise ProgrammingError("Base Cursor.__init__ not called.")
self.__connection._check_thread()
self.__connection._check_closed()
if self.__statement:
@@ -988,6 +990,7 @@ class Cursor(object):
return list(self)
def __get_connection(self):
+ self.__check_cursor()
return self.__connection
connection = property(__get_connection)
diff --git a/lib_pypy/_testcapimodule.c b/lib_pypy/_testcapimodule.c
index 40d3b777fa..b33f97878c 100644
--- a/lib_pypy/_testcapimodule.c
+++ b/lib_pypy/_testcapimodule.c
@@ -1,6 +1,5 @@
-/* Verbatim copy of Modules/_testcapimodule.c from CPython 2.7.12 w/
+/* Verbatim copy of Modules/_testcapimodule.c from CPython 2.7.18 w/
* parts disabled that rely on the not yet supported:
- * - PyBuffer_To/FromContiguous
* - PyThread_exit_thread
* - PyMarshal_*
* (via the PYPY_NOT_SUPPORTED define)
@@ -21,6 +20,14 @@
#ifndef PYPY_NOT_SUPPORTED
#include "marshal.h"
#endif
+#include <signal.h>
+#ifdef MS_WINDOWS
+# include <crtdbg.h>
+#endif
+
+#ifdef HAVE_SYS_WAIT_H
+#include <sys/wait.h> /* For W_STOPCODE */
+#endif
#ifdef WITH_THREAD
#include "pythread.h"
@@ -190,8 +197,7 @@ test_dict_iteration(PyObject* self)
* PyType_Ready if it hasn't already been called
*/
static PyTypeObject _HashInheritanceTester_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* Number of items for varobject */
+ PyVarObject_HEAD_INIT(NULL, 0)
"hashinheritancetester", /* Name of this type */
sizeof(PyObject), /* Basic object size */
0, /* Item size for varobject */
@@ -318,8 +324,7 @@ static PyBufferProcs memoryviewtester_as_buffer = {
};
static PyTypeObject _MemoryViewTester_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* Number of items for varobject */
+ PyVarObject_HEAD_INIT(NULL, 0)
"memoryviewtester", /* Name of this type */
sizeof(PyObject), /* Basic object size */
0, /* Item size for varobject */
@@ -388,7 +393,6 @@ test_broken_memoryview(PyObject* self)
Py_RETURN_NONE;
}
-#ifndef PYPY_NOT_SUPPORTED
static PyObject *
test_to_contiguous(PyObject* self, PyObject *noargs)
{
@@ -487,7 +491,6 @@ test_from_contiguous(PyObject* self, PyObject *noargs)
Py_RETURN_NONE;
}
-#endif /* ifndef PYPY_NOT_SUPPORTED */
/* Tests of PyLong_{As, From}{Unsigned,}Long(), and (#ifdef HAVE_LONG_LONG)
@@ -895,6 +898,26 @@ test_long_long_and_overflow(PyObject *self)
return Py_None;
}
+static PyObject *
+test_long_as_unsigned_long_long_mask(PyObject *self)
+{
+ unsigned PY_LONG_LONG res = PyLong_AsUnsignedLongLongMask(NULL);
+
+ if (res != (unsigned PY_LONG_LONG)-1 || !PyErr_Occurred()) {
+ return raiseTestError("test_long_as_unsigned_long_long_mask",
+ "PyLong_AsUnsignedLongLongMask(NULL) didn't "
+ "complain");
+ }
+ if (!PyErr_ExceptionMatches(PyExc_SystemError)) {
+ return raiseTestError("test_long_as_unsigned_long_long_mask",
+ "PyLong_AsUnsignedLongLongMask(NULL) raised "
+ "something other than SystemError");
+ }
+ PyErr_Clear();
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
/* Test the L code for PyArg_ParseTuple. This should deliver a PY_LONG_LONG
for both long and int arguments. The test may leak a little memory if
it fails.
@@ -916,8 +939,9 @@ test_L_code(PyObject *self)
PyTuple_SET_ITEM(tuple, 0, num);
value = -1;
- if (PyArg_ParseTuple(tuple, "L:test_L_code", &value) < 0)
+ if (!PyArg_ParseTuple(tuple, "L:test_L_code", &value)) {
return NULL;
+ }
if (value != 42)
return raiseTestError("test_L_code",
"L code returned wrong value for long 42");
@@ -930,8 +954,9 @@ test_L_code(PyObject *self)
PyTuple_SET_ITEM(tuple, 0, num);
value = -1;
- if (PyArg_ParseTuple(tuple, "L:test_L_code", &value) < 0)
+ if (!PyArg_ParseTuple(tuple, "L:test_L_code", &value)) {
return NULL;
+ }
if (value != 42)
return raiseTestError("test_L_code",
"L code returned wrong value for int 42");
@@ -1212,8 +1237,9 @@ test_k_code(PyObject *self)
PyTuple_SET_ITEM(tuple, 0, num);
value = 0;
- if (PyArg_ParseTuple(tuple, "k:test_k_code", &value) < 0)
+ if (!PyArg_ParseTuple(tuple, "k:test_k_code", &value)) {
return NULL;
+ }
if (value != ULONG_MAX)
return raiseTestError("test_k_code",
"k code returned wrong value for long 0xFFF...FFF");
@@ -1231,8 +1257,9 @@ test_k_code(PyObject *self)
PyTuple_SET_ITEM(tuple, 0, num);
value = 0;
- if (PyArg_ParseTuple(tuple, "k:test_k_code", &value) < 0)
+ if (!PyArg_ParseTuple(tuple, "k:test_k_code", &value)) {
return NULL;
+ }
if (value != (unsigned long)-0x42)
return raiseTestError("test_k_code",
"k code returned wrong value for long -0xFFF..000042");
@@ -1562,6 +1589,89 @@ getargs_et_hash(PyObject *self, PyObject *args)
return result;
}
+static PyObject *
+get_indices(PyObject *self, PyObject *args)
+{
+ int result;
+ PySliceObject *slice;
+ Py_ssize_t length, start, stop, step;
+
+ if (!PyArg_ParseTuple(args, "On", &slice, &length))
+ return NULL;
+
+ result = PySlice_GetIndices(slice, length, &start, &stop, &step);
+
+ if (PyErr_Occurred()) {
+ assert(result == -1);
+ return NULL;
+ }
+
+ if (result == -1) {
+ Py_RETURN_NONE;
+ }
+ return Py_BuildValue("innn", result, start, stop, step);
+}
+
+static PyObject *
+parse_tuple_and_keywords(PyObject *self, PyObject *args)
+{
+ PyObject *sub_args;
+ PyObject *sub_kwargs;
+ const char *sub_format;
+ PyObject *sub_keywords;
+
+ Py_ssize_t i, size;
+ char *keywords[8 + 1]; /* space for NULL at end */
+ PyObject *o;
+
+ int result;
+ PyObject *return_value = NULL;
+
+ double buffers[8][4]; /* double ensures alignment where necessary */
+
+ if (!PyArg_ParseTuple(args, "OOsO:parse_tuple_and_keywords",
+ &sub_args, &sub_kwargs,
+ &sub_format, &sub_keywords))
+ return NULL;
+
+ if (!(PyList_CheckExact(sub_keywords) || PyTuple_CheckExact(sub_keywords))) {
+ PyErr_SetString(PyExc_ValueError,
+ "parse_tuple_and_keywords: sub_keywords must be either list or tuple");
+ return NULL;
+ }
+
+ memset(buffers, 0, sizeof(buffers));
+ memset(keywords, 0, sizeof(keywords));
+
+ size = PySequence_Fast_GET_SIZE(sub_keywords);
+ if (size > 8) {
+ PyErr_SetString(PyExc_ValueError,
+ "parse_tuple_and_keywords: too many keywords in sub_keywords");
+ goto exit;
+ }
+
+ for (i = 0; i < size; i++) {
+ o = PySequence_Fast_GET_ITEM(sub_keywords, i);
+ keywords[i] = PyString_AsString(o);
+ if (keywords[i] == NULL) {
+ goto exit;
+ }
+ }
+
+ result = PyArg_ParseTupleAndKeywords(sub_args, sub_kwargs,
+ sub_format, keywords,
+ buffers + 0, buffers + 1, buffers + 2, buffers + 3,
+ buffers + 4, buffers + 5, buffers + 6, buffers + 7);
+
+ if (result) {
+ return_value = Py_None;
+ Py_INCREF(Py_None);
+ }
+
+exit:
+ return return_value;
+}
+
#ifdef Py_USING_UNICODE
static volatile int x;
@@ -1592,14 +1702,16 @@ test_u_code(PyObject *self)
PyTuple_SET_ITEM(tuple, 0, obj);
value = 0;
- if (PyArg_ParseTuple(tuple, "u:test_u_code", &value) < 0)
+ if (!PyArg_ParseTuple(tuple, "u:test_u_code", &value)) {
return NULL;
+ }
if (value != PyUnicode_AS_UNICODE(obj))
return raiseTestError("test_u_code",
"u code returned wrong value for u'test'");
value = 0;
- if (PyArg_ParseTuple(tuple, "u#:test_u_code", &value, &len) < 0)
+ if (!PyArg_ParseTuple(tuple, "u#:test_u_code", &value, &len)) {
return NULL;
+ }
if (value != PyUnicode_AS_UNICODE(obj) ||
len != PyUnicode_GET_SIZE(obj))
return raiseTestError("test_u_code",
@@ -1697,8 +1809,9 @@ test_empty_argparse(PyObject *self)
tuple = PyTuple_New(0);
if (!tuple)
return NULL;
- if ((result = PyArg_ParseTuple(tuple, "|:test_empty_argparse")) < 0)
+ if (!(result = PyArg_ParseTuple(tuple, "|:test_empty_argparse"))) {
goto done;
+ }
dict = PyDict_New();
if (!dict)
goto done;
@@ -1706,8 +1819,9 @@ test_empty_argparse(PyObject *self)
done:
Py_DECREF(tuple);
Py_XDECREF(dict);
- if (result < 0)
+ if (!result) {
return NULL;
+ }
else {
Py_RETURN_NONE;
}
@@ -1837,7 +1951,7 @@ set_errno(PyObject *self, PyObject *args)
Py_RETURN_NONE;
}
-#ifdef Py_USING_UNICODE
+#if defined(Py_USING_UNICODE) && !defined(Py_BUILD_CORE)
static int test_run_counter = 0;
static PyObject *
@@ -1962,7 +2076,8 @@ static int _pending_callback(void *arg)
/* The following requests n callbacks to _pending_callback. It can be
* run from any python thread.
*/
-PyObject *pending_threadfunc(PyObject *self, PyObject *arg)
+static PyObject *
+pending_threadfunc(PyObject *self, PyObject *arg)
{
PyObject *callable;
int r;
@@ -2498,22 +2613,106 @@ pymarshal_read_object_from_file(PyObject* self, PyObject *args)
}
#endif /* ifndef PYPY_NOT_SUPPORTED */
+static PyObject*
+test_raise_signal(PyObject* self, PyObject *args)
+{
+ int signum, err;
+
+ if (!PyArg_ParseTuple(args, "i:raise_signal", &signum)) {
+ return NULL;
+ }
+
+ err = raise(signum);
+ if (err)
+ return PyErr_SetFromErrno(PyExc_OSError);
+
+ if (PyErr_CheckSignals() < 0)
+ return NULL;
+
+ Py_RETURN_NONE;
+}
+
+
+#ifdef MS_WINDOWS
+static PyObject*
+msvcrt_CrtSetReportMode(PyObject* self, PyObject *args)
+{
+ int type, mode;
+ int res;
+
+ if (!PyArg_ParseTuple(args, "ii:CrtSetReportMode", &type, &mode)) {
+ return NULL;
+ }
+
+ res = _CrtSetReportMode(type, mode);
+ if (res == -1) {
+ PyErr_SetFromErrno(PyExc_OSError);
+ return NULL;
+ }
+ return PyInt_FromLong(res);
+}
+
+
+static PyObject*
+msvcrt_CrtSetReportFile(PyObject* self, PyObject *args)
+{
+ int type, file;
+ long res;
+
+ if (!PyArg_ParseTuple(args, "ii:CrtSetReportFile", &type, &file)) {
+ return NULL;
+ }
+
+ res = (long)_CrtSetReportFile(type, (_HFILE)file);
+
+ return PyInt_FromLong(res);
+}
+#endif
+
+
+#ifdef W_STOPCODE
+static PyObject*
+py_w_stopcode(PyObject *self, PyObject *args)
+{
+ int sig, status;
+ if (!PyArg_ParseTuple(args, "i", &sig)) {
+ return NULL;
+ }
+ status = W_STOPCODE(sig);
+ return PyLong_FromLong(status);
+}
+#endif
+
+
+/* Read memory from NULL (address 0) to raise a SIGSEGV or SIGBUS signal
+ depending on the platform. This function is used by
+ test.support._crash_python() to "crash" Python. */
+static PyObject *
+read_null(PyObject *self, PyObject *args)
+{
+ volatile int *x;
+ volatile int y;
+
+ x = NULL;
+ y = *x;
+ return PyLong_FromLong(y);
+
+}
+
static PyMethodDef TestMethods[] = {
{"raise_exception", raise_exception, METH_VARARGS},
{"set_errno", set_errno, METH_VARARGS},
{"test_config", (PyCFunction)test_config, METH_NOARGS},
-#ifdef Py_USING_UNICODE
+#if defined(Py_USING_UNICODE) && !defined(Py_BUILD_CORE)
{"test_datetime_capi", test_datetime_capi, METH_NOARGS},
#endif
{"test_list_api", (PyCFunction)test_list_api, METH_NOARGS},
{"test_dict_iteration", (PyCFunction)test_dict_iteration,METH_NOARGS},
{"test_lazy_hash_inheritance", (PyCFunction)test_lazy_hash_inheritance,METH_NOARGS},
{"test_broken_memoryview", (PyCFunction)test_broken_memoryview,METH_NOARGS},
-#ifndef PYPY_NOT_SUPPORTED
{"test_to_contiguous", (PyCFunction)test_to_contiguous, METH_NOARGS},
{"test_from_contiguous", (PyCFunction)test_from_contiguous, METH_NOARGS},
-#endif /* ifndef PYPY_NOT_SUPPORTED */
{"test_long_api", (PyCFunction)test_long_api, METH_NOARGS},
{"test_long_and_overflow", (PyCFunction)test_long_and_overflow,
METH_NOARGS},
@@ -2522,6 +2721,8 @@ static PyMethodDef TestMethods[] = {
#ifdef Py_USING_UNICODE
{"test_empty_argparse", (PyCFunction)test_empty_argparse,METH_NOARGS},
#endif
+ {"get_indices", get_indices, METH_VARARGS},
+ {"parse_tuple_and_keywords", parse_tuple_and_keywords, METH_VARARGS},
{"test_null_strings", (PyCFunction)test_null_strings, METH_NOARGS},
{"test_string_from_format", (PyCFunction)test_string_from_format, METH_NOARGS},
{"test_with_docstring", (PyCFunction)test_with_docstring, METH_NOARGS,
@@ -2548,6 +2749,8 @@ static PyMethodDef TestMethods[] = {
{"test_longlong_api", test_longlong_api, METH_NOARGS},
{"test_long_long_and_overflow",
(PyCFunction)test_long_long_and_overflow, METH_NOARGS},
+ {"test_long_as_unsigned_long_long_mask",
+ (PyCFunction)test_long_as_unsigned_long_long_mask, METH_NOARGS},
{"test_L_code", (PyCFunction)test_L_code, METH_NOARGS},
#endif
{"getargs_f", getargs_f, METH_VARARGS},
@@ -2616,6 +2819,15 @@ static PyMethodDef TestMethods[] = {
{"pymarshal_read_object_from_file",
pymarshal_read_object_from_file, METH_VARARGS},
#endif /* ifndef PYPY_NOT_SUPPORTED */
+ {"raise_signal", (PyCFunction)test_raise_signal, METH_VARARGS},
+#ifdef MS_WINDOWS
+ {"CrtSetReportMode", (PyCFunction)msvcrt_CrtSetReportMode, METH_VARARGS},
+ {"CrtSetReportFile", (PyCFunction)msvcrt_CrtSetReportFile, METH_VARARGS},
+#endif
+#ifdef W_STOPCODE
+ {"W_STOPCODE", py_w_stopcode, METH_VARARGS},
+#endif
+ {"_read_null", (PyCFunction)read_null, METH_NOARGS},
{NULL, NULL} /* sentinel */
};
@@ -2780,7 +2992,7 @@ init_testcapi(void)
m = Py_InitModule("_testcapi", TestMethods);
if (m == NULL)
return;
-
+
if (PyType_Ready(&_MemoryViewTester_Type) < 0)
return;
@@ -2815,6 +3027,14 @@ init_testcapi(void)
PyModule_AddObject(m, "PY_SSIZE_T_MIN", PyInt_FromSsize_t(PY_SSIZE_T_MIN));
PyModule_AddObject(m, "SIZEOF_PYGC_HEAD", PyInt_FromSsize_t(sizeof(PyGC_Head)));
+#ifdef MS_WINDOWS
+ PyModule_AddIntConstant(m, "CRT_WARN", _CRT_WARN);
+ PyModule_AddIntConstant(m, "CRT_ERROR", _CRT_ERROR);
+ PyModule_AddIntConstant(m, "CRT_ASSERT", _CRT_ASSERT);
+ PyModule_AddIntConstant(m, "CRTDBG_MODE_FILE", _CRTDBG_MODE_FILE);
+ PyModule_AddIntConstant(m, "CRTDBG_FILE_STDERR", (int)_CRTDBG_FILE_STDERR);
+#endif
+
TestError = PyErr_NewException("_testcapi.error", NULL, NULL);
Py_INCREF(TestError);
PyModule_AddObject(m, "error", TestError);
diff --git a/lib_pypy/_tkinter/tclobj.py b/lib_pypy/_tkinter/tclobj.py
index d5a7b2b301..483d1a1023 100644
--- a/lib_pypy/_tkinter/tclobj.py
+++ b/lib_pypy/_tkinter/tclobj.py
@@ -51,7 +51,7 @@ def FromWideIntObj(app, value):
wide = tkffi.new("Tcl_WideInt*")
if tklib.Tcl_GetWideIntFromObj(app.interp, value, wide) != tklib.TCL_OK:
app.raiseTclError()
- return wide[0]
+ return int(wide[0])
# Only when tklib.HAVE_LIBTOMMATH!
def FromBignumObj(app, value):
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
index 9ca2c4e526..cd2be238f2 100644
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -479,8 +479,11 @@ class timedelta(deltainterop):
@classmethod
def _from_microseconds(cls, us):
- s, us = divmod(us, _US_PER_SECOND)
- d, s = divmod(s, _SECONDS_PER_DAY)
+ try:
+ s, us = divmod(us, _US_PER_SECOND)
+ d, s = divmod(s, _SECONDS_PER_DAY)
+ except ValueError:
+ raise TypeError('__divmod__ must return a tuple of 2 numbers')
return cls._create(d, s, us, False)
@classmethod
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
index 242950d072..bbf2648f71 100644
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -16,4 +16,8 @@ Add posix.sendfile to darwin for python3.6+
.. branch: app_main
-avoid using ``import os`` until after ``import site`` in ``app_main``
+Avoid using ``import os`` until after ``import site`` in ``app_main``
+
+.. branch: stdlib-2.7.18-3
+
+Update lib-python/2.7 to stdlib-2.7.18 and fix many tests
diff --git a/pypy/interpreter/astcompiler/astbuilder.py b/pypy/interpreter/astcompiler/astbuilder.py
index 0e2501a3d3..baa2623b24 100644
--- a/pypy/interpreter/astcompiler/astbuilder.py
+++ b/pypy/interpreter/astcompiler/astbuilder.py
@@ -516,7 +516,7 @@ class ASTBuilder(object):
dec = dec_name
elif decorator_node.num_children() == 5:
dec = ast.Call(dec_name, None, None, None, None,
- decorator_node.get_lineno(), decorator_node.get_column())
+ dec_name.lineno, dec_name.col_offset)
else:
dec = self.handle_call(decorator_node.get_child(3), dec_name)
return dec
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
index dcfdb90a17..91d4ec2b8e 100644
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -596,6 +596,8 @@ class TestAstBuilder:
assert isinstance(dec, ast.Name)
assert dec.id == "dec"
assert dec.ctx == ast.Load
+ assert dec.lineno == 1
+ assert dec.col_offset == 1
definition = self.get_first_stmt("@mod.hi.dec\n%s" % (stmt,))
assert len(definition.decorator_list) == 1
dec = definition.decorator_list[0]
@@ -623,6 +625,8 @@ class TestAstBuilder:
assert dec.keywords is None
assert dec.starargs is None
assert dec.kwargs is None
+ assert dec.lineno == 1
+ assert dec.col_offset == 1
definition = self.get_first_stmt("@dec(a, b)\n%s" % (stmt,))
assert len(definition.decorator_list) == 1
dec = definition.decorator_list[0]
@@ -632,6 +636,8 @@ class TestAstBuilder:
assert dec.keywords is None
assert dec.starargs is None
assert dec.kwargs is None
+ assert dec.lineno == 1
+ assert dec.col_offset == 1
def test_augassign(self):
aug_assigns = (
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
index c1c9e8aab8..6f7d10b2a9 100644
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -337,8 +337,11 @@ class ExecutionContext(object):
# if it does not exist yet and the tracer accesses it via
# frame.f_locals, it is filled by PyFrame.getdictscope
frame.fast2locals()
+ prev_line_tracing = d.is_in_line_tracing
self.is_tracing += 1
try:
+ if event == 'line':
+ d.is_in_line_tracing = True
try:
w_result = space.call_function(w_callback, frame, space.newtext(event), w_arg)
if space.is_w(w_result, space.w_None):
@@ -353,6 +356,7 @@ class ExecutionContext(object):
raise
finally:
self.is_tracing -= 1
+ d.is_in_line_tracing = prev_line_tracing
if d.w_locals is not None:
frame.locals2fast()
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
index 374d0abfad..9cb01e5c76 100644
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -23,7 +23,7 @@ from pypy.tool import stdlib_opcode
# Define some opcodes used
for op in '''DUP_TOP POP_TOP SETUP_LOOP SETUP_EXCEPT SETUP_FINALLY SETUP_WITH
-POP_BLOCK END_FINALLY'''.split():
+POP_BLOCK END_FINALLY YIELD_VALUE WITH_CLEANUP'''.split():
globals()[op] = stdlib_opcode.opmap[op]
HAVE_ARGUMENT = stdlib_opcode.HAVE_ARGUMENT
@@ -36,6 +36,7 @@ class FrameDebugData(object):
instr_prev_plus_one = 0
f_lineno = 0 # current lineno for tracing
is_being_profiled = False
+ is_in_line_tracing = False
w_locals = None
def __init__(self, pycode):
@@ -707,10 +708,26 @@ class PyFrame(W_Root):
except OperationError:
raise oefmt(space.w_ValueError, "lineno must be an integer")
+ # You can only do this from within a trace function, not via
+ # _getframe or similar hackery.
+ if space.int_w(self.fget_f_lasti(space)) == -1:
+ raise oefmt(space.w_ValueError,
+ "can't jump from the 'call' trace event of a new frame")
if self.get_w_f_trace() is None:
raise oefmt(space.w_ValueError,
"f_lineno can only be set by a trace function.")
+ code = self.pycode.co_code
+ if ord(code[self.last_instr]) == YIELD_VALUE:
+ raise oefmt(space.w_ValueError,
+ "can't jump from a yield statement")
+
+ # Only allow jumps when we're tracing a line event.
+ d = self.getorcreatedebug()
+ if not d.is_in_line_tracing:
+ raise oefmt(space.w_ValueError,
+ "can only jump from a 'line' trace event")
+
line = self.pycode.co_firstlineno
if new_lineno < line:
raise oefmt(space.w_ValueError,
@@ -734,7 +751,6 @@ class PyFrame(W_Root):
"line %d comes after the current code.", new_lineno)
# Don't jump to a line with an except in it.
- code = self.pycode.co_code
if ord(code[new_lasti]) in (DUP_TOP, POP_TOP):
raise oefmt(space.w_ValueError,
"can't jump to 'except' line as there's no exception")
@@ -821,12 +837,17 @@ class PyFrame(W_Root):
raise oefmt(space.w_ValueError,
"can't jump into the middle of a block")
+ # Pop any blocks that we're jumping out of.
+ from pypy.interpreter.pyopcode import FinallyBlock
while f_iblock > new_iblock:
block = self.pop_block()
block.cleanup(self)
f_iblock -= 1
+ if (isinstance(block, FinallyBlock)
+ and ord(code[block.handlerposition]) == WITH_CLEANUP):
+ self.popvalue() # Pop the exit function.
- self.getorcreatedebug().f_lineno = new_lineno
+ d.f_lineno = new_lineno
self.last_instr = new_lasti
def get_last_lineno(self):
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
index be96e3da66..80de2b0c31 100644
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -179,7 +179,7 @@ class TestTypeDef:
self.space.appexec([w_obj], """(obj):
assert type(obj).__hash__ is None
err = raises(TypeError, hash, obj)
- assert err.value.message == "'some_type' objects are unhashable"
+ assert str(err.value) == "unhashable type: 'some_type'"
""")
def test_destructor(self):
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
index c45b6dae97..8082cae06f 100644
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -233,24 +233,23 @@ class W_IOBase(W_Root):
def readlines_w(self, space, w_hint=None):
hint = convert_size(space, w_hint)
-
if hint <= 0:
return space.newlist(space.unpackiterable(self))
- lines_w = []
length = 0
- while True:
- w_line = space.call_method(self, "readline")
- line_length = space.len_w(w_line)
- if line_length == 0: # done
+ lines_w = []
+ w_iterator = space.iter(self)
+ while 1:
+ try:
+ w_line = space.next(w_iterator)
+ except OperationError as e:
+ if not e.match(space, space.w_StopIteration):
+ raise
break
-
lines_w.append(w_line)
-
- length += line_length
+ length += space.len_w(w_line)
if length > hint:
break
-
return space.newlist(lines_w)
def writelines_w(self, space, w_lines):
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
index 3ccb9aa4b9..32a29a23b1 100644
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -662,6 +662,9 @@ class W_TextIOWrapper(W_TextIOBase):
# To prepare for tell(), we need to snapshot a point in the file
# where the decoder's input buffer is empty.
w_state = space.call_method(self.w_decoder, "getstate")
+ if (not space.isinstance_w(w_state, space.w_tuple)
+ or space.len_w(w_state) != 2):
+ raise oefmt(space.w_TypeError, "illegal decoder state")
# Given this, we know there was a valid snapshot point
# len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
w_dec_buffer, w_dec_flags = space.unpackiterable(w_state, 2)
@@ -735,6 +738,7 @@ class W_TextIOWrapper(W_TextIOBase):
chars, lgt = self.decoded.get_chars(-1)
w_result = space.newutf8(chars, lgt)
w_final = space.add(w_result, w_decoded)
+ self.decoded.reset()
self.snapshot = None
return w_final
@@ -869,6 +873,10 @@ class W_TextIOWrapper(W_TextIOBase):
else:
w_bytes = space.call_method(self.w_encoder, "encode", w_text)
+ if not space.isinstance_w(w_bytes, space.w_bytes):
+ raise oefmt(space.w_TypeError,
+ "encoder should return a bytes object, not '%T'", w_bytes)
+
b = space.bytes_w(w_bytes)
if not self.pending_bytes:
self.pending_bytes = []
@@ -882,6 +890,7 @@ class W_TextIOWrapper(W_TextIOBase):
if needflush:
space.call_method(self.w_buffer, "flush")
+ self.decoded.reset()
self.snapshot = None
if self.w_decoder:
diff --git a/pypy/module/_io/test/apptest_io.py b/pypy/module/_io/test/apptest_io.py
index 5b4406359f..960fc58569 100644
--- a/pypy/module/_io/test/apptest_io.py
+++ b/pypy/module/_io/test/apptest_io.py
@@ -18,6 +18,20 @@ def test_iobase():
pass
MyFile("file")
+def test_iobase_overriding():
+ import io
+ class WithIter(io.IOBase):
+ def __iter__(self):
+ yield 'foo'
+ assert WithIter().readlines() == ['foo']
+ assert WithIter().readlines(1) == ['foo']
+
+ class WithNext(io.IOBase):
+ def next(self):
+ raise StopIteration
+ assert WithNext().readlines() == []
+ assert WithNext().readlines(1) == []
+
def test_openclose():
import io
with io.BufferedIOBase() as f:
diff --git a/pypy/module/_io/test/apptest_textio.py b/pypy/module/_io/test/apptest_textio.py
index a40190bc78..a63c797639 100644
--- a/pypy/module/_io/test/apptest_textio.py
+++ b/pypy/module/_io/test/apptest_textio.py
@@ -225,6 +225,29 @@ def test_flush_error_on_close():
raises(IOError, txt.close) # exception not swallowed
assert txt.closed
+def test_illegal_encoder():
+ # bpo-31271: A TypeError should be raised in case the return value of
+ # encoder's encode() is invalid.
+ class BadEncoder:
+ def encode(self, dummy):
+ return u'spam'
+ def get_bad_encoder(dummy):
+ return BadEncoder()
+ import codecs
+ rot13 = codecs.lookup("rot13")
+ text_encoding = rot13._is_text_encoding
+ incrementalencoder = rot13.incrementalencoder
+ rot13._is_text_encoding = True
+ rot13.incrementalencoder = get_bad_encoder
+ try:
+ t = _io.TextIOWrapper(_io.BytesIO(b'foo'), encoding="rot13")
+ finally:
+ rot13._is_text_encoding = text_encoding
+ rot13.incrementalencoder = incrementalencoder
+ with raises(TypeError):
+ t.write(u'bar')
+ t.flush()
+
def test_illegal_decoder():
t = _io.TextIOWrapper(_io.BytesIO(b'aaaaaa'), newline='\n',
encoding='quopri_codec')
@@ -255,6 +278,19 @@ def test_uninitialized():
t.__init__(_io.BytesIO())
assert t.read(0) == u''
+def test_issue25862():
+ # CPython issue #25862
+ # Assertion failures occurred in tell() after read() and write().
+ from _io import TextIOWrapper, BytesIO
+ t = TextIOWrapper(BytesIO(b'test'), encoding='ascii')
+ t.read(1)
+ t.read()
+ t.tell()
+ t = TextIOWrapper(BytesIO(b'test'), encoding='ascii')
+ t.read(1)
+ t.write(u'x')
+ t.tell()
+
def test_newline_decoder():
def check_newline_decoding_utf8(decoder):
# UTF-8 specific tests for a newline decoder
diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c
index 52c2a4a855..b7dec837c1 100644
--- a/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c
+++ b/pypy/module/_multibytecodec/src/cjkcodecs/_codecs_cn.c
@@ -266,7 +266,9 @@ DECODER(gb18030)
REQUIRE_INBUF(4)
c3 = IN3;
c4 = IN4;
- if (c < 0x81 || c3 < 0x81 || c4 < 0x30 || c4 > 0x39)
+ if (c < 0x81 || c > 0xFE ||
+ c3 < 0x81 || c3 > 0xFE ||
+ c4 < 0x30 || c4 > 0x39)
return 4;
c -= 0x81; c2 -= 0x30;
c3 -= 0x81; c4 -= 0x30;
@@ -333,15 +335,17 @@ ENCODER(hz)
DBCHAR code;
if (c < 0x80) {
- if (state->i == 0) {
- WRITE1((unsigned char)c)
- NEXT(1, 1)
- }
- else {
- WRITE3('~', '}', (unsigned char)c)
- NEXT(1, 3)
+ if (state->i) {
+ WRITE2('~', '}');
+ NEXT_OUT(2);
state->i = 0;
}
+ WRITE1((unsigned char)c);
+ NEXT(1, 1);
+ if (c == '~') {
+ WRITE1('~');
+ NEXT_OUT(1);
+ }
continue;
}
@@ -388,17 +392,15 @@ DECODER(hz)
unsigned char c2 = IN2;
REQUIRE_INBUF(2)
- if (c2 == '~') {
+ if (c2 == '~' && state->i == 0) {
WRITE1('~')
- NEXT(2, 1)
- continue;
}
else if (c2 == '{' && state->i == 0)
state->i = 1; /* set GB */
+ else if (c2 == '\n' && state->i == 0)
+ ; /* line-continuation */
else if (c2 == '}' && state->i == 1)
state->i = 0; /* set ASCII */
- else if (c2 == '\n')
- ; /* line-continuation */
else
return 2;
NEXT(2, 0);
diff --git a/pypy/module/_multibytecodec/test/test_c_codecs.py b/pypy/module/_multibytecodec/test/test_c_codecs.py
index 7c9178d7cb..9b58c97ef1 100644
--- a/pypy/module/_multibytecodec/test/test_c_codecs.py
+++ b/pypy/module/_multibytecodec/test/test_c_codecs.py
@@ -1,4 +1,5 @@
import py
+import pytest
from pypy.module._multibytecodec.c_codecs import getcodec, codecs
from pypy.module._multibytecodec.c_codecs import decode, encode
from pypy.module._multibytecodec.c_codecs import EncodeDecodeError
@@ -18,6 +19,15 @@ def test_decode_gbk():
u = decode(c, "foobar")
assert u == "foobar"
+@pytest.mark.parametrize('undecodable', [
+ b"abc\x80\x80\xc1\xc4",
+ b"\xff\x30\x81\x30", b"\x81\x30\xff\x30", # bpo-29990
+])
+def test_decode_gb18030_error(undecodable):
+ c = getcodec("gb18030")
+ with pytest.raises(EncodeDecodeError):
+ decode(c, undecodable)
+
def test_decode_hz():
# stateful
c = getcodec("hz")
@@ -99,6 +109,9 @@ def test_encode_hz():
assert s == 'foobar' and type(s) is str
s = encode(c, u'\u5f95\u6cef'.encode('utf8'), 2)
assert s == '~{abc}~}'
+ # bpo-30003
+ s = encode(c, 'ab~cd', 5)
+ assert s == 'ab~~cd'
def test_encode_hz_error():
# error
diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py
index dcd2510229..5161dc1e34 100644
--- a/pypy/module/_random/interp_random.py
+++ b/pypy/module/_random/interp_random.py
@@ -67,15 +67,17 @@ class W_Random(W_Root):
# independent of platfrom, since the below condition is only
# true on 32 bit platforms anyway
w_add = space.pow(space.newint(2), space.newint(32), space.w_None)
+ _state = [r_uint(0)] * rrandom.N
for i in range(rrandom.N):
w_item = space.getitem(w_state, space.newint(i))
if space.is_true(space.lt(w_item, w_zero)):
w_item = space.add(w_item, w_add)
- self._rnd.state[i] = space.uint_w(w_item)
+ _state[i] = space.uint_w(w_item)
w_item = space.getitem(w_state, space.newint(rrandom.N))
index = space.int_w(w_item)
if index < 0 or index > rrandom.N:
raise oefmt(space.w_ValueError, "invalid state")
+ self._rnd.state = _state
self._rnd.index = index
def jumpahead(self, space, w_n):
diff --git a/pypy/module/_random/test/test_random.py b/pypy/module/_random/test/test_random.py
index e84207aef2..359cb2fe3b 100644
--- a/pypy/module/_random/test/test_random.py
+++ b/pypy/module/_random/test/test_random.py
@@ -41,6 +41,18 @@ class AppTestRandom:
# does not crash
rnd1.setstate((-1, ) * 624 + (0, ))
+ def test_failed_setstate(self):
+ import _random
+ rnd = _random.Random()
+ rnd.seed()
+ start_state = rnd.getstate()
+ raises(TypeError, rnd.setstate, None)
+ raises(ValueError, rnd.setstate, (1, 2, 3))
+ raises(TypeError, rnd.setstate, ('a',)*624 + (1,))
+ raises(ValueError, rnd.setstate, (1,)*624 + (625,))
+ # None of these failed calls should have changed the state
+ assert rnd.getstate() == start_state
+
def test_state_repr(self):
# since app-level jumpahead salts with repr(state),
# it is important the repr is consistent with cpython
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
index b0cb6bfb9f..57dac53b4a 100644
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -590,9 +590,9 @@ class AppTestSocket:
else:
assert ret == b'\x00\x00'
s.setsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, True)
- assert s.getsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, 0) == 1
+ assert s.getsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, 0) != 0
s.setsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, 1)
- assert s.getsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, 0) == 1
+ assert s.getsockopt(_socket.IPPROTO_TCP, _socket.TCP_NODELAY, 0) != 0
def test_getsockopt_bad_length(self):
import _socket
@@ -766,6 +766,7 @@ class AppTestSocketTCP:
def test_recv_send_timeout(self):
from _socket import socket, timeout, SOL_SOCKET, SO_RCVBUF, SO_SNDBUF
+ import sys
cli = socket()
cli.connect(self.serv.getsockname())
t, addr = self.serv.accept()
@@ -793,7 +794,12 @@ class AppTestSocketTCP:
try:
while 1:
count += cli.send(b'foobar' * 70)
- assert count < 100000
+ if sys.platform == 'darwin':
+ # MacOS will auto-tune up to 512k
+ # (net.inet.tcp.doauto{rcv,snd}buf sysctls)
+ assert count < 1000000
+ else:
+ assert count < 100000
except timeout:
pass
t.recv(count)
diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py
index 0ef1a294c4..6605e0d68c 100644
--- a/pypy/module/cpyext/longobject.py
+++ b/pypy/module/cpyext/longobject.py
@@ -110,6 +110,8 @@ def PyLong_AsUnsignedLongLongMask(space, w_long):
PyLongObject, if it is not already one, and then return its value as
unsigned long long, without checking for overflow.
"""
+ if w_long is None:
+ raise oefmt(space.w_SystemError, "value is NULL")
num = space.bigint_w(w_long)
return num.ulonglongmask()
diff --git a/pypy/module/cpyext/sliceobject.py b/pypy/module/cpyext/sliceobject.py
index 843a825308..d8146e37b8 100644
--- a/pypy/module/cpyext/sliceobject.py
+++ b/pypy/module/cpyext/sliceobject.py
@@ -97,6 +97,9 @@ def PySlice_GetIndices(space, w_slice, length, start_p, stop_p, step_p):
in the source of your extension."""
if not isinstance(w_slice, W_SliceObject):
raise PyErr_BadInternalCall(space)
- start_p[0], stop_p[0], step_p[0] = \
- w_slice.indices3(space, length)
+ try:
+ start_p[0], stop_p[0], step_p[0] = \
+ w_slice.indices3(space, length)
+ except:
+ return -1
return 0
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
index c909bfd91a..03d861fee9 100644
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -80,16 +80,26 @@ class TestLongObject(BaseApiTest):
assert excinfo.value.w_type is space.w_OverflowError
assert PyLong_AsUnsignedLongLong(space, space.wrap(1 << 63)) == 1 << 63
+
with pytest.raises(OperationError) as excinfo:
PyLong_AsUnsignedLongLong(space, space.wrap(1 << 64))
assert excinfo.value.w_type is space.w_OverflowError
+ with pytest.raises(OperationError) as excinfo:
+ PyLong_AsUnsignedLongLong(space, None)
+ assert excinfo.value.w_type is space.w_SystemError
+
assert PyLong_AsUnsignedLongLongMask(space, space.wrap(1 << 64)) == 0
with pytest.raises(OperationError) as excinfo:
PyLong_AsUnsignedLongLong(space, space.newint(-1))
assert excinfo.value.w_type is space.w_OverflowError
+ with pytest.raises(OperationError) as excinfo:
+ PyLong_AsUnsignedLongLongMask(space, None)
+ assert excinfo.value.w_type is space.w_SystemError
+
+
def test_as_long_and_overflow(self, space, api):
overflow = lltype.malloc(rffi.CArrayPtr(rffi.INT_real).TO, 1, flavor='raw')
assert api.PyLong_AsLongAndOverflow(
diff --git a/pypy/module/cpyext/test/test_sliceobject.py b/pypy/module/cpyext/test/test_sliceobject.py
index 7f368e0335..3f0f18c742 100644
--- a/pypy/module/cpyext/test/test_sliceobject.py
+++ b/pypy/module/cpyext/test/test_sliceobject.py
@@ -30,11 +30,13 @@ class TestSliceObject(BaseApiTest):
res = api.PySlice_GetIndices(w_slice, 100, values,
rffi.ptradd(values, 1),
rffi.ptradd(values, 2))
- assert res == 0
+ if res != 0:
+ return None
rv = values[0], values[1], values[2]
lltype.free(values, flavor='raw')
return rv
assert get_indices(w(10), w(20), w(1), 200) == (10, 20, 1)
+ assert get_indices(w(10.1), w(20), w(1), 200) is None
class AppTestSliceMembers(AppTestCpythonExtensionBase):
def test_members(self):
diff --git a/pypy/module/itertools/interp_itertools.py b/pypy/module/itertools/interp_itertools.py
index 116fa5371c..5cdb94a3fe 100644
--- a/pypy/module/itertools/interp_itertools.py
+++ b/pypy/module/itertools/interp_itertools.py
@@ -869,6 +869,7 @@ def tee(space, w_iterable, n=2):
class TeeChainedListNode(object):
w_obj = None
+ running = False
class W_TeeIterable(W_Root):
@@ -883,9 +884,16 @@ class W_TeeIterable(W_Root):
def next_w(self):
chained_list = self.chained_list
+ if chained_list.running:
+ raise oefmt(self.space.w_RuntimeError,
+ "cannot re-enter the tee iterator")
w_obj = chained_list.w_obj
if w_obj is None:
- w_obj = self.space.next(self.w_iterator)
+ chained_list.running = True
+ try:
+ w_obj = self.space.next(self.w_iterator)
+ finally:
+ chained_list.running = False
chained_list.next = TeeChainedListNode()
chained_list.w_obj = w_obj
self.chained_list = chained_list.next
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
index cf6abfebd9..4cd966988d 100644
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -1155,3 +1155,31 @@ class AppTestItertools27(object):
myiter = Iterator()
islice = itertools.islice(myiter, 5, 8)
raises(StopIteration, islice.next)
+
+
+class AppTestItertools28(object):
+ spaceconfig = {"usemodules": ['itertools', 'thread']}
+
+ def test_tee_concurrent(self):
+ from itertools import tee
+ import threading
+ start = threading.Event()
+ finish = threading.Event()
+ class I:
+ def __iter__(self):
+ return self
+ def next(self):
+ start.set()
+ assert finish.wait(5), 'timed out'
+
+ a, b = tee(I())
+ thread = threading.Thread(target=next, args=[a])
+ thread.start()
+ try:
+ start.wait()
+ with raises(RuntimeError) as exc:
+ next(b)
+ assert 'tee' in str(exc)
+ finally:
+ finish.set()
+ thread.join()
diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
index 449ec68560..3127ddebe7 100644
--- a/pypy/module/posix/interp_posix.py
+++ b/pypy/module/posix/interp_posix.py
@@ -534,6 +534,15 @@ def _convertenviron(space, w_env):
@unwrap_spec(name='text0', value='text0')
def putenv(space, name, value):
"""Change or add an environment variable."""
+ # Search from index 1 because on Windows starting '=' is allowed for
+ # defining hidden environment variables.
+ if _WIN32:
+ if len(name) == 0 or '=' in name[1:]:
+ raise oefmt(space.w_ValueError, "illegal environment variable name")
+ else:
+ if '=' in name:
+ raise oefmt(space.w_ValueError, "illegal environment variable name")
+
if _WIN32 and len(name) > _MAX_ENV:
raise oefmt(space.w_ValueError,
"the environment variable is longer than %d bytes",
diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py
index af5398652c..afa2c51a1c 100644
--- a/pypy/module/posix/test/test_posix2.py
+++ b/pypy/module/posix/test/test_posix2.py
@@ -391,7 +391,7 @@ class AppTestPosix:
# the file properly.
# This test should be run in multiple macOS platforms to
# be sure that is working as expected.
- if file_system_encoding == 'UTF-8':
+ if file_system_encoding.lower() == 'utf-8':
assert (unicode, 'cafxe9') in typed_result
else:
# darwin 'normalized' it
@@ -1215,6 +1215,14 @@ class AppTestEnvironment(object):
res = os.system(cmd)
assert res == 0
+ def test_putenv_invalid_name(self):
+ import os, sys
+ if sys.platform.startswith('win'):
+ os.putenv("=hidden", "foo")
+ raises(ValueError, os.putenv, "foo=bar", "xxx")
+ else:
+ raises(ValueError, os.putenv, "=foo", "xxx")
+
class AppTestPosixUnicode:
def setup_class(cls):
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
index a2d306fe42..5431a2f1bb 100644
--- a/pypy/module/select/interp_kqueue.py
+++ b/pypy/module/select/interp_kqueue.py
@@ -161,9 +161,10 @@ class W_Kqueue(W_Root):
max_events)
if space.is_w(w_changelist, space.w_None):
- changelist_len = 0
+ changelist_list = []
else:
- changelist_len = space.len_w(w_changelist)
+ changelist_list = space.listview(w_changelist)
+ changelist_len = len(changelist_list)
with lltype.scoped_alloc(rffi.CArray(kevent), changelist_len) as changelist:
with lltype.scoped_alloc(rffi.CArray(kevent), max_events) as eventlist:
@@ -184,8 +185,8 @@ class W_Kqueue(W_Root):
ptimeout = lltype.nullptr(timespec)
if not space.is_w(w_changelist, space.w_None):
- i = 0
- for w_ev in space.listview(w_changelist):
+ for i in range(changelist_len):
+ w_ev = changelist_list[i]
ev = space.interp_w(W_Kevent, w_ev)
changelist[i].c_ident = ev.ident
changelist[i].c_filter = ev.filter
@@ -193,7 +194,6 @@ class W_Kqueue(W_Root):
changelist[i].c_fflags = ev.fflags
changelist[i].c_data = ev.data
changelist[i].c_udata = ev.udata
- i += 1
pchangelist = changelist
else:
pchangelist = lltype.nullptr(rffi.CArray(kevent))
diff --git a/pypy/module/select/test/test_kqueue.py b/pypy/module/select/test/test_kqueue.py
index e296ef12fa..a8fb6358f5 100644
--- a/pypy/module/select/test/test_kqueue.py
+++ b/pypy/module/select/test/test_kqueue.py
@@ -189,3 +189,29 @@ class AppTestKqueue(object):
a.close()
b.close()
kq.close()
+
+ def test_issue30058(self):
+ import select
+ import socket
+ # changelist must be an iterable
+ kq = select.kqueue()
+ a, b = socket.socketpair()
+ ev = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE)
+
+ kq.control([ev], 0)
+ # not a list
+ kq.control((ev,), 0)
+ # __len__ is not consistent with __iter__
+ class BadList:
+ def __len__(self):
+ return 0
+ def __iter__(self):
+ for i in range(100):
+ yield ev
+ kq.control(BadList(), 0)
+ # doesn't have __len__
+ kq.control(iter([ev]), 0)
+
+ a.close()
+ b.close()
+ kq.close()
diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py
index d45ff5c7fd..37f7199c0a 100644
--- a/pypy/module/signal/interp_signal.py
+++ b/pypy/module/signal/interp_signal.py
@@ -282,8 +282,13 @@ def siginterrupt(space, signum, flag):
#__________________________________________________________
def timeval_from_double(d, timeval):
- rffi.setintfield(timeval, 'c_tv_sec', int(d))
- rffi.setintfield(timeval, 'c_tv_usec', int((d - int(d)) * 1000000))
+ c_tv_sec = int(d)
+ c_tv_usec = int((d - int(d)) * 1000000)
+ # Don't disable the timer if the computation above rounds down to zero.
+ if d > 0.0 and c_tv_sec == 0 and c_tv_usec == 0:
+ c_tv_usec = 1
+ rffi.setintfield(timeval, 'c_tv_sec', c_tv_sec)
+ rffi.setintfield(timeval, 'c_tv_usec', c_tv_usec)
def double_from_timeval(tv):
diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py
index 934a3e63a4..5f3967151c 100644
--- a/pypy/module/test_lib_pypy/test_os_wait.py
+++ b/pypy/module/test_lib_pypy/test_os_wait.py
@@ -2,6 +2,7 @@
from __future__ import absolute_import
import os
import py
+import sys
from pypy.module.test_lib_pypy import test_resource # side-effect: skip()
@@ -36,5 +37,7 @@ def test_os_wait4():
assert isinstance(rusage.ru_maxrss, int)
def test_errors():
- py.test.raises(OSError, _pypy_wait.wait3, -999)
+ # MacOS ignores invalid options
+ if sys.platform != 'darwin':
+ py.test.raises(OSError, _pypy_wait.wait3, -999)
py.test.raises(OSError, _pypy_wait.wait4, -999, -999)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
index e6be6125c5..c4eaec9ea4 100644
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -439,7 +439,7 @@ class DescrOperation(object):
return default_identity_hash(space, w_obj)
if space.is_w(w_hash, space.w_None):
raise oefmt(space.w_TypeError,
- "'%T' objects are unhashable", w_obj)
+ "unhashable type: '%T'", w_obj)
w_result = space.get_and_call_function(w_hash, w_obj)
# issue 2346 : returns now -2 for hashing -1 like cpython
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
index 9d78717f0d..d42e6405ab 100644
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -224,9 +224,10 @@ class W_BytearrayObject(W_Root):
# unicode, got int object"
w_source = encode_object(space, w_source, encoding, errors)
- # Is it an int?
+ # Is it an integer?
+ # Note that we're calling space.getindex_w() instead of space.int_w().
try:
- count = space.int_w(w_source)
+ count = space.getindex_w(w_source, space.w_OverflowError)
except OperationError as e:
if not e.match(space, space.w_TypeError):
raise
diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py
index 581e322049..5f689269e6 100644
--- a/pypy/objspace/std/test/test_bytearrayobject.py
+++ b/pypy/objspace/std/test/test_bytearrayobject.py
@@ -653,3 +653,15 @@ class AppTestBytesArray:
assert x.find(b'fe') == -1
assert x.index(b'f', 2, 11) == 5
assert x.__alloc__() == 14
+
+ def test_fromobject___index__(self):
+ class WithIndex:
+ def __index__(self):
+ return 3
+ assert bytearray(WithIndex()) == b'\x00\x00\x00'
+
+ def test_fromobject___int__(self):
+ class WithInt:
+ def __int__(self):
+ return 3
+ raises(TypeError, bytearray, WithInt())
diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py
index 18e09e05ef..1f728ad96e 100644
--- a/pypy/objspace/std/test/test_newformat.py
+++ b/pypy/objspace/std/test/test_newformat.py
@@ -390,7 +390,7 @@ class AppTestFloatFormatting:
locale.setlocale(locale.LC_NUMERIC, 'C')
def test_locale_german(self):
- import locale
+ import locale, sys
for name in ['de_DE', 'de_DE.utf8']:
try:
locale.setlocale(locale.LC_NUMERIC, name)
@@ -401,9 +401,15 @@ class AppTestFloatFormatting:
skip("no german locale")
x = 1234.567890
try:
- assert locale.format('%g', x, grouping=True) == '1.234,57'
- assert format(x, 'n') == '1.234,57'
- assert format(12345678901234, 'n') == '12.345.678.901.234'
+ if sys.platform != "darwin":
+ assert locale.format('%g', x, grouping=True) == '1.234,57'
+ assert format(x, 'n') == '1.234,57'
+ assert format(12345678901234, 'n') == '12.345.678.901.234'
+ else:
+ # No thousands separator on German in MacOS since 10.4
+ assert locale.format('%g', x, grouping=True) == '1234,57'
+ assert format(x, 'n') == '1234,57'
+ assert format(12345678901234, 'n') == '12345678901234'
finally:
locale.setlocale(locale.LC_NUMERIC, 'C')
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
index d75e3af9a4..f0746843ba 100644
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -636,7 +636,7 @@ class W_TypeObject(W_Root):
w_newtype, w_newdescr = self.hack_which_new_to_call(
w_newtype, w_newdescr)
#
- w_newfunc = space.get(w_newdescr, self)
+ w_newfunc = space.get(w_newdescr, space.w_None, w_type=self)
if (space.config.objspace.std.newshortcut and
not we_are_jitted() and space._side_effects_ok() and
isinstance(w_newtype, W_TypeObject)):
diff --git a/pypy/objspace/test/apptest_descriptor.py b/pypy/objspace/test/apptest_descriptor.py
index 3e3188d14b..766a6aab04 100644
--- a/pypy/objspace/test/apptest_descriptor.py
+++ b/pypy/objspace/test/apptest_descriptor.py
@@ -180,3 +180,12 @@ def test_issue3255():
class X(object):
__getattribute__ = Descriptor()
assert X().foo == "foo"
+
+def test_descr_funny_new():
+ class C(object):
+ @classmethod
+ def __new__(*args):
+ return args
+
+ assert C.__new__(1,2) == (C, 1, 2)
+ assert C(1,2) == (C, C, 1, 2)
diff --git a/rpython/rlib/rsre/rpy/sre_compile.py b/rpython/rlib/rsre/rpy/sre_compile.py
index eb8cf11ac7..e65e53ff23 100644
--- a/rpython/rlib/rsre/rpy/sre_compile.py
+++ b/rpython/rlib/rsre/rpy/sre_compile.py
@@ -435,7 +435,7 @@ def _compile_info(code, pattern, flags):
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
- if lo == 0:
+ if not lo and hi:
return # not worth it
# look for a literal prefix
prefix = []
diff --git a/rpython/rlib/rsre/rpy/sre_parse.py b/rpython/rlib/rsre/rpy/sre_parse.py
index 891c44db41..c1c3d7c19b 100644
--- a/rpython/rlib/rsre/rpy/sre_parse.py
+++ b/rpython/rlib/rsre/rpy/sre_parse.py
@@ -23,6 +23,7 @@ DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
+ASCIILETTERS = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
WHITESPACE = set(" \t\n\r\v\f")
@@ -233,7 +234,7 @@ def isname(name):
return False
return True
-def _class_escape(source, escape):
+def _class_escape(source, escape, nested):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
@@ -260,12 +261,21 @@ def _class_escape(source, escape):
elif c in DIGITS:
raise error("bogus escape: %s" % repr(escape))
if len(escape) == 2:
+ if sys.py3kwarning and c in ASCIILETTERS:
+ import warnings
+ if c in 'Uu':
+ warnings.warn('bad escape %s; Unicode escapes are '
+ 'supported only since Python 3.3' % escape,
+ FutureWarning, stacklevel=nested + 6)
+ else:
+ warnings.warnpy3k('bad escape %s' % escape,
+ DeprecationWarning, stacklevel=nested + 6)
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
-def _escape(source, escape, state):
+def _escape(source, escape, state, nested):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
@@ -305,23 +315,32 @@ def _escape(source, escape, state):
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
- RuntimeWarning)
+ RuntimeWarning, stacklevel=nested + 6)
return GROUPREF, group
raise ValueError
if len(escape) == 2:
+ if sys.py3kwarning and c in ASCIILETTERS:
+ import warnings
+ if c in 'Uu':
+ warnings.warn('bad escape %s; Unicode escapes are '
+ 'supported only since Python 3.3' % escape,
+ FutureWarning, stacklevel=nested + 6)
+ else:
+ warnings.warnpy3k('bad escape %s' % escape,
+ DeprecationWarning, stacklevel=nested + 6)
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
-def _parse_sub(source, state, nested=1):
+def _parse_sub(source, state, nested):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
- itemsappend(_parse(source, state))
+ itemsappend(_parse(source, state, nested + 1))
if sourcematch("|"):
continue
if not nested:
@@ -373,10 +392,10 @@ def _parse_sub(source, state, nested=1):
subpattern.append((BRANCH, (None, items)))
return subpattern
-def _parse_sub_cond(source, state, condgroup):
- item_yes = _parse(source, state)
+def _parse_sub_cond(source, state, condgroup, nested):
+ item_yes = _parse(source, state, nested + 1)
if source.match("|"):
- item_no = _parse(source, state)
+ item_no = _parse(source, state, nested + 1)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
@@ -392,7 +411,7 @@ _ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
-def _parse(source, state):
+def _parse(source, state, nested):
# parse a simple pattern
subpattern = SubPattern(state)
@@ -443,7 +462,7 @@ def _parse(source, state):
if this == "]" and set != start:
break
elif this and this[0] == "\\":
- code1 = _class_escape(source, this)
+ code1 = _class_escape(source, this, nested + 1)
elif this:
code1 = LITERAL, ord(this)
else:
@@ -459,7 +478,7 @@ def _parse(source, state):
break
elif this:
if this[0] == "\\":
- code2 = _class_escape(source, this)
+ code2 = _class_escape(source, this, nested + 1)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
@@ -589,7 +608,7 @@ def _parse(source, state):
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
- RuntimeWarning)
+ RuntimeWarning, stacklevel=nested + 6)
subpatternappend((GROUPREF, gid))
continue
else:
@@ -619,7 +638,7 @@ def _parse(source, state):
dir = -1 # lookbehind
char = sourceget()
state.lookbehind += 1
- p = _parse_sub(source, state)
+ p = _parse_sub(source, state, nested + 1)
if dir < 0:
state.lookbehind -= 1
if not sourcematch(")"):
@@ -656,7 +675,7 @@ def _parse(source, state):
import warnings
warnings.warn('group references in lookbehind '
'assertions are not supported',
- RuntimeWarning)
+ RuntimeWarning, stacklevel=nested + 6)
else:
# flags
if not source.next in FLAGS:
@@ -671,9 +690,9 @@ def _parse(source, state):
else:
group = state.opengroup(name)
if condgroup:
- p = _parse_sub_cond(source, state, condgroup)
+ p = _parse_sub_cond(source, state, condgroup, nested + 1)
else:
- p = _parse_sub(source, state)
+ p = _parse_sub(source, state, nested + 1)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
@@ -695,7 +714,7 @@ def _parse(source, state):
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
- code = _escape(source, this, state)
+ code = _escape(source, this, state, nested + 1)
subpatternappend(code)
else:
@@ -714,6 +733,12 @@ def parse(str, flags=0, pattern=None):
pattern.str = str
p = _parse_sub(source, pattern, 0)
+ if (sys.py3kwarning and
+ (p.pattern.flags & SRE_FLAG_LOCALE) and
+ (p.pattern.flags & SRE_FLAG_UNICODE)):
+ import warnings
+ warnings.warnpy3k("LOCALE and UNICODE flags are incompatible",
+ DeprecationWarning, stacklevel=5)
tail = source.get()
if tail == ")":
@@ -801,7 +826,10 @@ def parse_template(source, pattern):
try:
this = makechar(ESCAPES[this][1])
except KeyError:
- pass
+ if sys.py3kwarning and c in ASCIILETTERS:
+ import warnings
+ warnings.warnpy3k('bad escape %s' % this,
+ DeprecationWarning, stacklevel=4)
literal(this)
else:
literal(this)