# Auto-generated from cpython_chunk_24.txt TEXT_DATA = r""" /*********************************************************** Copyright 1994 by Lance Ellinghouse, Cathedral City, California Republic, United States of America. All Rights Reserved Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Lance Ellinghouse not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ******************************************************************/ /****************************************************************** Revision history: 2010/04/20 (Sean Reifschneider) - Use basename(sys.argv[0]) for the default "ident". - Arguments to openlog() are now keyword args and are all optional. - syslog() calls openlog() if it hasn't already been called. 1998/04/28 (Sean Reifschneider) - When facility not specified to syslog() method, use default from openlog() (This is how it was claimed to work in the documentation) - Potential resource leak of o_ident, now cleaned up in closelog() - Minor comment accuracy fix. 95/06/29 (Steve Clift) - Changed arg parsing to use PyArg_ParseTuple. - Added PyErr_Clear() call(s) where needed. - Fix core dumps if user message contains format specifiers. - Change openlog arg defaults to match normal syslog behavior. - Plug memory leak in openlog(). - Fix setlogmask() to return previous mask value. ******************************************************************/ /* syslog module */ // clinic/syslogmodule.c.h uses internal pycore_modsupport.h API #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "osdefs.h" // SEP #include <syslog.h> /*[clinic input] module syslog [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=478f4ac94a1d4cae]*/ #include "clinic/syslogmodule.c.h" /* only one instance, only one syslog, so globals should be ok, * these fields are writable from the main interpreter only. */ static PyObject *S_ident_o = NULL; // identifier, held by openlog() static char S_log_open = 0; static inline int is_main_interpreter(void) { return (PyInterpreterState_Get() == PyInterpreterState_Main()); } static PyObject * syslog_get_argv(void) { /* Figure out what to use as the program "ident" for openlog(). * This swallows exceptions and continues rather than failing out, * because the syslog module can still be used without it: openlog(3) * is optional.
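 * (Illustrative aside, not part of the original module: a minimal plain-C
 * sketch of the same "basename of sys.argv[0]" derivation, assuming a
 * NUL-terminated path and the single separator character SEP from osdefs.h;
 * default_ident() is a hypothetical name used only for illustration.)
 *
 *     static const char *default_ident(const char *argv0)
 *     {
 *         const char *slash = strrchr(argv0, SEP);   // last separator, if any
 *         return slash ? slash + 1 : argv0;          // basename(argv0)
 *     }
 *
 * For example, "/usr/bin/myscript.py" would yield "myscript.py" as the default ident.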
*/ Py_ssize_t argv_len, scriptlen; PyObject *scriptobj; Py_ssize_t slash; PyObject *argv; if (PySys_GetOptionalAttrString("argv", &argv) <= 0) { return NULL; } argv_len = PyList_Size(argv); if (argv_len == -1) { PyErr_Clear(); Py_DECREF(argv); return NULL; } if (argv_len == 0) { Py_DECREF(argv); return NULL; } scriptobj = PyList_GetItem(argv, 0); Py_XINCREF(scriptobj); Py_DECREF(argv); if (scriptobj == NULL) { PyErr_Clear(); return NULL; } if (!PyUnicode_Check(scriptobj)) { Py_DECREF(scriptobj); return NULL; } scriptlen = PyUnicode_GET_LENGTH(scriptobj); if (scriptlen == 0) { Py_DECREF(scriptobj); return NULL; } slash = PyUnicode_FindChar(scriptobj, SEP, 0, scriptlen, -1); if (slash == -2) { PyErr_Clear(); Py_DECREF(scriptobj); return NULL; } if (slash != -1) { Py_SETREF(scriptobj, PyUnicode_Substring(scriptobj, slash + 1, scriptlen)); } return scriptobj; } /*[clinic input] @critical_section syslog.openlog ident: unicode = NULL logoption as logopt: long = 0 facility: long(c_default="LOG_USER") = LOG_USER Set logging options of subsequent syslog() calls. [clinic start generated code]*/ static PyObject * syslog_openlog_impl(PyObject *module, PyObject *ident, long logopt, long facility) /*[clinic end generated code: output=5476c12829b6eb75 input=ee700b8786f81c23]*/ { // Since the sys.openlog changes the process level state of syslog library, // this operation is only allowed for the main interpreter. if (!is_main_interpreter()) { PyErr_SetString(PyExc_RuntimeError, "subinterpreter can't use syslog.openlog()"); return NULL; } const char *ident_str = NULL; if (ident) { Py_INCREF(ident); } else { /* get sys.argv[0] or NULL if we can't for some reason */ ident = syslog_get_argv(); if (ident == NULL && PyErr_Occurred()) { return NULL; } } /* At this point, ident should be INCREF()ed. openlog(3) does not * make a copy, and syslog(3) later uses it. We can't garbagecollect it. * If NULL, just let openlog figure it out (probably using C argv[0]). */ if (ident) { ident_str = PyUnicode_AsUTF8(ident); if (ident_str == NULL) { Py_DECREF(ident); return NULL; } } if (PySys_Audit("syslog.openlog", "Oll", ident ? ident : Py_None, logopt, facility) < 0) { Py_XDECREF(ident); return NULL; } openlog(ident_str, logopt, facility); S_log_open = 1; Py_XSETREF(S_ident_o, ident); Py_RETURN_NONE; } /*[clinic input] @critical_section syslog.syslog [ priority: int(c_default="LOG_INFO") = LOG_INFO ] message: str / Send the string message to the system logger. [clinic start generated code]*/ static PyObject * syslog_syslog_impl(PyObject *module, int group_left_1, int priority, const char *message) /*[clinic end generated code: output=c3dbc73445a0e078 input=6588ddb0b113af8e]*/ { if (PySys_Audit("syslog.syslog", "is", priority, message) < 0) { return NULL; } /* if log is not opened, open it now */ if (!S_log_open) { if (!is_main_interpreter()) { PyErr_SetString(PyExc_RuntimeError, "subinterpreter can't use syslog.syslog() " "until the syslog is opened by the main interpreter"); return NULL; } PyObject *openlog_ret = syslog_openlog_impl(module, NULL, 0, LOG_USER); if (openlog_ret == NULL) { return NULL; } Py_DECREF(openlog_ret); } /* Incref ident, because it can be decrefed if syslog.openlog() is * called when the GIL is released. 
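 * (Illustrative aside, not part of the original module.) The reason an ident
 * object is pinned at all: as the openlog() comment above notes, openlog(3)
 * keeps the pointer it is given rather than copying the string, and syslog(3)
 * reads it later. A plain-C sketch of the bug class this avoids, with a
 * hypothetical helper name:
 *
 *     void broken_setup(void)
 *     {
 *         char ident[32];
 *         snprintf(ident, sizeof(ident), "worker-%d", (int)getpid());
 *         openlog(ident, LOG_PID, LOG_USER);
 *     }   // 'ident' dies here; later syslog() calls would read a dangling pointer
 *
 * Holding S_ident_o (whose cached UTF-8 buffer was passed to openlog()) keeps
 * that memory alive for the lifetime of the log connection.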
*/ PyObject *ident = Py_XNewRef(S_ident_o); #ifdef __APPLE__ // gh-98178: On macOS, libc syslog() is not thread-safe syslog(priority, "%s", message); #else Py_BEGIN_ALLOW_THREADS; syslog(priority, "%s", message); Py_END_ALLOW_THREADS; #endif Py_XDECREF(ident); Py_RETURN_NONE; } /*[clinic input] @critical_section syslog.closelog Reset the syslog module values and call the system library closelog(). [clinic start generated code]*/ static PyObject * syslog_closelog_impl(PyObject *module) /*[clinic end generated code: output=97890a80a24b1b84 input=167f489868bd5a72]*/ { // Since the sys.closelog changes the process level state of syslog library, // this operation is only allowed for the main interpreter. if (!is_main_interpreter()) { PyErr_SetString(PyExc_RuntimeError, "subinterpreter can't use syslog.closelog()"); return NULL; } if (PySys_Audit("syslog.closelog", NULL) < 0) { return NULL; } if (S_log_open) { closelog(); Py_CLEAR(S_ident_o); S_log_open = 0; } Py_RETURN_NONE; } /*[clinic input] syslog.setlogmask -> long maskpri: long / Set the priority mask to maskpri and return the previous mask value. [clinic start generated code]*/ static long syslog_setlogmask_impl(PyObject *module, long maskpri) /*[clinic end generated code: output=d6ed163917b434bf input=adff2c2b76c7629c]*/ { if (PySys_Audit("syslog.setlogmask", "l", maskpri) < 0) { return -1; } static PyMutex setlogmask_mutex = {0}; PyMutex_Lock(&setlogmask_mutex); // Linux man page (3): setlogmask() is MT-Unsafe race:LogMask. long previous_mask = setlogmask(maskpri); PyMutex_Unlock(&setlogmask_mutex); return previous_mask; } /*[clinic input] syslog.LOG_MASK -> long pri: long / Calculates the mask for the individual priority pri. [clinic start generated code]*/ static long syslog_LOG_MASK_impl(PyObject *module, long pri) /*[clinic end generated code: output=c4a5bbfcc74c7c94 input=534829cb7fb5f7d2]*/ { return LOG_MASK(pri); } /*[clinic input] syslog.LOG_UPTO -> long pri: long / Calculates the mask for all priorities up to and including pri. 
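(Illustrative aside, not part of the original module.) With the usual <syslog.h>
definitions, LOG_MASK(pri) expands to (1 << (pri)) and LOG_UPTO(pri) to
((1 << ((pri) + 1)) - 1), so a typical combination of these helpers looks like:

    int only_errors = LOG_MASK(LOG_ERR) | LOG_MASK(LOG_CRIT);  // bits 3 and 2 only
    int up_to_warn  = LOG_UPTO(LOG_WARNING);                   // LOG_EMERG .. LOG_WARNING
    int previous    = setlogmask(up_to_warn);                  // returns the previous mask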
[clinic start generated code]*/ static long syslog_LOG_UPTO_impl(PyObject *module, long pri) /*[clinic end generated code: output=9eab083c90601d7e input=5e906d6c406b7458]*/ { return LOG_UPTO(pri); } /* List of functions defined in the module */ static PyMethodDef syslog_methods[] = { SYSLOG_OPENLOG_METHODDEF SYSLOG_CLOSELOG_METHODDEF SYSLOG_SYSLOG_METHODDEF SYSLOG_SETLOGMASK_METHODDEF SYSLOG_LOG_MASK_METHODDEF SYSLOG_LOG_UPTO_METHODDEF {NULL, NULL, 0} }; static int syslog_exec(PyObject *module) { #define ADD_INT_MACRO(module, macro) \ do { \ if (PyModule_AddIntConstant(module, #macro, macro) < 0) { \ return -1; \ } \ } while (0) /* Priorities */ ADD_INT_MACRO(module, LOG_EMERG); ADD_INT_MACRO(module, LOG_ALERT); ADD_INT_MACRO(module, LOG_CRIT); ADD_INT_MACRO(module, LOG_ERR); ADD_INT_MACRO(module, LOG_WARNING); ADD_INT_MACRO(module, LOG_NOTICE); ADD_INT_MACRO(module, LOG_INFO); ADD_INT_MACRO(module, LOG_DEBUG); /* openlog() option flags */ ADD_INT_MACRO(module, LOG_PID); ADD_INT_MACRO(module, LOG_CONS); ADD_INT_MACRO(module, LOG_NDELAY); #ifdef LOG_ODELAY ADD_INT_MACRO(module, LOG_ODELAY); #endif #ifdef LOG_NOWAIT ADD_INT_MACRO(module, LOG_NOWAIT); #endif #ifdef LOG_PERROR ADD_INT_MACRO(module, LOG_PERROR); #endif /* Facilities */ ADD_INT_MACRO(module, LOG_KERN); ADD_INT_MACRO(module, LOG_USER); ADD_INT_MACRO(module, LOG_MAIL); ADD_INT_MACRO(module, LOG_DAEMON); ADD_INT_MACRO(module, LOG_AUTH); ADD_INT_MACRO(module, LOG_LPR); ADD_INT_MACRO(module, LOG_LOCAL0); ADD_INT_MACRO(module, LOG_LOCAL1); ADD_INT_MACRO(module, LOG_LOCAL2); ADD_INT_MACRO(module, LOG_LOCAL3); ADD_INT_MACRO(module, LOG_LOCAL4); ADD_INT_MACRO(module, LOG_LOCAL5); ADD_INT_MACRO(module, LOG_LOCAL6); ADD_INT_MACRO(module, LOG_LOCAL7); #ifndef LOG_SYSLOG #define LOG_SYSLOG LOG_DAEMON #endif #ifndef LOG_NEWS #define LOG_NEWS LOG_MAIL #endif #ifndef LOG_UUCP #define LOG_UUCP LOG_MAIL #endif #ifndef LOG_CRON #define LOG_CRON LOG_DAEMON #endif ADD_INT_MACRO(module, LOG_SYSLOG); ADD_INT_MACRO(module, LOG_CRON); ADD_INT_MACRO(module, LOG_UUCP); ADD_INT_MACRO(module, LOG_NEWS); #ifdef LOG_AUTHPRIV ADD_INT_MACRO(module, LOG_AUTHPRIV); #endif #ifdef LOG_FTP ADD_INT_MACRO(module, LOG_FTP); #endif #ifdef LOG_NETINFO ADD_INT_MACRO(module, LOG_NETINFO); #endif #ifdef LOG_REMOTEAUTH ADD_INT_MACRO(module, LOG_REMOTEAUTH); #endif #ifdef LOG_INSTALL ADD_INT_MACRO(module, LOG_INSTALL); #endif #ifdef LOG_RAS ADD_INT_MACRO(module, LOG_RAS); #endif #ifdef LOG_LAUNCHD ADD_INT_MACRO(module, LOG_LAUNCHD); #endif return 0; } static PyModuleDef_Slot syslog_slots[] = { {Py_mod_exec, syslog_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; /* Initialization function for the module */ static struct PyModuleDef syslogmodule = { PyModuleDef_HEAD_INIT, .m_name = "syslog", .m_size = 0, .m_methods = syslog_methods, .m_slots = syslog_slots, }; PyMODINIT_FUNC PyInit_syslog(void) { return PyModuleDef_Init(&syslogmodule); } /* ABCMeta implementation */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "pycore_moduleobject.h" // _PyModule_GetState() #include "pycore_object.h" // _PyType_GetSubclasses() #include "pycore_runtime.h" // _Py_ID() #include "pycore_setobject.h" // _PySet_NextEntry() #include "pycore_weakref.h" // _PyWeakref_GET_REF() #include "clinic/_abc.c.h" /*[clinic input] module _abc [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=964f5328e1aefcda]*/ PyDoc_STRVAR(_abc__doc__, "Module 
contains faster C implementation of abc.ABCMeta"); typedef struct { PyTypeObject *_abc_data_type; uint64_t abc_invalidation_counter; } _abcmodule_state; static inline _abcmodule_state* get_abc_state(PyObject *module) { void *state = _PyModule_GetState(module); assert(state != NULL); return (_abcmodule_state *)state; } static inline uint64_t get_invalidation_counter(_abcmodule_state *state) { #ifdef Py_GIL_DISABLED return _Py_atomic_load_uint64(&state->abc_invalidation_counter); #else return state->abc_invalidation_counter; #endif } static inline void increment_invalidation_counter(_abcmodule_state *state) { #ifdef Py_GIL_DISABLED _Py_atomic_add_uint64(&state->abc_invalidation_counter, 1); #else state->abc_invalidation_counter++; #endif } /* This object stores internal state for ABCs. Note that we can use normal sets for caches, since they are never iterated over. */ typedef struct { PyObject_HEAD /* These sets of weak references are lazily created. Once created, they will point to the same sets until the ABCMeta object is destroyed or cleared, both of which will only happen while the object is visible to a single thread. */ PyObject *_abc_registry; PyObject *_abc_cache; PyObject *_abc_negative_cache; uint64_t _abc_negative_cache_version; } _abc_data; #define _abc_data_CAST(op) ((_abc_data *)(op)) static inline uint64_t get_cache_version(_abc_data *impl) { #ifdef Py_GIL_DISABLED return _Py_atomic_load_uint64(&impl->_abc_negative_cache_version); #else return impl->_abc_negative_cache_version; #endif } static inline void set_cache_version(_abc_data *impl, uint64_t version) { #ifdef Py_GIL_DISABLED _Py_atomic_store_uint64(&impl->_abc_negative_cache_version, version); #else impl->_abc_negative_cache_version = version; #endif } static int abc_data_traverse(PyObject *op, visitproc visit, void *arg) { _abc_data *self = _abc_data_CAST(op); Py_VISIT(Py_TYPE(self)); Py_VISIT(self->_abc_registry); Py_VISIT(self->_abc_cache); Py_VISIT(self->_abc_negative_cache); return 0; } static int abc_data_clear(PyObject *op) { _abc_data *self = _abc_data_CAST(op); Py_CLEAR(self->_abc_registry); Py_CLEAR(self->_abc_cache); Py_CLEAR(self->_abc_negative_cache); return 0; } static void abc_data_dealloc(PyObject *self) { PyObject_GC_UnTrack(self); PyTypeObject *tp = Py_TYPE(self); (void)abc_data_clear(self); tp->tp_free(self); Py_DECREF(tp); } static PyObject * abc_data_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { _abc_data *self = (_abc_data *) type->tp_alloc(type, 0); _abcmodule_state *state = NULL; if (self == NULL) { return NULL; } state = _PyType_GetModuleState(type); if (state == NULL) { Py_DECREF(self); return NULL; } self->_abc_registry = NULL; self->_abc_cache = NULL; self->_abc_negative_cache = NULL; self->_abc_negative_cache_version = get_invalidation_counter(state); return (PyObject *) self; } PyDoc_STRVAR(abc_data_doc, "Internal state held by ABC machinery."); static PyType_Slot _abc_data_type_spec_slots[] = { {Py_tp_doc, (void *)abc_data_doc}, {Py_tp_new, abc_data_new}, {Py_tp_dealloc, abc_data_dealloc}, {Py_tp_traverse, abc_data_traverse}, {Py_tp_clear, abc_data_clear}, {0, 0} }; static PyType_Spec _abc_data_type_spec = { .name = "_abc._abc_data", .basicsize = sizeof(_abc_data), .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, .slots = _abc_data_type_spec_slots, }; static _abc_data * _get_impl(PyObject *module, PyObject *self) { _abcmodule_state *state = get_abc_state(module); PyObject *impl = PyObject_GetAttr(self, &_Py_ID(_abc_impl)); if (impl == NULL) { return NULL; } if 
(!Py_IS_TYPE(impl, state->_abc_data_type)) { PyErr_SetString(PyExc_TypeError, "_abc_impl is set to a wrong type"); Py_DECREF(impl); return NULL; } return (_abc_data *)impl; } static int _in_weak_set(_abc_data *impl, PyObject **pset, PyObject *obj) { PyObject *set; Py_BEGIN_CRITICAL_SECTION(impl); set = *pset; Py_END_CRITICAL_SECTION(); if (set == NULL || PySet_GET_SIZE(set) == 0) { return 0; } PyObject *ref = PyWeakref_NewRef(obj, NULL); if (ref == NULL) { if (PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); return 0; } return -1; } int res = PySet_Contains(set, ref); Py_DECREF(ref); return res; } static PyObject * _destroy(PyObject *setweakref, PyObject *objweakref) { PyObject *set = _PyWeakref_GET_REF(setweakref); if (set == NULL) { Py_RETURN_NONE; } if (PySet_Discard(set, objweakref) < 0) { Py_DECREF(set); return NULL; } Py_DECREF(set); Py_RETURN_NONE; } static PyMethodDef _destroy_def = { "_destroy", _destroy, METH_O }; static int _add_to_weak_set(_abc_data *impl, PyObject **pset, PyObject *obj) { PyObject *set; Py_BEGIN_CRITICAL_SECTION(impl); set = *pset; if (set == NULL) { set = *pset = PySet_New(NULL); } Py_END_CRITICAL_SECTION(); if (set == NULL) { return -1; } PyObject *ref, *wr; PyObject *destroy_cb; wr = PyWeakref_NewRef(set, NULL); if (wr == NULL) { return -1; } destroy_cb = PyCFunction_NewEx(&_destroy_def, wr, NULL); if (destroy_cb == NULL) { Py_DECREF(wr); return -1; } ref = PyWeakref_NewRef(obj, destroy_cb); Py_DECREF(destroy_cb); if (ref == NULL) { Py_DECREF(wr); return -1; } int ret = PySet_Add(set, ref); Py_DECREF(wr); Py_DECREF(ref); return ret; } /*[clinic input] _abc._reset_registry self: object / Internal ABC helper to reset registry of a given class. Should be only used by refleak.py [clinic start generated code]*/ static PyObject * _abc__reset_registry(PyObject *module, PyObject *self) /*[clinic end generated code: output=92d591a43566cc10 input=12a0b7eb339ac35c]*/ { _abc_data *impl = _get_impl(module, self); if (impl == NULL) { return NULL; } PyObject *registry; Py_BEGIN_CRITICAL_SECTION(impl); registry = impl->_abc_registry; Py_END_CRITICAL_SECTION(); if (registry != NULL && PySet_Clear(registry) < 0) { Py_DECREF(impl); return NULL; } Py_DECREF(impl); Py_RETURN_NONE; } /*[clinic input] _abc._reset_caches self: object / Internal ABC helper to reset both caches of a given class. Should be only used by refleak.py [clinic start generated code]*/ static PyObject * _abc__reset_caches(PyObject *module, PyObject *self) /*[clinic end generated code: output=f296f0d5c513f80c input=c0ac616fd8acfb6f]*/ { _abc_data *impl = _get_impl(module, self); if (impl == NULL) { return NULL; } PyObject *cache, *negative_cache; Py_BEGIN_CRITICAL_SECTION(impl); cache = impl->_abc_cache; negative_cache = impl->_abc_negative_cache; Py_END_CRITICAL_SECTION(); if (cache != NULL && PySet_Clear(cache) < 0) { Py_DECREF(impl); return NULL; } /* also the second cache */ if (negative_cache != NULL && PySet_Clear(negative_cache) < 0) { Py_DECREF(impl); return NULL; } Py_DECREF(impl); Py_RETURN_NONE; } /*[clinic input] _abc._get_dump self: object / Internal ABC helper for cache and registry debugging. Return shallow copies of registry, of both caches, and negative cache version. Don't call this function directly, instead use ABC._dump_registry() for a nice repr. 
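(Illustrative aside, not part of the original module.) The registry and caches
dumped here are populated by _add_to_weak_set() above: each entry is a weak
reference whose callback, _destroy(), removes the entry again when the referent
class is collected, and the callback itself only holds a weak reference to the
set so it does not keep the set alive. A condensed sketch of that pattern,
without the error handling:

    PyObject *set_wr  = PyWeakref_NewRef(set, NULL);              // weakly reference the set
    PyObject *cleanup = PyCFunction_NewEx(&_destroy_def, set_wr, NULL);
    PyObject *ref     = PyWeakref_NewRef(obj, cleanup);           // cleanup runs when obj dies
    PySet_Add(set, ref);                                          // store the weakref, not obj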
[clinic start generated code]*/ static PyObject * _abc__get_dump(PyObject *module, PyObject *self) /*[clinic end generated code: output=9d9569a8e2c1c443 input=2c5deb1bfe9e3c79]*/ { _abc_data *impl = _get_impl(module, self); if (impl == NULL) { return NULL; } PyObject *res; Py_BEGIN_CRITICAL_SECTION(impl); res = Py_BuildValue("NNNK", PySet_New(impl->_abc_registry), PySet_New(impl->_abc_cache), PySet_New(impl->_abc_negative_cache), get_cache_version(impl)); Py_END_CRITICAL_SECTION(); Py_DECREF(impl); return res; } // Compute set of abstract method names. static int compute_abstract_methods(PyObject *self) { int ret = -1; PyObject *abstracts = PyFrozenSet_New(NULL); if (abstracts == NULL) { return -1; } PyObject *ns = NULL, *items = NULL, *bases = NULL; // Py_XDECREF()ed on error. /* Stage 1: direct abstract methods. */ ns = PyObject_GetAttr(self, &_Py_ID(__dict__)); if (!ns) { goto error; } // We can't use PyDict_Next(ns) even when ns is dict because // _PyObject_IsAbstract() can mutate ns. items = PyMapping_Items(ns); if (!items) { goto error; } assert(PyList_Check(items)); for (Py_ssize_t pos = 0; pos < PyList_GET_SIZE(items); pos++) { PyObject *it = PySequence_Fast( PyList_GET_ITEM(items, pos), "items() returned non-iterable"); if (!it) { goto error; } if (PySequence_Fast_GET_SIZE(it) != 2) { PyErr_SetString(PyExc_TypeError, "items() returned item which size is not 2"); Py_DECREF(it); goto error; } // borrowed PyObject *key = PySequence_Fast_GET_ITEM(it, 0); PyObject *value = PySequence_Fast_GET_ITEM(it, 1); // items or it may be cleared while accessing __abstractmethod__ // So we need to keep strong reference for key Py_INCREF(key); int is_abstract = _PyObject_IsAbstract(value); if (is_abstract < 0 || (is_abstract && PySet_Add(abstracts, key) < 0)) { Py_DECREF(it); Py_DECREF(key); goto error; } Py_DECREF(key); Py_DECREF(it); } /* Stage 2: inherited abstract methods. */ bases = PyObject_GetAttr(self, &_Py_ID(__bases__)); if (!bases) { goto error; } if (!PyTuple_Check(bases)) { PyErr_SetString(PyExc_TypeError, "__bases__ is not tuple"); goto error; } for (Py_ssize_t pos = 0; pos < PyTuple_GET_SIZE(bases); pos++) { PyObject *item = PyTuple_GET_ITEM(bases, pos); // borrowed PyObject *base_abstracts, *iter; if (PyObject_GetOptionalAttr(item, &_Py_ID(__abstractmethods__), &base_abstracts) < 0) { goto error; } if (base_abstracts == NULL) { continue; } if (!(iter = PyObject_GetIter(base_abstracts))) { Py_DECREF(base_abstracts); goto error; } Py_DECREF(base_abstracts); PyObject *key, *value; while ((key = PyIter_Next(iter))) { if (PyObject_GetOptionalAttr(self, key, &value) < 0) { Py_DECREF(key); Py_DECREF(iter); goto error; } if (value == NULL) { Py_DECREF(key); continue; } int is_abstract = _PyObject_IsAbstract(value); Py_DECREF(value); if (is_abstract < 0 || (is_abstract && PySet_Add(abstracts, key) < 0)) { Py_DECREF(key); Py_DECREF(iter); goto error; } Py_DECREF(key); } Py_DECREF(iter); if (PyErr_Occurred()) { goto error; } } if (PyObject_SetAttr(self, &_Py_ID(__abstractmethods__), abstracts) < 0) { goto error; } ret = 0; error: Py_DECREF(abstracts); Py_XDECREF(ns); Py_XDECREF(items); Py_XDECREF(bases); return ret; } #define COLLECTION_FLAGS (Py_TPFLAGS_SEQUENCE | Py_TPFLAGS_MAPPING) /*[clinic input] @permit_long_summary _abc._abc_init self: object / Internal ABC helper for class set-up. Should be never used outside abc module. 
[clinic start generated code]*/ static PyObject * _abc__abc_init(PyObject *module, PyObject *self) /*[clinic end generated code: output=594757375714cda1 input=0b3513f947736d39]*/ { _abcmodule_state *state = get_abc_state(module); PyObject *data; if (compute_abstract_methods(self) < 0) { return NULL; } /* Set up inheritance registry. */ data = abc_data_new(state->_abc_data_type, NULL, NULL); if (data == NULL) { return NULL; } if (PyObject_SetAttr(self, &_Py_ID(_abc_impl), data) < 0) { Py_DECREF(data); return NULL; } Py_DECREF(data); /* If __abc_tpflags__ & COLLECTION_FLAGS is set, then set the corresponding bit(s) * in the new class. * Used by collections.abc.Sequence and collections.abc.Mapping to indicate * their special status w.r.t. pattern matching. */ if (PyType_Check(self)) { PyTypeObject *cls = (PyTypeObject *)self; PyObject *dict = _PyType_GetDict(cls); PyObject *flags = NULL; if (PyDict_Pop(dict, &_Py_ID(__abc_tpflags__), &flags) < 0) { return NULL; } if (flags == NULL || !PyLong_CheckExact(flags)) { Py_XDECREF(flags); Py_RETURN_NONE; } long val = PyLong_AsLong(flags); Py_DECREF(flags); if (val == -1 && PyErr_Occurred()) { return NULL; } if ((val & COLLECTION_FLAGS) == COLLECTION_FLAGS) { PyErr_SetString(PyExc_TypeError, "__abc_tpflags__ cannot be both Py_TPFLAGS_SEQUENCE and Py_TPFLAGS_MAPPING"); return NULL; } _PyType_SetFlags((PyTypeObject *)self, 0, val & COLLECTION_FLAGS); } Py_RETURN_NONE; } /*[clinic input] @permit_long_summary _abc._abc_register self: object subclass: object / Internal ABC helper for subclasss registration. Should be never used outside abc module. [clinic start generated code]*/ static PyObject * _abc__abc_register_impl(PyObject *module, PyObject *self, PyObject *subclass) /*[clinic end generated code: output=7851e7668c963524 input=135ab13a581b4414]*/ { if (!PyType_Check(subclass)) { PyErr_SetString(PyExc_TypeError, "Can only register classes"); return NULL; } int result = PyObject_IsSubclass(subclass, self); if (result > 0) { return Py_NewRef(subclass); /* Already a subclass. */ } if (result < 0) { return NULL; } /* Subtle: test for cycles *after* testing for "already a subclass"; this means we allow X.register(X) and interpret it as a no-op. */ result = PyObject_IsSubclass(self, subclass); if (result > 0) { /* This would create a cycle, which is bad for the algorithm below. */ PyErr_SetString(PyExc_RuntimeError, "Refusing to create an inheritance cycle"); return NULL; } if (result < 0) { return NULL; } _abc_data *impl = _get_impl(module, self); if (impl == NULL) { return NULL; } if (_add_to_weak_set(impl, &impl->_abc_registry, subclass) < 0) { Py_DECREF(impl); return NULL; } Py_DECREF(impl); /* Invalidate negative cache */ increment_invalidation_counter(get_abc_state(module)); /* Set Py_TPFLAGS_SEQUENCE or Py_TPFLAGS_MAPPING flag */ if (PyType_Check(self)) { unsigned long collection_flag = PyType_GetFlags((PyTypeObject *)self) & COLLECTION_FLAGS; if (collection_flag) { _PyType_SetFlagsRecursive((PyTypeObject *)subclass, COLLECTION_FLAGS, collection_flag); } } return Py_NewRef(subclass); } /*[clinic input] @permit_long_summary _abc._abc_instancecheck self: object instance: object / Internal ABC helper for instance checks. Should be never used outside abc module. 
[clinic start generated code]*/ static PyObject * _abc__abc_instancecheck_impl(PyObject *module, PyObject *self, PyObject *instance) /*[clinic end generated code: output=b8b5148f63b6b56f input=0bbc8da0ea346719]*/ { PyObject *subtype, *result = NULL, *subclass = NULL; _abc_data *impl = _get_impl(module, self); if (impl == NULL) { return NULL; } subclass = PyObject_GetAttr(instance, &_Py_ID(__class__)); if (subclass == NULL) { Py_DECREF(impl); return NULL; } /* Inline the cache checking. */ int incache = _in_weak_set(impl, &impl->_abc_cache, subclass); if (incache < 0) { goto end; } if (incache > 0) { result = Py_NewRef(Py_True); goto end; } subtype = (PyObject *)Py_TYPE(instance); if (subtype == subclass) { if (get_cache_version(impl) == get_invalidation_counter(get_abc_state(module))) { incache = _in_weak_set(impl, &impl->_abc_negative_cache, subclass); if (incache < 0) { goto end; } if (incache > 0) { result = Py_NewRef(Py_False); goto end; } } /* Fall back to the subclass check. */ result = PyObject_CallMethodOneArg(self, &_Py_ID(__subclasscheck__), subclass); goto end; } result = PyObject_CallMethodOneArg(self, &_Py_ID(__subclasscheck__), subclass); if (result == NULL) { goto end; } switch (PyObject_IsTrue(result)) { case -1: Py_SETREF(result, NULL); break; case 0: Py_DECREF(result); result = PyObject_CallMethodOneArg(self, &_Py_ID(__subclasscheck__), subtype); break; case 1: // Nothing to do. break; default: Py_UNREACHABLE(); } end: Py_XDECREF(impl); Py_XDECREF(subclass); return result; } // Return -1 when exception occurred. // Return 1 when result is set. // Return 0 otherwise. static int subclasscheck_check_registry(_abc_data *impl, PyObject *subclass, PyObject **result); /*[clinic input] @permit_long_summary _abc._abc_subclasscheck self: object subclass: object / Internal ABC helper for subclasss checks. Should be never used outside abc module. [clinic start generated code]*/ static PyObject * _abc__abc_subclasscheck_impl(PyObject *module, PyObject *self, PyObject *subclass) /*[clinic end generated code: output=b56c9e4a530e3894 input=5bf1ef712f5d3610]*/ { if (!PyType_Check(subclass)) { PyErr_SetString(PyExc_TypeError, "issubclass() arg 1 must be a class"); return NULL; } PyObject *ok, *subclasses = NULL, *result = NULL; _abcmodule_state *state = NULL; Py_ssize_t pos; int incache; _abc_data *impl = _get_impl(module, self); if (impl == NULL) { return NULL; } /* 1. Check cache. */ incache = _in_weak_set(impl, &impl->_abc_cache, subclass); if (incache < 0) { goto end; } if (incache > 0) { result = Py_True; goto end; } state = get_abc_state(module); /* 2. Check negative cache; may have to invalidate. */ uint64_t invalidation_counter = get_invalidation_counter(state); if (get_cache_version(impl) < invalidation_counter) { /* Invalidate the negative cache. */ PyObject *negative_cache; Py_BEGIN_CRITICAL_SECTION(impl); negative_cache = impl->_abc_negative_cache; Py_END_CRITICAL_SECTION(); if (negative_cache != NULL && PySet_Clear(negative_cache) < 0) { goto end; } set_cache_version(impl, invalidation_counter); } else { incache = _in_weak_set(impl, &impl->_abc_negative_cache, subclass); if (incache < 0) { goto end; } if (incache > 0) { result = Py_False; goto end; } } /* 3. Check the subclass hook. 
*/ ok = PyObject_CallMethodOneArg( (PyObject *)self, &_Py_ID(__subclasshook__), subclass); if (ok == NULL) { goto end; } if (ok == Py_True) { Py_DECREF(ok); if (_add_to_weak_set(impl, &impl->_abc_cache, subclass) < 0) { goto end; } result = Py_True; goto end; } if (ok == Py_False) { Py_DECREF(ok); if (_add_to_weak_set(impl, &impl->_abc_negative_cache, subclass) < 0) { goto end; } result = Py_False; goto end; } if (ok != Py_NotImplemented) { Py_DECREF(ok); PyErr_SetString(PyExc_AssertionError, "__subclasshook__ must return either" " False, True, or NotImplemented"); goto end; } Py_DECREF(ok); /* 4. Check if it's a direct subclass. */ if (PyType_IsSubtype((PyTypeObject *)subclass, (PyTypeObject *)self)) { if (_add_to_weak_set(impl, &impl->_abc_cache, subclass) < 0) { goto end; } result = Py_True; goto end; } /* 5. Check if it's a subclass of a registered class (recursive). */ if (subclasscheck_check_registry(impl, subclass, &result)) { // Exception occurred or result is set. goto end; } /* 6. Check if it's a subclass of a subclass (recursive). */ subclasses = PyObject_CallMethod(self, "__subclasses__", NULL); if (subclasses == NULL) { goto end; } if (!PyList_Check(subclasses)) { PyErr_SetString(PyExc_TypeError, "__subclasses__() must return a list"); goto end; } for (pos = 0; pos < PyList_GET_SIZE(subclasses); pos++) { PyObject *scls = PyList_GetItemRef(subclasses, pos); if (scls == NULL) { goto end; } int r = PyObject_IsSubclass(subclass, scls); Py_DECREF(scls); if (r > 0) { if (_add_to_weak_set(impl, &impl->_abc_cache, subclass) < 0) { goto end; } result = Py_True; goto end; } if (r < 0) { goto end; } } /* No dice; update negative cache. */ if (_add_to_weak_set(impl, &impl->_abc_negative_cache, subclass) < 0) { goto end; } result = Py_False; end: Py_DECREF(impl); Py_XDECREF(subclasses); return Py_XNewRef(result); } static int subclasscheck_check_registry(_abc_data *impl, PyObject *subclass, PyObject **result) { // Fast path: check subclass is in weakref directly. int ret = _in_weak_set(impl, &impl->_abc_registry, subclass); if (ret < 0) { *result = NULL; return -1; } if (ret > 0) { *result = Py_True; return 1; } PyObject *registry_shared; Py_BEGIN_CRITICAL_SECTION(impl); registry_shared = impl->_abc_registry; Py_END_CRITICAL_SECTION(); if (registry_shared == NULL) { return 0; } // Make a local copy of the registry to protect against concurrent // modifications of _abc_registry. PyObject *registry = PyFrozenSet_New(registry_shared); if (registry == NULL) { return -1; } PyObject *key; Py_ssize_t pos = 0; Py_hash_t hash; while (_PySet_NextEntry(registry, &pos, &key, &hash)) { PyObject *rkey; if (PyWeakref_GetRef(key, &rkey) < 0) { // Someone inject non-weakref type in the registry. ret = -1; break; } if (rkey == NULL) { continue; } int r = PyObject_IsSubclass(subclass, rkey); Py_DECREF(rkey); if (r < 0) { ret = -1; break; } if (r > 0) { if (_add_to_weak_set(impl, &impl->_abc_cache, subclass) < 0) { ret = -1; break; } *result = Py_True; ret = 1; break; } } Py_DECREF(registry); return ret; } /*[clinic input] _abc.get_cache_token Returns the current ABC cache token. The token is an opaque object (supporting equality testing) identifying the current version of the ABC cache for virtual subclasses. The token changes with every call to register() on any ABC. 
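(Illustrative aside, not part of the original module; the helper names below are
hypothetical.) The token is simply the module-wide invalidation counter, so a
consumer can cache a derived value together with the token and recompute only
when the token has moved on:

    typedef struct { uint64_t token; PyObject *value; } cached_t;

    static PyObject *recompute(void);   // hypothetical: whatever is being cached

    static PyObject *cached_get(cached_t *c, _abcmodule_state *state)
    {
        uint64_t now = get_invalidation_counter(state);
        if (c->value == NULL || c->token != now) {
            Py_XSETREF(c->value, recompute());
            c->token = now;
        }
        return c->value;
    }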
[clinic start generated code]*/ static PyObject * _abc_get_cache_token_impl(PyObject *module) /*[clinic end generated code: output=c7d87841e033dacc input=70413d1c423ad9f9]*/ { _abcmodule_state *state = get_abc_state(module); return PyLong_FromUnsignedLongLong(get_invalidation_counter(state)); } static struct PyMethodDef _abcmodule_methods[] = { _ABC_GET_CACHE_TOKEN_METHODDEF _ABC__ABC_INIT_METHODDEF _ABC__RESET_REGISTRY_METHODDEF _ABC__RESET_CACHES_METHODDEF _ABC__GET_DUMP_METHODDEF _ABC__ABC_REGISTER_METHODDEF _ABC__ABC_INSTANCECHECK_METHODDEF _ABC__ABC_SUBCLASSCHECK_METHODDEF {NULL, NULL} /* sentinel */ }; static int _abcmodule_exec(PyObject *module) { _abcmodule_state *state = get_abc_state(module); state->abc_invalidation_counter = 0; state->_abc_data_type = (PyTypeObject *)PyType_FromModuleAndSpec(module, &_abc_data_type_spec, NULL); if (state->_abc_data_type == NULL) { return -1; } return 0; } static int _abcmodule_traverse(PyObject *module, visitproc visit, void *arg) { _abcmodule_state *state = get_abc_state(module); Py_VISIT(state->_abc_data_type); return 0; } static int _abcmodule_clear(PyObject *module) { _abcmodule_state *state = get_abc_state(module); Py_CLEAR(state->_abc_data_type); return 0; } static void _abcmodule_free(void *module) { (void)_abcmodule_clear((PyObject *)module); } static PyModuleDef_Slot _abcmodule_slots[] = { {Py_mod_exec, _abcmodule_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; static struct PyModuleDef _abcmodule = { PyModuleDef_HEAD_INIT, .m_name = "_abc", .m_doc = _abc__doc__, .m_size = sizeof(_abcmodule_state), .m_methods = _abcmodule_methods, .m_slots = _abcmodule_slots, .m_traverse = _abcmodule_traverse, .m_clear = _abcmodule_clear, .m_free = _abcmodule_free, }; PyMODINIT_FUNC PyInit__abc(void) { return PyModuleDef_Init(&_abcmodule); } /* C Extension module to test all aspects of PEP-3118. Written by Stefan Krah. */ #include "Python.h" /* struct module */ static PyObject *structmodule = NULL; static PyObject *Struct = NULL; static PyObject *calcsize = NULL; /* cache simple format string */ static const char *simple_fmt = "B"; static PyObject *simple_format = NULL; #define SIMPLE_FORMAT(fmt) (fmt == NULL || strcmp(fmt, "B") == 0) #define FIX_FORMAT(fmt) (fmt == NULL ? "B" : fmt) /**************************************************************************/ /* NDArray Object */ /**************************************************************************/ static PyTypeObject NDArray_Type; #define NDArray_Check(v) Py_IS_TYPE(v, &NDArray_Type) #define CHECK_LIST_OR_TUPLE(v) \ do { \ if (!PyList_Check(v) && !PyTuple_Check(v)) { \ PyErr_SetString(PyExc_TypeError, \ #v " must be a list or a tuple"); \ return NULL; \ } \ } while (0) #define PyMem_XFree(v) \ do { if (v) PyMem_Free(v); } while (0) /* Maximum number of dimensions. */ #define ND_MAX_NDIM (2 * PyBUF_MAX_NDIM) /* Check for the presence of suboffsets in the first dimension. */ #define HAVE_PTR(suboffsets) (suboffsets && suboffsets[0] >= 0) /* Adjust ptr if suboffsets are present. */ #define ADJUST_PTR(ptr, suboffsets) \ (HAVE_PTR(suboffsets) ? 
*((char**)ptr) + suboffsets[0] : ptr) /* Default: NumPy style (strides), read-only, no var-export, C-style layout */ #define ND_DEFAULT 0x000 /* User configurable flags for the ndarray */ #define ND_VAREXPORT 0x001 /* change layout while buffers are exported */ /* User configurable flags for each base buffer */ #define ND_WRITABLE 0x002 /* mark base buffer as writable */ #define ND_FORTRAN 0x004 /* Fortran contiguous layout */ #define ND_SCALAR 0x008 /* scalar: ndim = 0 */ #define ND_PIL 0x010 /* convert to PIL-style array (suboffsets) */ #define ND_REDIRECT 0x020 /* redirect buffer requests */ #define ND_GETBUF_FAIL 0x040 /* trigger getbuffer failure */ #define ND_GETBUF_UNDEFINED 0x080 /* undefined view.obj */ /* Internal flags for the base buffer */ #define ND_C 0x100 /* C contiguous layout (default) */ #define ND_OWN_ARRAYS 0x200 /* consumer owns arrays */ /* ndarray properties */ #define ND_IS_CONSUMER(nd) \ (((NDArrayObject *)nd)->head == &((NDArrayObject *)nd)->staticbuf) /* ndbuf->flags properties */ #define ND_C_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_C))) #define ND_FORTRAN_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_FORTRAN))) #define ND_ANY_CONTIGUOUS(flags) (!!(flags&(ND_SCALAR|ND_C|ND_FORTRAN))) /* getbuffer() requests */ #define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT) #define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) #define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) #define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS) #define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES) #define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND) #define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE) #define REQ_FORMAT(flags) (flags&PyBUF_FORMAT) /* Single node of a list of base buffers. The list is needed to implement changes in memory layout while exported buffers are active. 
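(Illustrative aside, not part of the original module.) The HAVE_PTR/ADJUST_PTR
macros defined above implement the PEP 3118 suboffsets rule: a non-negative
suboffsets[k] means that the location reached after applying strides[k] holds a
pointer, which must be dereferenced and then offset by suboffsets[k]. A sketch
of full element lookup under that rule, essentially the reference algorithm from
the buffer protocol documentation:

    static void *lookup_item(const Py_buffer *view, const Py_ssize_t *indices)
    {
        char *p = (char *)view->buf;
        for (int k = 0; k < view->ndim; k++) {
            p += view->strides[k] * indices[k];
            if (view->suboffsets != NULL && view->suboffsets[k] >= 0)
                p = *(char **)p + view->suboffsets[k];
        }
        return p;
    }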
*/ static PyTypeObject NDArray_Type; struct ndbuf; typedef struct ndbuf { struct ndbuf *next; struct ndbuf *prev; Py_ssize_t len; /* length of data */ Py_ssize_t offset; /* start of the array relative to data */ char *data; /* raw data */ int flags; /* capabilities of the base buffer */ Py_ssize_t exports; /* number of exports */ Py_buffer base; /* base buffer */ } ndbuf_t; typedef struct { PyObject_HEAD int flags; /* ndarray flags */ ndbuf_t staticbuf; /* static buffer for re-exporting mode */ ndbuf_t *head; /* currently active base buffer */ } NDArrayObject; static ndbuf_t * ndbuf_new(Py_ssize_t nitems, Py_ssize_t itemsize, Py_ssize_t offset, int flags) { ndbuf_t *ndbuf; Py_buffer *base; Py_ssize_t len; len = nitems * itemsize; if (offset % itemsize) { PyErr_SetString(PyExc_ValueError, "offset must be a multiple of itemsize"); return NULL; } if (offset < 0 || offset+itemsize > len) { PyErr_SetString(PyExc_ValueError, "offset out of bounds"); return NULL; } ndbuf = PyMem_Malloc(sizeof *ndbuf); if (ndbuf == NULL) { PyErr_NoMemory(); return NULL; } ndbuf->next = NULL; ndbuf->prev = NULL; ndbuf->len = len; ndbuf->offset= offset; ndbuf->data = PyMem_Malloc(len); if (ndbuf->data == NULL) { PyErr_NoMemory(); PyMem_Free(ndbuf); return NULL; } ndbuf->flags = flags; ndbuf->exports = 0; base = &ndbuf->base; base->obj = NULL; base->buf = ndbuf->data; base->len = len; base->itemsize = 1; base->readonly = 0; base->format = NULL; base->ndim = 1; base->shape = NULL; base->strides = NULL; base->suboffsets = NULL; base->internal = ndbuf; return ndbuf; } static void ndbuf_free(ndbuf_t *ndbuf) { Py_buffer *base = &ndbuf->base; PyMem_XFree(ndbuf->data); PyMem_XFree(base->format); PyMem_XFree(base->shape); PyMem_XFree(base->strides); PyMem_XFree(base->suboffsets); PyMem_Free(ndbuf); } static void ndbuf_push(NDArrayObject *nd, ndbuf_t *elt) { elt->next = nd->head; if (nd->head) nd->head->prev = elt; nd->head = elt; elt->prev = NULL; } static void ndbuf_delete(NDArrayObject *nd, ndbuf_t *elt) { if (elt->prev) elt->prev->next = elt->next; else nd->head = elt->next; if (elt->next) elt->next->prev = elt->prev; ndbuf_free(elt); } static void ndbuf_pop(NDArrayObject *nd) { ndbuf_delete(nd, nd->head); } static PyObject * ndarray_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { NDArrayObject *nd; nd = PyObject_New(NDArrayObject, &NDArray_Type); if (nd == NULL) return NULL; nd->flags = 0; nd->head = NULL; return (PyObject *)nd; } static void ndarray_dealloc(PyObject *op) { NDArrayObject *self = (NDArrayObject*)op; if (self->head) { if (ND_IS_CONSUMER(self)) { Py_buffer *base = &self->head->base; if (self->head->flags & ND_OWN_ARRAYS) { PyMem_XFree(base->shape); PyMem_XFree(base->strides); PyMem_XFree(base->suboffsets); } PyBuffer_Release(base); } else { while (self->head) ndbuf_pop(self); } } PyObject_Free(self); } static int ndarray_init_staticbuf(PyObject *exporter, NDArrayObject *nd, int flags) { Py_buffer *base = &nd->staticbuf.base; if (PyObject_GetBuffer(exporter, base, flags) < 0) return -1; nd->head = &nd->staticbuf; nd->head->next = NULL; nd->head->prev = NULL; nd->head->len = -1; nd->head->offset = -1; nd->head->data = NULL; nd->head->flags = base->readonly ? 
0 : ND_WRITABLE; nd->head->exports = 0; return 0; } static void init_flags(ndbuf_t *ndbuf) { if (ndbuf->base.ndim == 0) ndbuf->flags |= ND_SCALAR; if (ndbuf->base.suboffsets) ndbuf->flags |= ND_PIL; if (PyBuffer_IsContiguous(&ndbuf->base, 'C')) ndbuf->flags |= ND_C; if (PyBuffer_IsContiguous(&ndbuf->base, 'F')) ndbuf->flags |= ND_FORTRAN; } /****************************************************************************/ /* Buffer/List conversions */ /****************************************************************************/ static Py_ssize_t *strides_from_shape(const ndbuf_t *, int flags); /* Get number of members in a struct: see issue #12740 */ typedef struct { PyObject_HEAD Py_ssize_t s_size; Py_ssize_t s_len; } PyPartialStructObject; static Py_ssize_t get_nmemb(PyObject *s) { return ((PyPartialStructObject *)s)->s_len; } /* Pack all items into the buffer of 'obj'. The 'format' parameter must be in struct module syntax. For standard C types, a single item is an integer. For compound types, a single item is a tuple of integers. */ static int pack_from_list(PyObject *obj, PyObject *items, PyObject *format, Py_ssize_t itemsize) { PyObject *structobj, *pack_into; PyObject *args, *offset; PyObject *item, *tmp; Py_ssize_t nitems; /* number of items */ Py_ssize_t nmemb; /* number of members in a single item */ Py_ssize_t i, j; int ret = 0; assert(PyObject_CheckBuffer(obj)); assert(PyList_Check(items) || PyTuple_Check(items)); structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL); if (structobj == NULL) return -1; nitems = PySequence_Fast_GET_SIZE(items); nmemb = get_nmemb(structobj); assert(nmemb >= 1); pack_into = PyObject_GetAttrString(structobj, "pack_into"); if (pack_into == NULL) { Py_DECREF(structobj); return -1; } /* nmemb >= 1 */ args = PyTuple_New(2 + nmemb); if (args == NULL) { Py_DECREF(pack_into); Py_DECREF(structobj); return -1; } offset = NULL; for (i = 0; i < nitems; i++) { /* Loop invariant: args[j] are borrowed references or NULL. 
*/ PyTuple_SET_ITEM(args, 0, obj); for (j = 1; j < 2+nmemb; j++) PyTuple_SET_ITEM(args, j, NULL); Py_XDECREF(offset); offset = PyLong_FromSsize_t(i*itemsize); if (offset == NULL) { ret = -1; break; } PyTuple_SET_ITEM(args, 1, offset); item = PySequence_Fast_GET_ITEM(items, i); if ((PyBytes_Check(item) || PyLong_Check(item) || PyFloat_Check(item)) && nmemb == 1) { PyTuple_SET_ITEM(args, 2, item); } else if ((PyList_Check(item) || PyTuple_Check(item)) && PySequence_Length(item) == nmemb) { for (j = 0; j < nmemb; j++) { tmp = PySequence_Fast_GET_ITEM(item, j); PyTuple_SET_ITEM(args, 2+j, tmp); } } else { PyErr_SetString(PyExc_ValueError, "mismatch between initializer element and format string"); ret = -1; break; } tmp = PyObject_CallObject(pack_into, args); if (tmp == NULL) { ret = -1; break; } Py_DECREF(tmp); } Py_INCREF(obj); /* args[0] */ /* args[1]: offset is either NULL or should be dealloc'd */ for (i = 2; i < 2+nmemb; i++) { tmp = PyTuple_GET_ITEM(args, i); Py_XINCREF(tmp); } Py_DECREF(args); Py_DECREF(pack_into); Py_DECREF(structobj); return ret; } /* Pack single element */ static int pack_single(char *ptr, PyObject *item, const char *fmt, Py_ssize_t itemsize) { PyObject *structobj = NULL, *pack_into = NULL, *args = NULL; PyObject *format = NULL, *mview = NULL, *zero = NULL; Py_ssize_t i, nmemb; int ret = -1; PyObject *x; if (fmt == NULL) fmt = "B"; format = PyUnicode_FromString(fmt); if (format == NULL) goto out; structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL); if (structobj == NULL) goto out; nmemb = get_nmemb(structobj); assert(nmemb >= 1); mview = PyMemoryView_FromMemory(ptr, itemsize, PyBUF_WRITE); if (mview == NULL) goto out; zero = PyLong_FromLong(0); if (zero == NULL) goto out; pack_into = PyObject_GetAttrString(structobj, "pack_into"); if (pack_into == NULL) goto out; args = PyTuple_New(2+nmemb); if (args == NULL) goto out; PyTuple_SET_ITEM(args, 0, mview); PyTuple_SET_ITEM(args, 1, zero); if ((PyBytes_Check(item) || PyLong_Check(item) || PyFloat_Check(item)) && nmemb == 1) { PyTuple_SET_ITEM(args, 2, item); } else if ((PyList_Check(item) || PyTuple_Check(item)) && PySequence_Length(item) == nmemb) { for (i = 0; i < nmemb; i++) { x = PySequence_Fast_GET_ITEM(item, i); PyTuple_SET_ITEM(args, 2+i, x); } } else { PyErr_SetString(PyExc_ValueError, "mismatch between initializer element and format string"); goto args_out; } x = PyObject_CallObject(pack_into, args); if (x != NULL) { Py_DECREF(x); ret = 0; } args_out: for (i = 0; i < 2+nmemb; i++) Py_XINCREF(PyTuple_GET_ITEM(args, i)); Py_XDECREF(args); out: Py_XDECREF(pack_into); Py_XDECREF(zero); Py_XDECREF(mview); Py_XDECREF(structobj); Py_XDECREF(format); return ret; } static void copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize, char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets, char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets, char *mem) { Py_ssize_t i; assert(ndim >= 1); if (ndim == 1) { if (!HAVE_PTR(dsuboffsets) && !HAVE_PTR(ssuboffsets) && dstrides[0] == itemsize && sstrides[0] == itemsize) { memmove(dptr, sptr, shape[0] * itemsize); } else { char *p; assert(mem != NULL); for (i=0, p=mem; i<shape[0]; p+=itemsize, sptr+=sstrides[0], i++) { char *xsptr = ADJUST_PTR(sptr, ssuboffsets); memcpy(p, xsptr, itemsize); } for (i=0, p=mem; i<shape[0]; p+=itemsize, dptr+=dstrides[0], i++) { char *xdptr = ADJUST_PTR(dptr, dsuboffsets); memcpy(xdptr, p, itemsize); } } return; } assert(ndim > 1); for (i = 0; i < shape[0]; i++) { char *xdptr = ADJUST_PTR(dptr, dsuboffsets); char *xsptr = ADJUST_PTR(sptr, ssuboffsets); copy_rec(shape+1, ndim-1, itemsize, xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL, xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL, mem); dptr += dstrides[0]; sptr += sstrides[0]; } } static int cmp_structure(Py_buffer *dest, Py_buffer *src) { Py_ssize_t i; if (strcmp(FIX_FORMAT(dest->format), FIX_FORMAT(src->format)) != 0 || dest->itemsize != src->itemsize || dest->ndim != src->ndim) return -1; for (i = 0; i < dest->ndim; i++) { if (dest->shape[i] != src->shape[i]) return -1; if (dest->shape[i] == 0) break; } return 0; } /* Copy src to dest. Both buffers must have the same format, itemsize, ndim and shape.
Copying is atomic, the function never fails with a partial copy. */ static int copy_buffer(Py_buffer *dest, Py_buffer *src) { char *mem = NULL; assert(dest->ndim > 0); if (cmp_structure(dest, src) < 0) { PyErr_SetString(PyExc_ValueError, "ndarray assignment: lvalue and rvalue have different structures"); return -1; } if ((dest->suboffsets && dest->suboffsets[dest->ndim-1] >= 0) || (src->suboffsets && src->suboffsets[src->ndim-1] >= 0) || dest->strides[dest->ndim-1] != dest->itemsize || src->strides[src->ndim-1] != src->itemsize) { mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize); if (mem == NULL) { PyErr_NoMemory(); return -1; } } copy_rec(dest->shape, dest->ndim, dest->itemsize, dest->buf, dest->strides, dest->suboffsets, src->buf, src->strides, src->suboffsets, mem); PyMem_XFree(mem); return 0; } /* Unpack single element */ static PyObject * unpack_single(char *ptr, const char *fmt, Py_ssize_t itemsize) { PyObject *x, *unpack_from, *mview; if (fmt == NULL) { fmt = "B"; itemsize = 1; } unpack_from = PyObject_GetAttrString(structmodule, "unpack_from"); if (unpack_from == NULL) return NULL; mview = PyMemoryView_FromMemory(ptr, itemsize, PyBUF_READ); if (mview == NULL) { Py_DECREF(unpack_from); return NULL; } x = PyObject_CallFunction(unpack_from, "sO", fmt, mview); Py_DECREF(unpack_from); Py_DECREF(mview); if (x == NULL) return NULL; if (PyTuple_GET_SIZE(x) == 1) { PyObject *tmp = PyTuple_GET_ITEM(x, 0); Py_INCREF(tmp); Py_DECREF(x); return tmp; } return x; } /* Unpack a multi-dimensional matrix into a nested list. Return a scalar for ndim = 0. */ static PyObject * unpack_rec(PyObject *unpack_from, char *ptr, PyObject *mview, char *item, const Py_ssize_t *shape, const Py_ssize_t *strides, const Py_ssize_t *suboffsets, Py_ssize_t ndim, Py_ssize_t itemsize) { PyObject *lst, *x; Py_ssize_t i; assert(ndim >= 0); assert(shape != NULL); assert(strides != NULL); if (ndim == 0) { memcpy(item, ptr, itemsize); x = PyObject_CallFunctionObjArgs(unpack_from, mview, NULL); if (x == NULL) return NULL; if (PyTuple_GET_SIZE(x) == 1) { PyObject *tmp = PyTuple_GET_ITEM(x, 0); Py_INCREF(tmp); Py_DECREF(x); return tmp; } return x; } lst = PyList_New(shape[0]); if (lst == NULL) return NULL; for (i = 0; i < shape[0]; ptr+=strides[0], i++) { char *nextptr = ADJUST_PTR(ptr, suboffsets); x = unpack_rec(unpack_from, nextptr, mview, item, shape+1, strides+1, suboffsets ? 
suboffsets+1 : NULL, ndim-1, itemsize); if (x == NULL) { Py_DECREF(lst); return NULL; } PyList_SET_ITEM(lst, i, x); } return lst; } static PyObject * ndarray_as_list(NDArrayObject *nd) { PyObject *structobj = NULL, *unpack_from = NULL; PyObject *lst = NULL, *mview = NULL; Py_buffer *base = &nd->head->base; Py_ssize_t *shape = base->shape; Py_ssize_t *strides = base->strides; Py_ssize_t simple_shape[1]; Py_ssize_t simple_strides[1]; char *item = NULL; PyObject *format; char *fmt = base->format; base = &nd->head->base; if (fmt == NULL) { PyErr_SetString(PyExc_ValueError, "ndarray: tolist() does not support format=NULL, use " "tobytes()"); return NULL; } if (shape == NULL) { assert(ND_C_CONTIGUOUS(nd->head->flags)); assert(base->strides == NULL); assert(base->ndim <= 1); shape = simple_shape; shape[0] = base->len; strides = simple_strides; strides[0] = base->itemsize; } else if (strides == NULL) { assert(ND_C_CONTIGUOUS(nd->head->flags)); strides = strides_from_shape(nd->head, 0); if (strides == NULL) return NULL; } format = PyUnicode_FromString(fmt); if (format == NULL) goto out; structobj = PyObject_CallFunctionObjArgs(Struct, format, NULL); Py_DECREF(format); if (structobj == NULL) goto out; unpack_from = PyObject_GetAttrString(structobj, "unpack_from"); if (unpack_from == NULL) goto out; item = PyMem_Malloc(base->itemsize); if (item == NULL) { PyErr_NoMemory(); goto out; } mview = PyMemoryView_FromMemory(item, base->itemsize, PyBUF_WRITE); if (mview == NULL) goto out; lst = unpack_rec(unpack_from, base->buf, mview, item, shape, strides, base->suboffsets, base->ndim, base->itemsize); out: Py_XDECREF(mview); PyMem_XFree(item); Py_XDECREF(unpack_from); Py_XDECREF(structobj); if (strides != base->strides && strides != simple_strides) PyMem_XFree(strides); return lst; } /****************************************************************************/ /* Initialize ndbuf */ /****************************************************************************/ /* State of a new ndbuf during initialization. 'OK' means that initialization is complete. 'PTR' means that a pointer has been initialized, but the state of the memory is still undefined and ndbuf->offset is disregarded. 
+-----------------+-----------+-------------+----------------+ | | ndbuf_new | init_simple | init_structure | +-----------------+-----------+-------------+----------------+ | next | OK (NULL) | OK | OK | +-----------------+-----------+-------------+----------------+ | prev | OK (NULL) | OK | OK | +-----------------+-----------+-------------+----------------+ | len | OK | OK | OK | +-----------------+-----------+-------------+----------------+ | offset | OK | OK | OK | +-----------------+-----------+-------------+----------------+ | data | PTR | OK | OK | +-----------------+-----------+-------------+----------------+ | flags | user | user | OK | +-----------------+-----------+-------------+----------------+ | exports | OK (0) | OK | OK | +-----------------+-----------+-------------+----------------+ | base.obj | OK (NULL) | OK | OK | +-----------------+-----------+-------------+----------------+ | base.buf | PTR | PTR | OK | +-----------------+-----------+-------------+----------------+ | base.len | len(data) | len(data) | OK | +-----------------+-----------+-------------+----------------+ | base.itemsize | 1 | OK | OK | +-----------------+-----------+-------------+----------------+ | base.readonly | 0 | OK | OK | +-----------------+-----------+-------------+----------------+ | base.format | NULL | OK | OK | +-----------------+-----------+-------------+----------------+ | base.ndim | 1 | 1 | OK | +-----------------+-----------+-------------+----------------+ | base.shape | NULL | NULL | OK | +-----------------+-----------+-------------+----------------+ | base.strides | NULL | NULL | OK | +-----------------+-----------+-------------+----------------+ | base.suboffsets | NULL | NULL | OK | +-----------------+-----------+-------------+----------------+ | base.internal | OK | OK | OK | +-----------------+-----------+-------------+----------------+ */ static Py_ssize_t get_itemsize(PyObject *format) { PyObject *tmp; Py_ssize_t itemsize; tmp = PyObject_CallFunctionObjArgs(calcsize, format, NULL); if (tmp == NULL) return -1; itemsize = PyLong_AsSsize_t(tmp); Py_DECREF(tmp); return itemsize; } static char * get_format(PyObject *format) { PyObject *tmp; char *fmt; tmp = PyUnicode_AsASCIIString(format); if (tmp == NULL) return NULL; fmt = PyMem_Malloc(PyBytes_GET_SIZE(tmp)+1); if (fmt == NULL) { PyErr_NoMemory(); Py_DECREF(tmp); return NULL; } strcpy(fmt, PyBytes_AS_STRING(tmp)); Py_DECREF(tmp); return fmt; } static int init_simple(ndbuf_t *ndbuf, PyObject *items, PyObject *format, Py_ssize_t itemsize) { PyObject *mview; Py_buffer *base = &ndbuf->base; int ret; mview = PyMemoryView_FromBuffer(base); if (mview == NULL) return -1; ret = pack_from_list(mview, items, format, itemsize); Py_DECREF(mview); if (ret < 0) return -1; base->readonly = !(ndbuf->flags & ND_WRITABLE); base->itemsize = itemsize; base->format = get_format(format); if (base->format == NULL) return -1; return 0; } static Py_ssize_t * seq_as_ssize_array(PyObject *seq, Py_ssize_t len, int is_shape) { Py_ssize_t *dest; Py_ssize_t x, i; /* ndim = len <= ND_MAX_NDIM, so PyMem_New() is actually not needed. */ dest = PyMem_New(Py_ssize_t, len); if (dest == NULL) { PyErr_NoMemory(); return NULL; } for (i = 0; i < len; i++) { PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i); if (!PyLong_Check(tmp)) { PyErr_Format(PyExc_ValueError, "elements of %s must be integers", is_shape ? 
"shape" : "strides"); PyMem_Free(dest); return NULL; } x = PyLong_AsSsize_t(tmp); if (PyErr_Occurred()) { PyMem_Free(dest); return NULL; } if (is_shape && x < 0) { PyErr_Format(PyExc_ValueError, "elements of shape must be integers >= 0"); PyMem_Free(dest); return NULL; } dest[i] = x; } return dest; } static Py_ssize_t * strides_from_shape(const ndbuf_t *ndbuf, int flags) { const Py_buffer *base = &ndbuf->base; Py_ssize_t *s, i; s = PyMem_Malloc(base->ndim * (sizeof *s)); if (s == NULL) { PyErr_NoMemory(); return NULL; } if (flags & ND_FORTRAN) { s[0] = base->itemsize; for (i = 1; i < base->ndim; i++) s[i] = s[i-1] * base->shape[i-1]; } else { s[base->ndim-1] = base->itemsize; for (i = base->ndim-2; i >= 0; i--) s[i] = s[i+1] * base->shape[i+1]; } return s; } /* Bounds check: len := complete length of allocated memory offset := start of the array A single array element is indexed by: i = indices[0] * strides[0] + indices[1] * strides[1] + ... imin is reached when all indices[n] combined with positive strides are 0 and all indices combined with negative strides are shape[n]-1, which is the maximum index for the nth dimension. imax is reached when all indices[n] combined with negative strides are 0 and all indices combined with positive strides are shape[n]-1. */ static int verify_structure(Py_ssize_t len, Py_ssize_t itemsize, Py_ssize_t offset, const Py_ssize_t *shape, const Py_ssize_t *strides, Py_ssize_t ndim) { Py_ssize_t imin, imax; Py_ssize_t n; assert(ndim >= 0); if (ndim == 0 && (offset < 0 || offset+itemsize > len)) goto invalid_combination; for (n = 0; n < ndim; n++) if (strides[n] % itemsize) { PyErr_SetString(PyExc_ValueError, "strides must be a multiple of itemsize"); return -1; } for (n = 0; n < ndim; n++) if (shape[n] == 0) return 0; imin = imax = 0; for (n = 0; n < ndim; n++) if (strides[n] <= 0) imin += (shape[n]-1) * strides[n]; else imax += (shape[n]-1) * strides[n]; if (imin + offset < 0 || imax + offset + itemsize > len) goto invalid_combination; return 0; invalid_combination: PyErr_SetString(PyExc_ValueError, "invalid combination of buffer, shape and strides"); return -1; } /* Convert a NumPy-style array to an array using suboffsets to stride in the first dimension. Requirements: ndim > 0. Contiguous example ================== Input: ------ shape = {2, 2, 3}; strides = {6, 3, 1}; suboffsets = NULL; data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; buf = &data[0] Output: ------- shape = {2, 2, 3}; strides = {sizeof(char *), 3, 1}; suboffsets = {0, -1, -1}; data = {p1, p2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; | | ^ ^ `---'---' | | | `---------------------' buf = &data[0] So, in the example the input resembles the three-dimensional array char v[2][2][3], while the output resembles an array of two pointers to two-dimensional arrays: char (*v[2])[2][3]. Non-contiguous example: ======================= Input (with offset and negative strides): ----------------------------------------- shape = {2, 2, 3}; strides = {-6, 3, -1}; offset = 8 suboffsets = NULL; data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; Output: ------- shape = {2, 2, 3}; strides = {-sizeof(char *), 3, -1}; suboffsets = {2, -1, -1}; newdata = {p1, p2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; | | ^ ^ ^ ^ `---'---' | | `- p2+suboffsets[0] | `-----------|--- p1+suboffsets[0] `---------------------' buf = &newdata[1] # striding backwards over the pointers. suboffsets[0] is the same as the offset that one would specify if the two {2, 3} subarrays were created directly, hence the name. 
*/ static int init_suboffsets(ndbuf_t *ndbuf) { Py_buffer *base = &ndbuf->base; Py_ssize_t start, step; Py_ssize_t imin, suboffset0; Py_ssize_t addsize; Py_ssize_t n; char *data; assert(base->ndim > 0); assert(base->suboffsets == NULL); /* Allocate new data with additional space for shape[0] pointers. */ addsize = base->shape[0] * (sizeof (char *)); /* Align array start to a multiple of 8. */ addsize = 8 * ((addsize + 7) / 8); data = PyMem_Malloc(ndbuf->len + addsize); if (data == NULL) { PyErr_NoMemory(); return -1; } memcpy(data + addsize, ndbuf->data, ndbuf->len); PyMem_Free(ndbuf->data); ndbuf->data = data; ndbuf->len += addsize; base->buf = ndbuf->data; /* imin: minimum index of the input array relative to ndbuf->offset. suboffset0: offset for each sub-array of the output. This is the same as calculating -imin' for a sub-array of ndim-1. */ imin = suboffset0 = 0; for (n = 0; n < base->ndim; n++) { if (base->shape[n] == 0) break; if (base->strides[n] <= 0) { Py_ssize_t x = (base->shape[n]-1) * base->strides[n]; imin += x; suboffset0 += (n >= 1) ? -x : 0; } } /* Initialize the array of pointers to the sub-arrays. */ start = addsize + ndbuf->offset + imin; step = base->strides[0] < 0 ? -base->strides[0] : base->strides[0]; for (n = 0; n < base->shape[0]; n++) ((char **)base->buf)[n] = (char *)base->buf + start + n*step; /* Initialize suboffsets. */ base->suboffsets = PyMem_Malloc(base->ndim * (sizeof *base->suboffsets)); if (base->suboffsets == NULL) { PyErr_NoMemory(); return -1; } base->suboffsets[0] = suboffset0; for (n = 1; n < base->ndim; n++) base->suboffsets[n] = -1; /* Adjust strides for the first (zeroth) dimension. */ if (base->strides[0] >= 0) { base->strides[0] = sizeof(char *); } else { /* Striding backwards. */ base->strides[0] = -(Py_ssize_t)sizeof(char *); if (base->shape[0] > 0) base->buf = (char *)base->buf + (base->shape[0]-1) * sizeof(char *); } ndbuf->flags &= ~(ND_C|ND_FORTRAN); ndbuf->offset = 0; return 0; } static void init_len(Py_buffer *base) { Py_ssize_t i; base->len = 1; for (i = 0; i < base->ndim; i++) base->len *= base->shape[i]; base->len *= base->itemsize; } static int init_structure(ndbuf_t *ndbuf, PyObject *shape, PyObject *strides, Py_ssize_t ndim) { Py_buffer *base = &ndbuf->base; base->ndim = (int)ndim; if (ndim == 0) { if (ndbuf->flags & ND_PIL) { PyErr_SetString(PyExc_TypeError, "ndim = 0 cannot be used in conjunction with ND_PIL"); return -1; } ndbuf->flags |= (ND_SCALAR|ND_C|ND_FORTRAN); return 0; } /* shape */ base->shape = seq_as_ssize_array(shape, ndim, 1); if (base->shape == NULL) return -1; /* strides */ if (strides) { base->strides = seq_as_ssize_array(strides, ndim, 0); } else { base->strides = strides_from_shape(ndbuf, ndbuf->flags); } if (base->strides == NULL) return -1; if (verify_structure(base->len, base->itemsize, ndbuf->offset, base->shape, base->strides, ndim) < 0) return -1; /* buf */ base->buf = ndbuf->data + ndbuf->offset; /* len */ init_len(base); /* ndbuf->flags */ if (PyBuffer_IsContiguous(base, 'C')) ndbuf->flags |= ND_C; if (PyBuffer_IsContiguous(base, 'F')) ndbuf->flags |= ND_FORTRAN; /* convert numpy array to suboffset representation */ if (ndbuf->flags & ND_PIL) { /* modifies base->buf, base->strides and base->suboffsets **/ return init_suboffsets(ndbuf); } return 0; } static ndbuf_t * init_ndbuf(PyObject *items, PyObject *shape, PyObject *strides, Py_ssize_t offset, PyObject *format, int flags) { ndbuf_t *ndbuf; Py_ssize_t ndim; Py_ssize_t nitems; Py_ssize_t itemsize; /* ndim = len(shape) */ CHECK_LIST_OR_TUPLE(shape); 
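    /* Illustrative sketch (assumption, not taken from the sources): at the
     * Python level this path is reached by constructor calls such as
     *
     *     from _testbuffer import ndarray, ND_WRITABLE
     *     nd = ndarray(list(range(6)), shape=[2, 3], flags=ND_WRITABLE)
     *
     * where ndim == len(shape) == 2, format defaults to 'B' and
     * itemsize == struct.calcsize('B') == 1. */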
ndim = PySequence_Fast_GET_SIZE(shape); if (ndim > ND_MAX_NDIM) { PyErr_Format(PyExc_ValueError, "ndim must not exceed %d", ND_MAX_NDIM); return NULL; } /* len(strides) = len(shape) */ if (strides) { CHECK_LIST_OR_TUPLE(strides); if (PySequence_Fast_GET_SIZE(strides) == 0) strides = NULL; else if (flags & ND_FORTRAN) { PyErr_SetString(PyExc_TypeError, "ND_FORTRAN cannot be used together with strides"); return NULL; } else if (PySequence_Fast_GET_SIZE(strides) != ndim) { PyErr_SetString(PyExc_ValueError, "len(shape) != len(strides)"); return NULL; } } /* itemsize */ itemsize = get_itemsize(format); if (itemsize <= 0) { if (itemsize == 0) { PyErr_SetString(PyExc_ValueError, "itemsize must not be zero"); } return NULL; } /* convert scalar to list */ if (ndim == 0) { items = PyTuple_Pack(1, items); if (items == NULL) return NULL; } else { CHECK_LIST_OR_TUPLE(items); Py_INCREF(items); } /* number of items */ nitems = PySequence_Fast_GET_SIZE(items); if (nitems == 0) { PyErr_SetString(PyExc_ValueError, "initializer list or tuple must not be empty"); Py_DECREF(items); return NULL; } ndbuf = ndbuf_new(nitems, itemsize, offset, flags); if (ndbuf == NULL) { Py_DECREF(items); return NULL; } if (init_simple(ndbuf, items, format, itemsize) < 0) goto error; if (init_structure(ndbuf, shape, strides, ndim) < 0) goto error; Py_DECREF(items); return ndbuf; error: Py_DECREF(items); ndbuf_free(ndbuf); return NULL; } /* initialize and push a new base onto the linked list */ static int ndarray_push_base(NDArrayObject *nd, PyObject *items, PyObject *shape, PyObject *strides, Py_ssize_t offset, PyObject *format, int flags) { ndbuf_t *ndbuf; ndbuf = init_ndbuf(items, shape, strides, offset, format, flags); if (ndbuf == NULL) return -1; ndbuf_push(nd, ndbuf); return 0; } #define PyBUF_UNUSED 0x10000 static int ndarray_init(PyObject *self, PyObject *args, PyObject *kwds) { NDArrayObject *nd = (NDArrayObject *)self; static char *kwlist[] = { "obj", "shape", "strides", "offset", "format", "flags", "getbuf", NULL }; PyObject *v = NULL; /* initializer: scalar, list, tuple or base object */ PyObject *shape = NULL; /* size of each dimension */ PyObject *strides = NULL; /* number of bytes to the next elt in each dim */ Py_ssize_t offset = 0; /* buffer offset */ PyObject *format = simple_format; /* struct module specifier: "B" */ int flags = ND_DEFAULT; /* base buffer and ndarray flags */ int getbuf = PyBUF_UNUSED; /* re-exporter: getbuffer request flags */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OOnOii", kwlist, &v, &shape, &strides, &offset, &format, &flags, &getbuf)) return -1; /* NDArrayObject is re-exporter */ if (PyObject_CheckBuffer(v) && shape == NULL) { if (strides || offset || format != simple_format || !(flags == ND_DEFAULT || flags == ND_REDIRECT)) { PyErr_SetString(PyExc_TypeError, "construction from exporter object only takes 'obj', 'getbuf' " "and 'flags' arguments"); return -1; } getbuf = (getbuf == PyBUF_UNUSED) ? PyBUF_FULL_RO : getbuf; if (ndarray_init_staticbuf(v, nd, getbuf) < 0) return -1; init_flags(nd->head); nd->head->flags |= flags; return 0; } /* NDArrayObject is the original base object. 
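      Illustrative sketch (not taken from the sources): this branch handles
      calls that supply a shape, e.g.

          nd = ndarray([0, 1, 2, 3], shape=[4], flags=ND_WRITABLE)

      while the exporter branch above handles re-exports of an existing
      buffer, e.g. nd = ndarray(bytearray(b"0123"), getbuf=PyBUF_SIMPLE).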
*/ if (getbuf != PyBUF_UNUSED) { PyErr_SetString(PyExc_TypeError, "getbuf argument only valid for construction from exporter " "object"); return -1; } if (shape == NULL) { PyErr_SetString(PyExc_TypeError, "shape is a required argument when constructing from " "list, tuple or scalar"); return -1; } if (flags & ND_VAREXPORT) { nd->flags |= ND_VAREXPORT; flags &= ~ND_VAREXPORT; } /* Initialize and push the first base buffer onto the linked list. */ return ndarray_push_base(nd, v, shape, strides, offset, format, flags); } /* Push an additional base onto the linked list. */ static PyObject * ndarray_push(PyObject *self, PyObject *args, PyObject *kwds) { NDArrayObject *nd = (NDArrayObject *)self; static char *kwlist[] = { "items", "shape", "strides", "offset", "format", "flags", NULL }; PyObject *items = NULL; /* initializer: scalar, list or tuple */ PyObject *shape = NULL; /* size of each dimension */ PyObject *strides = NULL; /* number of bytes to the next elt in each dim */ PyObject *format = simple_format; /* struct module specifier: "B" */ Py_ssize_t offset = 0; /* buffer offset */ int flags = ND_DEFAULT; /* base buffer flags */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|OnOi", kwlist, &items, &shape, &strides, &offset, &format, &flags)) return NULL; if (flags & ND_VAREXPORT) { PyErr_SetString(PyExc_ValueError, "ND_VAREXPORT flag can only be used during object creation"); return NULL; } if (ND_IS_CONSUMER(nd)) { PyErr_SetString(PyExc_BufferError, "structure of re-exporting object is immutable"); return NULL; } if (!(nd->flags&ND_VAREXPORT) && nd->head->exports > 0) { PyErr_Format(PyExc_BufferError, "cannot change structure: %zd exported buffer%s", nd->head->exports, nd->head->exports==1 ? "" : "s"); return NULL; } if (ndarray_push_base(nd, items, shape, strides, offset, format, flags) < 0) return NULL; Py_RETURN_NONE; } /* Pop a base from the linked list (if possible). */ static PyObject * ndarray_pop(PyObject *self, PyObject *dummy) { NDArrayObject *nd = (NDArrayObject *)self; if (ND_IS_CONSUMER(nd)) { PyErr_SetString(PyExc_BufferError, "structure of re-exporting object is immutable"); return NULL; } if (nd->head->exports > 0) { PyErr_Format(PyExc_BufferError, "cannot change structure: %zd exported buffer%s", nd->head->exports, nd->head->exports==1 ? "" : "s"); return NULL; } if (nd->head->next == NULL) { PyErr_SetString(PyExc_BufferError, "list only has a single base"); return NULL; } ndbuf_pop(nd); Py_RETURN_NONE; } /**************************************************************************/ /* getbuffer */ /**************************************************************************/ static int ndarray_getbuf(PyObject *op, Py_buffer *view, int flags) { NDArrayObject *self = (NDArrayObject*)op; ndbuf_t *ndbuf = self->head; Py_buffer *base = &ndbuf->base; int baseflags = ndbuf->flags; /* redirect mode */ if (base->obj != NULL && (baseflags&ND_REDIRECT)) { return PyObject_GetBuffer(base->obj, view, flags); } /* start with complete information */ *view = *base; view->obj = NULL; /* reconstruct format */ if (view->format == NULL) view->format = "B"; if (base->ndim != 0 && ((REQ_SHAPE(flags) && base->shape == NULL) || (REQ_STRIDES(flags) && base->strides == NULL))) { /* The ndarray is a re-exporter that has been created without full information for testing purposes. In this particular case the ndarray is not a PEP-3118 compliant buffer provider. 
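      For example (illustration only): a consumer ndarray created with
      getbuf=PyBUF_SIMPLE records neither shape nor strides, so it cannot
      satisfy a later request that sets PyBUF_ND or PyBUF_STRIDES.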
*/ PyErr_SetString(PyExc_BufferError, "re-exporter does not provide format, shape or strides"); return -1; } if (baseflags & ND_GETBUF_FAIL) { PyErr_SetString(PyExc_BufferError, "ND_GETBUF_FAIL: forced test exception"); if (baseflags & ND_GETBUF_UNDEFINED) view->obj = (PyObject *)0x1; /* wrong but permitted in <= 3.2 */ return -1; } if (REQ_WRITABLE(flags) && base->readonly) { PyErr_SetString(PyExc_BufferError, "ndarray is not writable"); return -1; } if (!REQ_FORMAT(flags)) { /* NULL indicates that the buffer's data type has been cast to 'B'. view->itemsize is the _previous_ itemsize. If shape is present, the equality product(shape) * itemsize = len still holds at this point. The equality calcsize(format) = itemsize does _not_ hold from here on! */ view->format = NULL; } if (REQ_C_CONTIGUOUS(flags) && !ND_C_CONTIGUOUS(baseflags)) { PyErr_SetString(PyExc_BufferError, "ndarray is not C-contiguous"); return -1; } if (REQ_F_CONTIGUOUS(flags) && !ND_FORTRAN_CONTIGUOUS(baseflags)) { PyErr_SetString(PyExc_BufferError, "ndarray is not Fortran contiguous"); return -1; } if (REQ_ANY_CONTIGUOUS(flags) && !ND_ANY_CONTIGUOUS(baseflags)) { PyErr_SetString(PyExc_BufferError, "ndarray is not contiguous"); return -1; } if (!REQ_INDIRECT(flags) && (baseflags & ND_PIL)) { PyErr_SetString(PyExc_BufferError, "ndarray cannot be represented without suboffsets"); return -1; } if (!REQ_STRIDES(flags)) { if (!ND_C_CONTIGUOUS(baseflags)) { PyErr_SetString(PyExc_BufferError, "ndarray is not C-contiguous"); return -1; } view->strides = NULL; } if (!REQ_SHAPE(flags)) { /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous, so base->buf = ndbuf->data. */ if (view->format != NULL) { /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do not make sense. */ PyErr_Format(PyExc_BufferError, "ndarray: cannot cast to unsigned bytes if the format flag " "is present"); return -1; } /* product(shape) * itemsize = len and calcsize(format) = itemsize do _not_ hold from here on! 
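      (Illustration, not from the sources: for an exporter with itemsize 8,
      shape {2} and len 16, a PyBUF_SIMPLE request yields ndim = 1,
      shape = NULL and format = NULL, while view->itemsize keeps the previous
      value 8, which is exactly why the two equalities above no longer
      hold.)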
*/ view->ndim = 1; view->shape = NULL; } /* Ascertain that the new buffer has the same contiguity as the exporter */ if (ND_C_CONTIGUOUS(baseflags) != PyBuffer_IsContiguous(view, 'C') || /* skip cast to 1-d */ (view->format != NULL && view->shape != NULL && ND_FORTRAN_CONTIGUOUS(baseflags) != PyBuffer_IsContiguous(view, 'F')) || /* cast to 1-d */ (view->format == NULL && view->shape == NULL && !PyBuffer_IsContiguous(view, 'F'))) { PyErr_SetString(PyExc_BufferError, "ndarray: contiguity mismatch in getbuf()"); return -1; } view->obj = Py_NewRef(self); self->head->exports++; return 0; } static void ndarray_releasebuf(PyObject *op, Py_buffer *view) { NDArrayObject *self = (NDArrayObject*)op; if (!ND_IS_CONSUMER(self)) { ndbuf_t *ndbuf = view->internal; if (--ndbuf->exports == 0 && ndbuf != self->head) ndbuf_delete(self, ndbuf); } } static PyBufferProcs ndarray_as_buffer = { ndarray_getbuf, /* bf_getbuffer */ ndarray_releasebuf, /* bf_releasebuffer */ }; /**************************************************************************/ /* indexing/slicing */ /**************************************************************************/ static char * ptr_from_index(Py_buffer *base, Py_ssize_t index) { char *ptr; Py_ssize_t nitems; /* items in the first dimension */ if (base->shape) nitems = base->shape[0]; else { assert(base->ndim == 1 && SIMPLE_FORMAT(base->format)); nitems = base->len; } if (index < 0) { index += nitems; } if (index < 0 || index >= nitems) { PyErr_SetString(PyExc_IndexError, "index out of bounds"); return NULL; } ptr = (char *)base->buf; if (base->strides == NULL) ptr += base->itemsize * index; else ptr += base->strides[0] * index; ptr = ADJUST_PTR(ptr, base->suboffsets); return ptr; } static PyObject * ndarray_item(PyObject *op, Py_ssize_t index) { NDArrayObject *self = (NDArrayObject *)op; ndbuf_t *ndbuf = self->head; Py_buffer *base = &ndbuf->base; char *ptr; if (base->ndim == 0) { PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar"); return NULL; } ptr = ptr_from_index(base, index); if (ptr == NULL) return NULL; if (base->ndim == 1) { return unpack_single(ptr, base->format, base->itemsize); } else { NDArrayObject *nd; Py_buffer *subview; nd = (NDArrayObject *)ndarray_new(&NDArray_Type, NULL, NULL); if (nd == NULL) return NULL; if (ndarray_init_staticbuf((PyObject *)self, nd, PyBUF_FULL_RO) < 0) { Py_DECREF(nd); return NULL; } subview = &nd->staticbuf.base; subview->buf = ptr; subview->len /= subview->shape[0]; subview->ndim--; subview->shape++; if (subview->strides) subview->strides++; if (subview->suboffsets) subview->suboffsets++; init_flags(&nd->staticbuf); return (PyObject *)nd; } } /* For each dimension, we get valid (start, stop, step, slicelength) quadruples from PySlice_GetIndicesEx(). Slicing NumPy arrays ==================== A pointer to an element in a NumPy array is defined by: ptr = (char *)buf + indices[0] * strides[0] + ... + indices[ndim-1] * strides[ndim-1] Adjust buf: ----------- Adding start[n] for each dimension effectively adds the constant: c = start[0] * strides[0] + ... + start[ndim-1] * strides[ndim-1] Therefore init_slice() adds all start[n] directly to buf. Adjust shape: ------------- Obviously shape[n] = slicelength[n] Adjust strides: --------------- In the original array, the next element in a dimension is reached by adding strides[n] to the pointer. 
In the sliced array, elements may be skipped, so the next element is reached by adding: strides[n] * step[n] Slicing PIL arrays ================== Layout: ------- In the first (zeroth) dimension, PIL arrays have an array of pointers to sub-arrays of ndim-1. Striding in the first dimension is done by getting the index of the nth pointer, dereference it and then add a suboffset to it. The arrays pointed to can best be seen a regular NumPy arrays. Adjust buf: ----------- In the original array, buf points to a location (usually the start) in the array of pointers. For the sliced array, start[0] can be added to buf in the same manner as for NumPy arrays. Adjust suboffsets: ------------------ Due to the dereferencing step in the addressing scheme, it is not possible to adjust buf for higher dimensions. Recall that the sub-arrays pointed to are regular NumPy arrays, so for each of those arrays adding start[n] effectively adds the constant: c = start[1] * strides[1] + ... + start[ndim-1] * strides[ndim-1] This constant is added to suboffsets[0]. suboffsets[0] in turn is added to each pointer right after dereferencing. Adjust shape and strides: ------------------------- Shape and strides are not influenced by the dereferencing step, so they are adjusted in the same manner as for NumPy arrays. Multiple levels of suboffsets ============================= For a construct like an array of pointers to array of pointers to sub-arrays of ndim-2: suboffsets[0] = start[1] * strides[1] suboffsets[1] = start[2] * strides[2] + ... */ static int init_slice(Py_buffer *base, PyObject *key, int dim) { Py_ssize_t start, stop, step, slicelength; if (PySlice_Unpack(key, &start, &stop, &step) < 0) { return -1; } slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step); if (base->suboffsets == NULL || dim == 0) { adjust_buf: base->buf = (char *)base->buf + base->strides[dim] * start; } else { Py_ssize_t n = dim-1; while (n >= 0 && base->suboffsets[n] < 0) n--; if (n < 0) goto adjust_buf; /* all suboffsets are negative */ base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start; } base->shape[dim] = slicelength; base->strides[dim] = base->strides[dim] * step; return 0; } static int copy_structure(Py_buffer *base) { Py_ssize_t *shape = NULL, *strides = NULL, *suboffsets = NULL; Py_ssize_t i; shape = PyMem_Malloc(base->ndim * (sizeof *shape)); strides = PyMem_Malloc(base->ndim * (sizeof *strides)); if (shape == NULL || strides == NULL) goto err_nomem; suboffsets = NULL; if (base->suboffsets) { suboffsets = PyMem_Malloc(base->ndim * (sizeof *suboffsets)); if (suboffsets == NULL) goto err_nomem; } for (i = 0; i < base->ndim; i++) { shape[i] = base->shape[i]; strides[i] = base->strides[i]; if (suboffsets) suboffsets[i] = base->suboffsets[i]; } base->shape = shape; base->strides = strides; base->suboffsets = suboffsets; return 0; err_nomem: PyErr_NoMemory(); PyMem_XFree(shape); PyMem_XFree(strides); PyMem_XFree(suboffsets); return -1; } static PyObject * ndarray_subscript(PyObject *op, PyObject *key) { NDArrayObject *self = (NDArrayObject*)op; NDArrayObject *nd; ndbuf_t *ndbuf; Py_buffer *base = &self->head->base; if (base->ndim == 0) { if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) { return unpack_single(base->buf, base->format, base->itemsize); } else if (key == Py_Ellipsis) { return Py_NewRef(self); } else { PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar"); return NULL; } } if (PyIndex_Check(key)) { Py_ssize_t index = PyLong_AsSsize_t(key); if (index == -1 && 
PyErr_Occurred()) return NULL; return ndarray_item(op, index); } nd = (NDArrayObject *)ndarray_new(&NDArray_Type, NULL, NULL); if (nd == NULL) return NULL; /* new ndarray is a consumer */ if (ndarray_init_staticbuf((PyObject *)self, nd, PyBUF_FULL_RO) < 0) { Py_DECREF(nd); return NULL; } /* copy shape, strides and suboffsets */ ndbuf = nd->head; base = &ndbuf->base; if (copy_structure(base) < 0) { Py_DECREF(nd); return NULL; } ndbuf->flags |= ND_OWN_ARRAYS; if (PySlice_Check(key)) { /* one-dimensional slice */ if (init_slice(base, key, 0) < 0) goto err_occurred; } else if (PyTuple_Check(key)) { /* multi-dimensional slice */ PyObject *tuple = key; Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); for (i = 0; i < n; i++) { key = PyTuple_GET_ITEM(tuple, i); if (!PySlice_Check(key)) goto type_error; if (init_slice(base, key, (int)i) < 0) goto err_occurred; } } else { goto type_error; } init_len(base); init_flags(ndbuf); return (PyObject *)nd; type_error: PyErr_Format(PyExc_TypeError, "cannot index memory using \"%T\"", key); err_occurred: Py_DECREF(nd); return NULL; } static int ndarray_ass_subscript(PyObject *op, PyObject *key, PyObject *value) { NDArrayObject *self = (NDArrayObject*)op; NDArrayObject *nd; Py_buffer *dest = &self->head->base; Py_buffer src; char *ptr; Py_ssize_t index; int ret = -1; if (dest->readonly) { PyErr_SetString(PyExc_TypeError, "ndarray is not writable"); return -1; } if (value == NULL) { PyErr_SetString(PyExc_TypeError, "ndarray data cannot be deleted"); return -1; } if (dest->ndim == 0) { if (key == Py_Ellipsis || (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0)) { ptr = (char *)dest->buf; return pack_single(ptr, value, dest->format, dest->itemsize); } else { PyErr_SetString(PyExc_TypeError, "invalid indexing of scalar"); return -1; } } if (dest->ndim == 1 && PyIndex_Check(key)) { /* rvalue must be a single item */ index = PyLong_AsSsize_t(key); if (index == -1 && PyErr_Occurred()) return -1; else { ptr = ptr_from_index(dest, index); if (ptr == NULL) return -1; } return pack_single(ptr, value, dest->format, dest->itemsize); } /* rvalue must be an exporter */ if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) == -1) return -1; nd = (NDArrayObject *)ndarray_subscript((PyObject*)self, key); if (nd != NULL) { dest = &nd->head->base; ret = copy_buffer(dest, &src); Py_DECREF(nd); } PyBuffer_Release(&src); return ret; } static PyObject * slice_indices(PyObject *self, PyObject *args) { PyObject *ret, *key, *tmp; Py_ssize_t s[4]; /* start, stop, step, slicelength */ Py_ssize_t i, len; if (!PyArg_ParseTuple(args, "On", &key, &len)) { return NULL; } if (!PySlice_Check(key)) { PyErr_SetString(PyExc_TypeError, "first argument must be a slice object"); return NULL; } if (PySlice_Unpack(key, &s[0], &s[1], &s[2]) < 0) { return NULL; } s[3] = PySlice_AdjustIndices(len, &s[0], &s[1], s[2]); ret = PyTuple_New(4); if (ret == NULL) return NULL; for (i = 0; i < 4; i++) { tmp = PyLong_FromSsize_t(s[i]); if (tmp == NULL) goto error; PyTuple_SET_ITEM(ret, i, tmp); } return ret; error: Py_DECREF(ret); return NULL; } static PyMappingMethods ndarray_as_mapping = { NULL, /* mp_length */ ndarray_subscript, /* mp_subscript */ ndarray_ass_subscript /* mp_ass_subscript */ }; static PySequenceMethods ndarray_as_sequence = { 0, /* sq_length */ 0, /* sq_concat */ 0, /* sq_repeat */ ndarray_item, /* sq_item */ }; /**************************************************************************/ /* getters */ /**************************************************************************/ static PyObject * 
ssize_array_as_tuple(Py_ssize_t *array, Py_ssize_t len) { PyObject *tuple, *x; Py_ssize_t i; if (array == NULL) return PyTuple_New(0); tuple = PyTuple_New(len); if (tuple == NULL) return NULL; for (i = 0; i < len; i++) { x = PyLong_FromSsize_t(array[i]); if (x == NULL) { Py_DECREF(tuple); return NULL; } PyTuple_SET_ITEM(tuple, i, x); } return tuple; } static PyObject * ndarray_get_flags(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; return PyLong_FromLong(self->head->flags); } static PyObject * ndarray_get_offset(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; ndbuf_t *ndbuf = self->head; return PyLong_FromSsize_t(ndbuf->offset); } static PyObject * ndarray_get_obj(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; if (base->obj == NULL) { Py_RETURN_NONE; } return Py_NewRef(base->obj); } static PyObject * ndarray_get_nbytes(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return PyLong_FromSsize_t(base->len); } static PyObject * ndarray_get_readonly(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return PyBool_FromLong(base->readonly); } static PyObject * ndarray_get_itemsize(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return PyLong_FromSsize_t(base->itemsize); } static PyObject * ndarray_get_format(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; const char *fmt = base->format ? base->format : ""; return PyUnicode_FromString(fmt); } static PyObject * ndarray_get_ndim(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return PyLong_FromSsize_t(base->ndim); } static PyObject * ndarray_get_shape(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return ssize_array_as_tuple(base->shape, base->ndim); } static PyObject * ndarray_get_strides(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return ssize_array_as_tuple(base->strides, base->ndim); } static PyObject * ndarray_get_suboffsets(PyObject *op, void *closure) { NDArrayObject *self = (NDArrayObject*)op; Py_buffer *base = &self->head->base; return ssize_array_as_tuple(base->suboffsets, base->ndim); } static PyObject * ndarray_c_contig(PyObject *self, void *dummy) { NDArrayObject *nd = (NDArrayObject *)self; int ret = PyBuffer_IsContiguous(&nd->head->base, 'C'); if (ret != ND_C_CONTIGUOUS(nd->head->flags)) { PyErr_SetString(PyExc_RuntimeError, "results from PyBuffer_IsContiguous() and flags differ"); return NULL; } return PyBool_FromLong(ret); } static PyObject * ndarray_fortran_contig(PyObject *self, void *dummy) { NDArrayObject *nd = (NDArrayObject *)self; int ret = PyBuffer_IsContiguous(&nd->head->base, 'F'); if (ret != ND_FORTRAN_CONTIGUOUS(nd->head->flags)) { PyErr_SetString(PyExc_RuntimeError, "results from PyBuffer_IsContiguous() and flags differ"); return NULL; } return PyBool_FromLong(ret); } static PyObject * ndarray_contig(PyObject *self, void *dummy) { NDArrayObject *nd = (NDArrayObject *)self; int ret = PyBuffer_IsContiguous(&nd->head->base, 'A'); if (ret != ND_ANY_CONTIGUOUS(nd->head->flags)) { PyErr_SetString(PyExc_RuntimeError, "results from PyBuffer_IsContiguous() and flags differ"); return NULL; 
} return PyBool_FromLong(ret); } static PyGetSetDef ndarray_getset [] = { /* ndbuf */ { "flags", ndarray_get_flags, NULL, NULL, NULL}, { "offset", ndarray_get_offset, NULL, NULL, NULL}, /* ndbuf.base */ { "obj", ndarray_get_obj, NULL, NULL, NULL}, { "nbytes", ndarray_get_nbytes, NULL, NULL, NULL}, { "readonly", ndarray_get_readonly, NULL, NULL, NULL}, { "itemsize", ndarray_get_itemsize, NULL, NULL, NULL}, { "format", ndarray_get_format, NULL, NULL, NULL}, { "ndim", ndarray_get_ndim, NULL, NULL, NULL}, { "shape", ndarray_get_shape, NULL, NULL, NULL}, { "strides", ndarray_get_strides, NULL, NULL, NULL}, { "suboffsets", ndarray_get_suboffsets, NULL, NULL, NULL}, { "c_contiguous", ndarray_c_contig, NULL, NULL, NULL}, { "f_contiguous", ndarray_fortran_contig, NULL, NULL, NULL}, { "contiguous", ndarray_contig, NULL, NULL, NULL}, {NULL} }; static PyObject * ndarray_tolist(PyObject *self, PyObject *dummy) { return ndarray_as_list((NDArrayObject *)self); } static PyObject * ndarray_tobytes(PyObject *self, PyObject *dummy) { ndbuf_t *ndbuf = ((NDArrayObject *)self)->head; Py_buffer *src = &ndbuf->base; Py_buffer dest; PyObject *ret = NULL; char *mem; if (ND_C_CONTIGUOUS(ndbuf->flags)) return PyBytes_FromStringAndSize(src->buf, src->len); assert(src->shape != NULL); assert(src->strides != NULL); assert(src->ndim > 0); mem = PyMem_Malloc(src->len); if (mem == NULL) { PyErr_NoMemory(); return NULL; } dest = *src; dest.buf = mem; dest.suboffsets = NULL; dest.strides = strides_from_shape(ndbuf, 0); if (dest.strides == NULL) goto out; if (copy_buffer(&dest, src) < 0) goto out; ret = PyBytes_FromStringAndSize(mem, src->len); out: PyMem_XFree(dest.strides); PyMem_Free(mem); return ret; } /* add redundant (negative) suboffsets for testing */ static PyObject * ndarray_add_suboffsets(PyObject *self, PyObject *dummy) { NDArrayObject *nd = (NDArrayObject *)self; Py_buffer *base = &nd->head->base; Py_ssize_t i; if (base->suboffsets != NULL) { PyErr_SetString(PyExc_TypeError, "cannot add suboffsets to PIL-style array"); return NULL; } if (base->strides == NULL) { PyErr_SetString(PyExc_TypeError, "cannot add suboffsets to array without strides"); return NULL; } base->suboffsets = PyMem_Malloc(base->ndim * (sizeof *base->suboffsets)); if (base->suboffsets == NULL) { PyErr_NoMemory(); return NULL; } for (i = 0; i < base->ndim; i++) base->suboffsets[i] = -1; nd->head->flags &= ~(ND_C|ND_FORTRAN); Py_RETURN_NONE; } /* Test PyMemoryView_FromBuffer(): return a memoryview from a static buffer. Obviously this is fragile and only one such view may be active at any time. Never use anything like this in real code! 
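   Usage sketch (illustrative only): with nd an original exporter,

       m = nd.memoryview_from_buffer()

   returns a memoryview over a static snapshot of nd's raw data; creating a
   second such view reuses (and may reallocate) the same static buffer, which
   is why only one view may be active at a time.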
*/ static char *infobuf = NULL; static PyObject * ndarray_memoryview_from_buffer(PyObject *self, PyObject *dummy) { const NDArrayObject *nd = (NDArrayObject *)self; const Py_buffer *view = &nd->head->base; const ndbuf_t *ndbuf; static char format[ND_MAX_NDIM+1]; static Py_ssize_t shape[ND_MAX_NDIM]; static Py_ssize_t strides[ND_MAX_NDIM]; static Py_ssize_t suboffsets[ND_MAX_NDIM]; static Py_buffer info; char *p; if (!ND_IS_CONSUMER(nd)) ndbuf = nd->head; /* self is ndarray/original exporter */ else if (NDArray_Check(view->obj) && !ND_IS_CONSUMER(view->obj)) /* self is ndarray and consumer from ndarray/original exporter */ ndbuf = ((NDArrayObject *)view->obj)->head; else { PyErr_SetString(PyExc_TypeError, "memoryview_from_buffer(): ndarray must be original exporter or " "consumer from ndarray/original exporter"); return NULL; } info = *view; p = PyMem_Realloc(infobuf, ndbuf->len); if (p == NULL) { PyMem_Free(infobuf); PyErr_NoMemory(); infobuf = NULL; return NULL; } else { infobuf = p; } /* copy the complete raw data */ memcpy(infobuf, ndbuf->data, ndbuf->len); info.buf = infobuf + ((char *)view->buf - ndbuf->data); if (view->format) { if (strlen(view->format) > ND_MAX_NDIM) { PyErr_Format(PyExc_TypeError, "memoryview_from_buffer: format is limited to %d characters", ND_MAX_NDIM); return NULL; } strcpy(format, view->format); info.format = format; } if (view->ndim > ND_MAX_NDIM) { PyErr_Format(PyExc_TypeError, "memoryview_from_buffer: ndim is limited to %d", ND_MAX_NDIM); return NULL; } if (view->shape) { memcpy(shape, view->shape, view->ndim * sizeof(Py_ssize_t)); info.shape = shape; } if (view->strides) { memcpy(strides, view->strides, view->ndim * sizeof(Py_ssize_t)); info.strides = strides; } if (view->suboffsets) { memcpy(suboffsets, view->suboffsets, view->ndim * sizeof(Py_ssize_t)); info.suboffsets = suboffsets; } return PyMemoryView_FromBuffer(&info); } /* Get a single item from bufobj at the location specified by seq. seq is a list or tuple of indices. The purpose of this function is to check other functions against PyBuffer_GetPointer(). 
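   Example call (for illustration): for a two-dimensional ndarray nd,
   get_pointer(nd, [1, 2]) returns the single element nd[1][2], but locates
   it via PyBuffer_GetPointer() instead of the ndarray indexing code above.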
*/ static PyObject * get_pointer(PyObject *self, PyObject *args) { PyObject *ret = NULL, *bufobj, *seq; Py_buffer view; Py_ssize_t indices[ND_MAX_NDIM]; Py_ssize_t i; void *ptr; if (!PyArg_ParseTuple(args, "OO", &bufobj, &seq)) { return NULL; } CHECK_LIST_OR_TUPLE(seq); if (PyObject_GetBuffer(bufobj, &view, PyBUF_FULL_RO) < 0) return NULL; if (view.ndim > ND_MAX_NDIM) { PyErr_Format(PyExc_ValueError, "get_pointer(): ndim > %d", ND_MAX_NDIM); goto out; } if (PySequence_Fast_GET_SIZE(seq) != view.ndim) { PyErr_SetString(PyExc_ValueError, "get_pointer(): len(indices) != ndim"); goto out; } for (i = 0; i < view.ndim; i++) { PyObject *x = PySequence_Fast_GET_ITEM(seq, i); indices[i] = PyLong_AsSsize_t(x); if (PyErr_Occurred()) goto out; if (indices[i] < 0 || indices[i] >= view.shape[i]) { PyErr_Format(PyExc_ValueError, "get_pointer(): invalid index %zd at position %zd", indices[i], i); goto out; } } ptr = PyBuffer_GetPointer(&view, indices); ret = unpack_single(ptr, view.format, view.itemsize); out: PyBuffer_Release(&view); return ret; } static PyObject * get_sizeof_void_p(PyObject *self, PyObject *Py_UNUSED(ignored)) { return PyLong_FromSize_t(sizeof(void *)); } static char get_ascii_order(PyObject *order) { PyObject *ascii_order; char ord; if (!PyUnicode_Check(order)) { PyErr_SetString(PyExc_TypeError, "order must be a string"); return CHAR_MAX; } ascii_order = PyUnicode_AsASCIIString(order); if (ascii_order == NULL) { return CHAR_MAX; } ord = PyBytes_AS_STRING(ascii_order)[0]; Py_DECREF(ascii_order); if (ord != 'C' && ord != 'F' && ord != 'A') { PyErr_SetString(PyExc_ValueError, "invalid order, must be C, F or A"); return CHAR_MAX; } return ord; } /* Get a contiguous memoryview. */ static PyObject * get_contiguous(PyObject *self, PyObject *args) { PyObject *obj; PyObject *buffertype; PyObject *order; long type; char ord; if (!PyArg_ParseTuple(args, "OOO", &obj, &buffertype, &order)) { return NULL; } if (!PyLong_Check(buffertype)) { PyErr_SetString(PyExc_TypeError, "buffertype must be PyBUF_READ or PyBUF_WRITE"); return NULL; } type = PyLong_AsLong(buffertype); if (type == -1 && PyErr_Occurred()) { return NULL; } if (type != PyBUF_READ && type != PyBUF_WRITE) { PyErr_SetString(PyExc_ValueError, "invalid buffer type"); return NULL; } ord = get_ascii_order(order); if (ord == CHAR_MAX) return NULL; return PyMemoryView_GetContiguous(obj, (int)type, ord); } /* PyBuffer_ToContiguous() */ static PyObject * py_buffer_to_contiguous(PyObject *self, PyObject *args) { PyObject *obj; PyObject *order; PyObject *ret = NULL; int flags; char ord; Py_buffer view; char *buf = NULL; if (!PyArg_ParseTuple(args, "OOi", &obj, &order, &flags)) { return NULL; } if (PyObject_GetBuffer(obj, &view, flags) < 0) { return NULL; } ord = get_ascii_order(order); if (ord == CHAR_MAX) { goto out; } buf = PyMem_Malloc(view.len); if (buf == NULL) { PyErr_NoMemory(); goto out; } if (PyBuffer_ToContiguous(buf, &view, view.len, ord) < 0) { goto out; } ret = PyBytes_FromStringAndSize(buf, view.len); out: PyBuffer_Release(&view); PyMem_XFree(buf); return ret; } static int fmtcmp(const char *fmt1, const char *fmt2) { if (fmt1 == NULL) { return fmt2 == NULL || strcmp(fmt2, "B") == 0; } if (fmt2 == NULL) { return fmt1 == NULL || strcmp(fmt1, "B") == 0; } return strcmp(fmt1, fmt2) == 0; } static int arraycmp(const Py_ssize_t *a1, const Py_ssize_t *a2, const Py_ssize_t *shape, Py_ssize_t ndim) { Py_ssize_t i; for (i = 0; i < ndim; i++) { if (shape && shape[i] <= 1) { /* strides can differ if the dimension is less than 2 */ continue; } if 
(a1[i] != a2[i]) { return 0; } } return 1; } /* Compare two contiguous buffers for physical equality. */ static PyObject * cmp_contig(PyObject *self, PyObject *args) { PyObject *b1, *b2; /* buffer objects */ Py_buffer v1, v2; PyObject *ret; int equal = 0; if (!PyArg_ParseTuple(args, "OO", &b1, &b2)) { return NULL; } if (PyObject_GetBuffer(b1, &v1, PyBUF_FULL_RO) < 0) { PyErr_SetString(PyExc_TypeError, "cmp_contig: first argument does not implement the buffer " "protocol"); return NULL; } if (PyObject_GetBuffer(b2, &v2, PyBUF_FULL_RO) < 0) { PyErr_SetString(PyExc_TypeError, "cmp_contig: second argument does not implement the buffer " "protocol"); PyBuffer_Release(&v1); return NULL; } if (!(PyBuffer_IsContiguous(&v1, 'C')&&PyBuffer_IsContiguous(&v2, 'C')) && !(PyBuffer_IsContiguous(&v1, 'F')&&PyBuffer_IsContiguous(&v2, 'F'))) { goto result; } /* readonly may differ if created from non-contiguous */ if (v1.len != v2.len || v1.itemsize != v2.itemsize || v1.ndim != v2.ndim || !fmtcmp(v1.format, v2.format) || !!v1.shape != !!v2.shape || !!v1.strides != !!v2.strides || !!v1.suboffsets != !!v2.suboffsets) { goto result; } if ((v1.shape && !arraycmp(v1.shape, v2.shape, NULL, v1.ndim)) || (v1.strides && !arraycmp(v1.strides, v2.strides, v1.shape, v1.ndim)) || (v1.suboffsets && !arraycmp(v1.suboffsets, v2.suboffsets, NULL, v1.ndim))) { goto result; } if (memcmp((char *)v1.buf, (char *)v2.buf, v1.len) != 0) { goto result; } equal = 1; result: PyBuffer_Release(&v1); PyBuffer_Release(&v2); ret = equal ? Py_True : Py_False; return Py_NewRef(ret); } static PyObject * is_contiguous(PyObject *self, PyObject *args) { PyObject *obj; PyObject *order; PyObject *ret = NULL; Py_buffer view, *base; char ord; if (!PyArg_ParseTuple(args, "OO", &obj, &order)) { return NULL; } ord = get_ascii_order(order); if (ord == CHAR_MAX) { return NULL; } if (NDArray_Check(obj)) { /* Skip the buffer protocol to check simple etc. buffers directly. */ base = &((NDArrayObject *)obj)->head->base; ret = PyBuffer_IsContiguous(base, ord) ? Py_True : Py_False; } else { if (PyObject_GetBuffer(obj, &view, PyBUF_FULL_RO) < 0) { PyErr_SetString(PyExc_TypeError, "is_contiguous: object does not implement the buffer " "protocol"); return NULL; } ret = PyBuffer_IsContiguous(&view, ord) ? 
Py_True : Py_False; PyBuffer_Release(&view); } return Py_NewRef(ret); } static Py_hash_t ndarray_hash(PyObject *self) { const NDArrayObject *nd = (NDArrayObject *)self; const Py_buffer *view = &nd->head->base; PyObject *bytes; Py_hash_t hash; if (!view->readonly) { PyErr_SetString(PyExc_ValueError, "cannot hash writable ndarray object"); return -1; } if (view->obj != NULL && PyObject_Hash(view->obj) == -1) { return -1; } bytes = ndarray_tobytes(self, NULL); if (bytes == NULL) { return -1; } hash = PyObject_Hash(bytes); Py_DECREF(bytes); return hash; } static PyMethodDef ndarray_methods[] = { { "tolist", ndarray_tolist, METH_NOARGS, NULL }, { "tobytes", ndarray_tobytes, METH_NOARGS, NULL }, { "push", _PyCFunction_CAST(ndarray_push), METH_VARARGS|METH_KEYWORDS, NULL }, { "pop", ndarray_pop, METH_NOARGS, NULL }, { "add_suboffsets", ndarray_add_suboffsets, METH_NOARGS, NULL }, { "memoryview_from_buffer", ndarray_memoryview_from_buffer, METH_NOARGS, NULL }, {NULL} }; static PyTypeObject NDArray_Type = { PyVarObject_HEAD_INIT(NULL, 0) "ndarray", /* Name of this type */ sizeof(NDArrayObject), /* Basic object size */ 0, /* Item size for varobject */ ndarray_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ &ndarray_as_sequence, /* tp_as_sequence */ &ndarray_as_mapping, /* tp_as_mapping */ ndarray_hash, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ &ndarray_as_buffer, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ ndarray_methods, /* tp_methods */ 0, /* tp_members */ ndarray_getset, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ ndarray_init, /* tp_init */ 0, /* tp_alloc */ ndarray_new, /* tp_new */ }; /**************************************************************************/ /* StaticArray Object */ /**************************************************************************/ static PyTypeObject StaticArray_Type; typedef struct { PyObject_HEAD int legacy_mode; /* if true, use the view.obj==NULL hack */ } StaticArrayObject; static char static_mem[12] = {0,1,2,3,4,5,6,7,8,9,10,11}; static Py_ssize_t static_shape[1] = {12}; static Py_ssize_t static_strides[1] = {1}; static Py_buffer static_buffer = { static_mem, /* buf */ NULL, /* obj */ 12, /* len */ 1, /* itemsize */ 1, /* readonly */ 1, /* ndim */ "B", /* format */ static_shape, /* shape */ static_strides, /* strides */ NULL, /* suboffsets */ NULL /* internal */ }; static PyObject * staticarray_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { return (PyObject *)PyObject_New(StaticArrayObject, &StaticArray_Type); } static int staticarray_init(PyObject *self, PyObject *args, PyObject *kwds) { StaticArrayObject *a = (StaticArrayObject *)self; static char *kwlist[] = { "legacy_mode", NULL }; PyObject *legacy_mode = Py_False; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwlist, &legacy_mode)) return -1; a->legacy_mode = (legacy_mode != Py_False); return 0; } static void staticarray_dealloc(PyObject *self) { PyObject_Free(self); } /* Return a buffer for a PyBUF_FULL_RO request. Flags are not checked, which makes this object a non-compliant exporter! 
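   For instance, a consumer that requests PyBUF_WRITABLE still receives the
   read-only static_buffer unchanged, whereas a compliant exporter would
   raise BufferError instead.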
*/ static int staticarray_getbuf(PyObject *op, Py_buffer *view, int flags) { StaticArrayObject *self = (StaticArrayObject *)op; *view = static_buffer; if (self->legacy_mode) { view->obj = NULL; /* Don't use this in new code. */ } else { view->obj = Py_NewRef(self); } return 0; } static PyBufferProcs staticarray_as_buffer = { staticarray_getbuf, /* bf_getbuffer */ NULL, /* bf_releasebuffer */ }; static PyTypeObject StaticArray_Type = { PyVarObject_HEAD_INIT(NULL, 0) "staticarray", /* Name of this type */ sizeof(StaticArrayObject), /* Basic object size */ 0, /* Item size for varobject */ staticarray_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ &staticarray_as_buffer, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ 0, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ staticarray_init, /* tp_init */ 0, /* tp_alloc */ staticarray_new, /* tp_new */ }; static struct PyMethodDef _testbuffer_functions[] = { {"slice_indices", slice_indices, METH_VARARGS, NULL}, {"get_pointer", get_pointer, METH_VARARGS, NULL}, {"get_sizeof_void_p", get_sizeof_void_p, METH_NOARGS, NULL}, {"get_contiguous", get_contiguous, METH_VARARGS, NULL}, {"py_buffer_to_contiguous", py_buffer_to_contiguous, METH_VARARGS, NULL}, {"is_contiguous", is_contiguous, METH_VARARGS, NULL}, {"cmp_contig", cmp_contig, METH_VARARGS, NULL}, {NULL, NULL} }; static struct PyModuleDef _testbuffermodule = { PyModuleDef_HEAD_INIT, "_testbuffer", NULL, -1, _testbuffer_functions, NULL, NULL, NULL, NULL }; static int _testbuffer_exec(PyObject *mod) { Py_SET_TYPE(&NDArray_Type, &PyType_Type); if (PyType_Ready(&NDArray_Type)) { return -1; } if (PyModule_AddType(mod, &NDArray_Type) < 0) { return -1; } Py_SET_TYPE(&StaticArray_Type, &PyType_Type); if (PyModule_AddType(mod, &StaticArray_Type) < 0) { return -1; } structmodule = PyImport_ImportModule("struct"); if (structmodule == NULL) { return -1; } Struct = PyObject_GetAttrString(structmodule, "Struct"); if (Struct == NULL) { return -1; } calcsize = PyObject_GetAttrString(structmodule, "calcsize"); if (calcsize == NULL) { return -1; } simple_format = PyUnicode_FromString(simple_fmt); if (simple_format == NULL) { return -1; } #define ADD_INT_MACRO(mod, macro) \ do { \ if (PyModule_AddIntConstant(mod, #macro, macro) < 0) { \ return -1; \ } \ } while (0) ADD_INT_MACRO(mod, ND_MAX_NDIM); ADD_INT_MACRO(mod, ND_VAREXPORT); ADD_INT_MACRO(mod, ND_WRITABLE); ADD_INT_MACRO(mod, ND_FORTRAN); ADD_INT_MACRO(mod, ND_SCALAR); ADD_INT_MACRO(mod, ND_PIL); ADD_INT_MACRO(mod, ND_GETBUF_FAIL); ADD_INT_MACRO(mod, ND_GETBUF_UNDEFINED); ADD_INT_MACRO(mod, ND_REDIRECT); ADD_INT_MACRO(mod, PyBUF_SIMPLE); ADD_INT_MACRO(mod, PyBUF_WRITABLE); ADD_INT_MACRO(mod, PyBUF_FORMAT); ADD_INT_MACRO(mod, PyBUF_ND); ADD_INT_MACRO(mod, PyBUF_STRIDES); ADD_INT_MACRO(mod, PyBUF_INDIRECT); ADD_INT_MACRO(mod, PyBUF_C_CONTIGUOUS); ADD_INT_MACRO(mod, PyBUF_F_CONTIGUOUS); ADD_INT_MACRO(mod, PyBUF_ANY_CONTIGUOUS); ADD_INT_MACRO(mod, PyBUF_FULL); ADD_INT_MACRO(mod, PyBUF_FULL_RO); ADD_INT_MACRO(mod, PyBUF_RECORDS); ADD_INT_MACRO(mod, PyBUF_RECORDS_RO); 
ADD_INT_MACRO(mod, PyBUF_STRIDED); ADD_INT_MACRO(mod, PyBUF_STRIDED_RO); ADD_INT_MACRO(mod, PyBUF_CONTIG); ADD_INT_MACRO(mod, PyBUF_CONTIG_RO); ADD_INT_MACRO(mod, PyBUF_READ); ADD_INT_MACRO(mod, PyBUF_WRITE); #undef ADD_INT_MACRO return 0; } PyMODINIT_FUNC PyInit__testbuffer(void) { PyObject *mod = PyModule_Create(&_testbuffermodule); if (mod == NULL) { return NULL; } #ifdef Py_GIL_DISABLED PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED); #endif if (_testbuffer_exec(mod) < 0) { Py_DECREF(mod); return NULL; } return mod; } /* * Copyright (C) 1995, 1996, 1997, 1998, and 1999 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef HAVE_GETADDRINFO /* * Error return codes from getaddrinfo() */ #ifdef EAI_ADDRFAMILY /* If this is defined, there is a conflicting implementation in the C library, which can't be used for some reason. Make sure it won't interfere with this emulation. 
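   (Concretely: the system's EAI_* codes are #undef'ed and redefined below,
   and getaddrinfo itself is remapped to fake_getaddrinfo for this
   translation unit.)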
*/ #undef EAI_ADDRFAMILY #undef EAI_AGAIN #undef EAI_BADFLAGS #undef EAI_FAIL #undef EAI_FAMILY #undef EAI_MEMORY #undef EAI_NODATA #undef EAI_NONAME #undef EAI_SERVICE #undef EAI_SOCKTYPE #undef EAI_SYSTEM #undef EAI_BADHINTS #undef EAI_PROTOCOL #undef EAI_MAX #undef getaddrinfo #define getaddrinfo fake_getaddrinfo #endif /* EAI_ADDRFAMILY */ #define EAI_ADDRFAMILY 1 /* address family for hostname not supported */ #define EAI_AGAIN 2 /* temporary failure in name resolution */ #define EAI_BADFLAGS 3 /* invalid value for ai_flags */ #define EAI_FAIL 4 /* non-recoverable failure in name resolution */ #define EAI_FAMILY 5 /* ai_family not supported */ #define EAI_MEMORY 6 /* memory allocation failure */ #define EAI_NODATA 7 /* no address associated with hostname */ #define EAI_NONAME 8 /* hostname nor servname provided, or not known */ #define EAI_SERVICE 9 /* servname not supported for ai_socktype */ #define EAI_SOCKTYPE 10 /* ai_socktype not supported */ #define EAI_SYSTEM 11 /* system error returned in errno */ #define EAI_BADHINTS 12 #define EAI_PROTOCOL 13 #define EAI_MAX 14 /* * Flag values for getaddrinfo() */ #ifdef AI_PASSIVE #undef AI_PASSIVE #undef AI_CANONNAME #undef AI_NUMERICHOST #undef AI_NUMERICSERV #undef AI_MASK #undef AI_ALL #undef AI_V4MAPPED_CFG #undef AI_ADDRCONFIG #undef AI_V4MAPPED #undef AI_DEFAULT #endif /* AI_PASSIVE */ #define AI_PASSIVE 0x00000001 /* get address to use bind() */ #define AI_CANONNAME 0x00000002 /* fill ai_canonname */ #define AI_NUMERICHOST 0x00000004 /* prevent name resolution */ #define AI_NUMERICSERV 0x00000008 /* prevent service resolution */ /* valid flags for addrinfo */ #define AI_MASK (AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST | AI_NUMERICSERV) #define AI_ALL 0x00000100 /* IPv6 and IPv4-mapped (with AI_V4MAPPED) */ #define AI_V4MAPPED_CFG 0x00000200 /* accept IPv4-mapped if kernel supports */ #define AI_ADDRCONFIG 0x00000400 /* only if any address is assigned */ #define AI_V4MAPPED 0x00000800 /* accept IPv4-mapped IPv6 address */ /* special recommended flags for getipnodebyname */ #define AI_DEFAULT (AI_V4MAPPED_CFG | AI_ADDRCONFIG) #endif /* !HAVE_GETADDRINFO */ #ifndef HAVE_GETNAMEINFO /* * Constants for getnameinfo() */ #ifndef NI_MAXHOST #define NI_MAXHOST 1025 #define NI_MAXSERV 32 #endif /* !NI_MAXHOST */ /* * Flag values for getnameinfo() */ #ifndef NI_NOFQDN #define NI_NOFQDN 0x00000001 #define NI_NUMERICHOST 0x00000002 #define NI_NAMEREQD 0x00000004 #define NI_NUMERICSERV 0x00000008 #define NI_DGRAM 0x00000010 #endif /* !NI_NOFQDN */ #endif /* !HAVE_GETNAMEINFO */ #ifndef HAVE_ADDRINFO struct addrinfo { int ai_flags; /* AI_PASSIVE, AI_CANONNAME */ int ai_family; /* PF_xxx */ int ai_socktype; /* SOCK_xxx */ int ai_protocol; /* 0 or IPPROTO_xxx for IPv4 and IPv6 */ size_t ai_addrlen; /* length of ai_addr */ char *ai_canonname; /* canonical name for hostname */ struct sockaddr *ai_addr; /* binary address */ struct addrinfo *ai_next; /* next structure in linked list */ }; #endif /* !HAVE_ADDRINFO */ #ifndef HAVE_SOCKADDR_STORAGE /* * RFC 2553: protocol-independent placeholder for socket addresses */ #define _SS_MAXSIZE 128 #define _SS_ALIGNSIZE (sizeof(long long)) #define _SS_PAD1SIZE (_SS_ALIGNSIZE - sizeof(u_char) * 2) #define _SS_PAD2SIZE (_SS_MAXSIZE - sizeof(u_char) * 2 - \ _SS_PAD1SIZE - _SS_ALIGNSIZE) struct sockaddr_storage { #ifdef HAVE_SOCKADDR_SA_LEN unsigned char ss_len; /* address length */ unsigned char ss_family; /* address family */ #else unsigned short ss_family; /* address family */ #endif /* HAVE_SOCKADDR_SA_LEN */ 
char __ss_pad1[_SS_PAD1SIZE]; long long __ss_align; /* force desired structure storage alignment */ char __ss_pad2[_SS_PAD2SIZE]; }; #endif /* !HAVE_SOCKADDR_STORAGE */ #ifdef __cplusplus extern "C" { #endif #ifdef ENABLE_IPV6 extern void freehostent(struct hostent *); #endif #ifdef __cplusplus } #endif /* Common code for use by all hashlib related modules. */ #include "pycore_lock.h" // PyMutex /* * Internal error messages used for reporting an unsupported hash algorithm. * The algorithm can be given by its name, a callable or a PEP-247 module. * The same message is raised by Lib/hashlib.py::__get_builtin_constructor() * and _hmacmodule.c::find_hash_info(). */ #define HASHLIB_UNSUPPORTED_ALGORITHM "unsupported hash algorithm %S" #define HASHLIB_UNSUPPORTED_STR_ALGORITHM "unsupported hash algorithm %s" /* * Obtain a buffer view from a buffer-like object 'obj'. * * On success, store the result in 'view' and return 0. * On error, set an exception and return -1. */ static inline int _Py_hashlib_get_buffer_view(PyObject *obj, Py_buffer *view) { if (PyUnicode_Check(obj)) { PyErr_SetString(PyExc_TypeError, "Strings must be encoded before hashing"); return -1; } if (!PyObject_CheckBuffer(obj)) { PyErr_SetString(PyExc_TypeError, "object supporting the buffer API required"); return -1; } if (PyObject_GetBuffer(obj, view, PyBUF_SIMPLE) == -1) { return -1; } if (view->ndim > 1) { PyErr_SetString(PyExc_BufferError, "Buffer must be single dimension"); PyBuffer_Release(view); return -1; } return 0; } /* * Call _Py_hashlib_get_buffer_view() and check if it succeeded. * * On error, set an exception and execute the ERRACTION statements. */ #define GET_BUFFER_VIEW_OR_ERROR(OBJ, VIEW, ERRACTION) \ do { \ if (_Py_hashlib_get_buffer_view(OBJ, VIEW) < 0) { \ assert(PyErr_Occurred()); \ ERRACTION; \ } \ } while (0) #define GET_BUFFER_VIEW_OR_ERROUT(OBJ, VIEW) \ GET_BUFFER_VIEW_OR_ERROR(OBJ, VIEW, return NULL) /* * Helper code to synchronize access to the hash object when the GIL is * released around a CPU consuming hashlib operation. * * Code accessing a mutable part of the hash object must be enclosed in * an HASHLIB_{ACQUIRE,RELEASE}_LOCK block or explicitly acquire and release * the mutex inside a Py_BEGIN_ALLOW_THREADS -- Py_END_ALLOW_THREADS block if * they wish to release the GIL for an operation. */ #define HASHLIB_OBJECT_HEAD \ PyObject_HEAD \ /* Guard against race conditions during incremental update(). */ \ PyMutex mutex; #define HASHLIB_INIT_MUTEX(OBJ) \ do { \ (OBJ)->mutex = (PyMutex){0}; \ } while (0) #define HASHLIB_ACQUIRE_LOCK(OBJ) PyMutex_Lock(&(OBJ)->mutex) #define HASHLIB_RELEASE_LOCK(OBJ) PyMutex_Unlock(&(OBJ)->mutex) /* * Message length above which the GIL is to be released * when performing hashing operations. */ #define HASHLIB_GIL_MINSIZE 2048 // Macros for executing code while conditionally holding the GIL. // // These only drop the GIL if the lock acquisition itself is likely to // block. Thus the non-blocking acquire gating the GIL release for a // blocking lock acquisition. The intent of these macros is to surround // the assumed always "fast" operations that you aren't releasing the // GIL around. /* * Execute a suite of C statements 'STATEMENTS'. * * The GIL is held if 'SIZE' is below the HASHLIB_GIL_MINSIZE threshold. 
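 *
 * Usage sketch (illustrative; update_state() is a hypothetical helper, not
 * something defined in this header):
 *
 *     HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
 *         buf.len,
 *         update_state(self->state, buf.buf, buf.len)
 *     );
 *
 * i.e. the GIL is only released when buf.len exceeds HASHLIB_GIL_MINSIZE.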
*/ #define HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(SIZE, STATEMENTS) \ do { \ if ((SIZE) > HASHLIB_GIL_MINSIZE) { \ Py_BEGIN_ALLOW_THREADS \ STATEMENTS; \ Py_END_ALLOW_THREADS \ } \ else { \ STATEMENTS; \ } \ } while (0) /* * Lock 'OBJ' and execute a suite of C statements 'STATEMENTS'. * * The GIL is held if 'SIZE' is below the HASHLIB_GIL_MINSIZE threshold. */ #define HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(OBJ, SIZE, STATEMENTS) \ do { \ if ((SIZE) > HASHLIB_GIL_MINSIZE) { \ Py_BEGIN_ALLOW_THREADS \ HASHLIB_ACQUIRE_LOCK(OBJ); \ STATEMENTS; \ HASHLIB_RELEASE_LOCK(OBJ); \ Py_END_ALLOW_THREADS \ } \ else { \ HASHLIB_ACQUIRE_LOCK(OBJ); \ STATEMENTS; \ HASHLIB_RELEASE_LOCK(OBJ); \ } \ } while (0) static inline int _Py_hashlib_data_argument(PyObject **res, PyObject *data, PyObject *string) { if (data != NULL && string == NULL) { // called as H(data) or H(data=...) *res = data; return 1; } else if (data == NULL && string != NULL) { // called as H(string=...) if (PyErr_WarnEx(PyExc_DeprecationWarning, "the 'string' keyword parameter is deprecated since " "Python 3.15 and slated for removal in Python 3.19; " "use the 'data' keyword parameter or pass the data " "to hash as a positional argument instead", 1) < 0) { *res = NULL; return -1; } *res = string; return 1; } else if (data == NULL && string == NULL) { // fast path when no data is given assert(!PyErr_Occurred()); *res = NULL; return 0; } else { // called as H(data=..., string) *res = NULL; PyErr_SetString(PyExc_TypeError, "'data' and 'string' are mutually exclusive " "and support for 'string' keyword parameter " "is slated for removal in a future version."); return -1; } } /* / Author: Sam Rushing / Hacked for Unix by AMK / $Id$ / Modified to support mmap with offset - to map a 'window' of a file / Author: Yotam Medini yotamm@mellanox.co.il / / mmapmodule.cpp -- map a view of a file into memory / / todo: need permission flags, perhaps a 'chsize' analog / not all functions check range yet!!! / / / This version of mmapmodule.c has been changed significantly / from the original mmapfile.c on which it was based. / The original version of mmapfile is maintained by Sam at / ftp://squirl.nightmare.com/pub/python/python-ext. */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include #include "pycore_abstract.h" // _Py_convert_optional_to_ssize_t() #include "pycore_bytesobject.h" // _PyBytes_Find() #include "pycore_fileutils.h" // _Py_stat_struct #include "pycore_mmap.h" // _PyAnnotateMemoryMap() #include "pycore_weakref.h" // FT_CLEAR_WEAKREFS() #include // offsetof() #ifndef MS_WINDOWS # include // close() #endif #ifndef MS_WINDOWS #define UNIX # ifdef HAVE_FCNTL_H # include # endif /* HAVE_FCNTL_H */ #endif #ifdef MS_WINDOWS #include #include // LsaNtStatusToWinError static int my_getpagesize(void) { SYSTEM_INFO si; GetSystemInfo(&si); return si.dwPageSize; } static int my_getallocationgranularity (void) { SYSTEM_INFO si; GetSystemInfo(&si); return si.dwAllocationGranularity; } #endif #ifdef UNIX #include #include #if defined(HAVE_SYSCONF) && defined(_SC_PAGESIZE) static int my_getpagesize(void) { return sysconf(_SC_PAGESIZE); } #define my_getallocationgranularity my_getpagesize #else #define my_getpagesize getpagesize #endif #endif /* UNIX */ #include #ifdef HAVE_SYS_TYPES_H #include #endif /* HAVE_SYS_TYPES_H */ /* Prefer MAP_ANONYMOUS since MAP_ANON is deprecated according to man page. 
*/ #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) # define MAP_ANONYMOUS MAP_ANON #endif /*[clinic input] module mmap class mmap.mmap "mmap_object *" "" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=82a9f8a529905b9b]*/ typedef enum { ACCESS_DEFAULT, ACCESS_READ, ACCESS_WRITE, ACCESS_COPY } access_mode; typedef struct { PyObject_HEAD char * data; Py_ssize_t size; Py_ssize_t pos; /* relative to offset */ #ifdef MS_WINDOWS long long offset; #else off_t offset; #endif Py_ssize_t exports; #ifdef MS_WINDOWS HANDLE map_handle; HANDLE file_handle; wchar_t * tagname; #endif #ifdef UNIX int fd; int flags; #endif PyObject *weakreflist; access_mode access; _Bool trackfd; } mmap_object; #define mmap_object_CAST(op) ((mmap_object *)(op)) #include "clinic/mmapmodule.c.h" /* Return a Py_ssize_t from the object arg. This conversion logic is similar to what AC uses for `Py_ssize_t` arguments. Returns -1 on error. Use PyErr_Occurred() to disambiguate. */ static Py_ssize_t _As_Py_ssize_t(PyObject *arg) { assert(arg != NULL); Py_ssize_t ival = -1; PyObject *iobj = _PyNumber_Index(arg); if (iobj != NULL) { ival = PyLong_AsSsize_t(iobj); Py_DECREF(iobj); } return ival; } static void mmap_object_dealloc(PyObject *op) { mmap_object *m_obj = mmap_object_CAST(op); PyTypeObject *tp = Py_TYPE(m_obj); PyObject_GC_UnTrack(m_obj); #ifdef MS_WINDOWS Py_BEGIN_ALLOW_THREADS if (m_obj->data != NULL) UnmapViewOfFile (m_obj->data); if (m_obj->map_handle != NULL) CloseHandle (m_obj->map_handle); if (m_obj->file_handle != INVALID_HANDLE_VALUE) CloseHandle (m_obj->file_handle); Py_END_ALLOW_THREADS if (m_obj->tagname) PyMem_Free(m_obj->tagname); #endif /* MS_WINDOWS */ #ifdef UNIX Py_BEGIN_ALLOW_THREADS if (m_obj->fd >= 0) (void) close(m_obj->fd); if (m_obj->data!=NULL) { munmap(m_obj->data, m_obj->size); } Py_END_ALLOW_THREADS #endif /* UNIX */ FT_CLEAR_WEAKREFS(op, m_obj->weakreflist); tp->tp_free(m_obj); Py_DECREF(tp); } /*[clinic input] @critical_section mmap.mmap.close [clinic start generated code]*/ static PyObject * mmap_mmap_close_impl(mmap_object *self) /*[clinic end generated code: output=a1ae0c727546f78d input=25020035f047eae1]*/ { if (self->exports > 0) { PyErr_SetString(PyExc_BufferError, "cannot close "\ "exported pointers exist"); return NULL; } #ifdef MS_WINDOWS /* For each resource we maintain, we need to check the value is valid, and if so, free the resource and set the member value to an invalid value so the dealloc does not attempt to resource clearing again. TODO - should we check for errors in the close operations??? 
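       Python-level behaviour of close(), for reference (illustration only):

           m = mmap.mmap(-1, 4096)
           v = memoryview(m)
           m.close()      # BufferError: cannot close exported pointers exist
           v.release()
           m.close()      # succeeds once the export has been released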
*/ HANDLE map_handle = self->map_handle; HANDLE file_handle = self->file_handle; char *data = self->data; self->map_handle = NULL; self->file_handle = INVALID_HANDLE_VALUE; self->data = NULL; Py_BEGIN_ALLOW_THREADS if (data != NULL) { UnmapViewOfFile(data); } if (map_handle != NULL) { CloseHandle(map_handle); } if (file_handle != INVALID_HANDLE_VALUE) { CloseHandle(file_handle); } Py_END_ALLOW_THREADS #endif /* MS_WINDOWS */ #ifdef UNIX int fd = self->fd; char *data = self->data; self->fd = -1; self->data = NULL; Py_BEGIN_ALLOW_THREADS if (0 <= fd) (void) close(fd); if (data != NULL) { munmap(data, self->size); } Py_END_ALLOW_THREADS #endif Py_RETURN_NONE; } #ifdef MS_WINDOWS #define CHECK_VALID(err) \ do { \ if (self->map_handle == NULL) { \ PyErr_SetString(PyExc_ValueError, "mmap closed or invalid"); \ return err; \ } \ } while (0) #define CHECK_VALID_OR_RELEASE(err, buffer) \ do { \ if (self->map_handle == NULL) { \ PyErr_SetString(PyExc_ValueError, "mmap closed or invalid"); \ PyBuffer_Release(&(buffer)); \ return (err); \ } \ } while (0) #endif /* MS_WINDOWS */ #ifdef UNIX #define CHECK_VALID(err) \ do { \ if (self->data == NULL) { \ PyErr_SetString(PyExc_ValueError, "mmap closed or invalid"); \ return err; \ } \ } while (0) #define CHECK_VALID_OR_RELEASE(err, buffer) \ do { \ if (self->data == NULL) { \ PyErr_SetString(PyExc_ValueError, "mmap closed or invalid"); \ PyBuffer_Release(&(buffer)); \ return (err); \ } \ } while (0) #endif /* UNIX */ #if defined(MS_WINDOWS) && !defined(DONT_USE_SEH) static DWORD filter_page_exception(EXCEPTION_POINTERS *ptrs, EXCEPTION_RECORD *record) { *record = *ptrs->ExceptionRecord; if (record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR || record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { return EXCEPTION_EXECUTE_HANDLER; } return EXCEPTION_CONTINUE_SEARCH; } static DWORD filter_page_exception_method(mmap_object *self, EXCEPTION_POINTERS *ptrs, EXCEPTION_RECORD *record) { *record = *ptrs->ExceptionRecord; if (record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR || record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { ULONG_PTR address = record->ExceptionInformation[1]; if (address >= (ULONG_PTR) self->data && address < (ULONG_PTR) self->data + (ULONG_PTR) self->size) { return EXCEPTION_EXECUTE_HANDLER; } } return EXCEPTION_CONTINUE_SEARCH; } static void _PyErr_SetFromNTSTATUS(ULONG status) { #if defined(MS_WINDOWS_DESKTOP) || defined(MS_WINDOWS_SYSTEM) PyErr_SetFromWindowsErr(LsaNtStatusToWinError((NTSTATUS)status)); #else if (status & 0x80000000) { // HRESULT-shaped codes are supported by PyErr_SetFromWindowsErr PyErr_SetFromWindowsErr((int)status); } else { // No mapping for NTSTATUS values, so just return it for diagnostic purposes // If we provide it as winerror it could incorrectly change the type of the exception. 
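// (The status passed in here is ExceptionInformation[2] of an EXCEPTION_IN_PAGE_ERROR, i.e. the NTSTATUS describing the underlying I/O failure.)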
PyErr_Format(PyExc_OSError, "Operating system error NTSTATUS=0x%08lX", status); } #endif } #endif #if defined(MS_WINDOWS) && !defined(DONT_USE_SEH) #define HANDLE_INVALID_MEM(sourcecode) \ do { \ EXCEPTION_RECORD record; \ __try { \ sourcecode \ } \ __except (filter_page_exception(GetExceptionInformation(), &record)) { \ assert(record.ExceptionCode == EXCEPTION_IN_PAGE_ERROR || \ record.ExceptionCode == EXCEPTION_ACCESS_VIOLATION); \ if (record.ExceptionCode == EXCEPTION_IN_PAGE_ERROR) { \ _PyErr_SetFromNTSTATUS((ULONG)record.ExceptionInformation[2]); \ } \ else if (record.ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { \ PyErr_SetFromWindowsErr(ERROR_NOACCESS); \ } \ return -1; \ } \ } while (0) #else #define HANDLE_INVALID_MEM(sourcecode) \ do { \ sourcecode \ } while (0) #endif #if defined(MS_WINDOWS) && !defined(DONT_USE_SEH) #define HANDLE_INVALID_MEM_METHOD(self, sourcecode) \ do { \ EXCEPTION_RECORD record; \ __try { \ sourcecode \ } \ __except (filter_page_exception_method(self, GetExceptionInformation(), \ &record)) { \ assert(record.ExceptionCode == EXCEPTION_IN_PAGE_ERROR || \ record.ExceptionCode == EXCEPTION_ACCESS_VIOLATION); \ if (record.ExceptionCode == EXCEPTION_IN_PAGE_ERROR) { \ _PyErr_SetFromNTSTATUS((ULONG)record.ExceptionInformation[2]); \ } \ else if (record.ExceptionCode == EXCEPTION_ACCESS_VIOLATION) { \ PyErr_SetFromWindowsErr(ERROR_NOACCESS); \ } \ return -1; \ } \ } while (0) #else #define HANDLE_INVALID_MEM_METHOD(self, sourcecode) \ do { \ sourcecode \ } while (0) #endif int safe_memcpy(void *dest, const void *src, size_t count) { HANDLE_INVALID_MEM( memcpy(dest, src, count); ); return 0; } int safe_byte_copy(char *dest, const char *src) { HANDLE_INVALID_MEM( *dest = *src; ); return 0; } int safe_memchr(char **out, const void *ptr, int ch, size_t count) { HANDLE_INVALID_MEM( *out = (char *) memchr(ptr, ch, count); ); return 0; } int safe_memmove(void *dest, const void *src, size_t count) { HANDLE_INVALID_MEM( memmove(dest, src, count); ); return 0; } int safe_copy_from_slice(char *dest, const char *src, Py_ssize_t start, Py_ssize_t step, Py_ssize_t slicelen) { HANDLE_INVALID_MEM( size_t cur; Py_ssize_t i; for (cur = start, i = 0; i < slicelen; cur += step, i++) { dest[cur] = src[i]; } ); return 0; } int safe_copy_to_slice(char *dest, const char *src, Py_ssize_t start, Py_ssize_t step, Py_ssize_t slicelen) { HANDLE_INVALID_MEM( size_t cur; Py_ssize_t i; for (cur = start, i = 0; i < slicelen; cur += step, i++) { dest[i] = src[cur]; } ); return 0; } int _safe_PyBytes_Find(Py_ssize_t *out, mmap_object *self, const char *haystack, Py_ssize_t len_haystack, const char *needle, Py_ssize_t len_needle, Py_ssize_t offset) { HANDLE_INVALID_MEM_METHOD(self, *out = _PyBytes_Find(haystack, len_haystack, needle, len_needle, offset); ); return 0; } int _safe_PyBytes_ReverseFind(Py_ssize_t *out, mmap_object *self, const char *haystack, Py_ssize_t len_haystack, const char *needle, Py_ssize_t len_needle, Py_ssize_t offset) { HANDLE_INVALID_MEM_METHOD(self, *out = _PyBytes_ReverseFind(haystack, len_haystack, needle, len_needle, offset); ); return 0; } PyObject * _safe_PyBytes_FromStringAndSize(char *start, size_t num_bytes) { if (num_bytes == 1) { char dest; if (safe_byte_copy(&dest, start) < 0) { return NULL; } else { return PyBytes_FromStringAndSize(&dest, 1); } } else { PyBytesWriter *writer = PyBytesWriter_Create(num_bytes); if (writer == NULL) { return NULL; } if (safe_memcpy(PyBytesWriter_GetData(writer), start, num_bytes) < 0) { PyBytesWriter_Discard(writer); return NULL; } 
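/* The guarded copy succeeded, so hand the buffer over to the finished bytes object. */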
return PyBytesWriter_Finish(writer); } } /*[clinic input] @critical_section mmap.mmap.read_byte [clinic start generated code]*/ static PyObject * mmap_mmap_read_byte_impl(mmap_object *self) /*[clinic end generated code: output=d931da1319f3869b input=5b8c6a904bdddda9]*/ { CHECK_VALID(NULL); if (self->pos >= self->size) { PyErr_SetString(PyExc_ValueError, "read byte out of range"); return NULL; } char dest; if (safe_byte_copy(&dest, self->data + self->pos) < 0) { return NULL; } self->pos++; return PyLong_FromLong((unsigned char) dest); } /*[clinic input] @critical_section mmap.mmap.readline [clinic start generated code]*/ static PyObject * mmap_mmap_readline_impl(mmap_object *self) /*[clinic end generated code: output=b9d2bf9999283311 input=2c4efd1d06e1cdd1]*/ { Py_ssize_t remaining; char *start, *eol; CHECK_VALID(NULL); remaining = (self->pos < self->size) ? self->size - self->pos : 0; if (!remaining) return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES); start = self->data + self->pos; if (safe_memchr(&eol, start, '\n', remaining) < 0) { return NULL; } if (!eol) eol = self->data + self->size; else ++eol; /* advance past newline */ PyObject *result = _safe_PyBytes_FromStringAndSize(start, eol - start); if (result != NULL) { self->pos += (eol - start); } return result; } /*[clinic input] @critical_section mmap.mmap.read n as num_bytes: object(converter='_Py_convert_optional_to_ssize_t', type='Py_ssize_t', c_default='PY_SSIZE_T_MAX') = None / [clinic start generated code]*/ static PyObject * mmap_mmap_read_impl(mmap_object *self, Py_ssize_t num_bytes) /*[clinic end generated code: output=3b4d4f3704ed0969 input=8f97f361d435e357]*/ { Py_ssize_t remaining; CHECK_VALID(NULL); /* silently 'adjust' out-of-range requests */ remaining = (self->pos < self->size) ? self->size - self->pos : 0; if (num_bytes < 0 || num_bytes > remaining) num_bytes = remaining; PyObject *result = _safe_PyBytes_FromStringAndSize(self->data + self->pos, num_bytes); if (result != NULL) { self->pos += num_bytes; } return result; } static PyObject * mmap_gfind_lock_held(mmap_object *self, Py_buffer *view, PyObject *start_obj, PyObject *end_obj, int reverse) { Py_ssize_t start = self->pos; Py_ssize_t end = self->size; CHECK_VALID(NULL); if (start_obj != Py_None) { start = _As_Py_ssize_t(start_obj); if (start == -1 && PyErr_Occurred()) { return NULL; } if (end_obj != Py_None) { end = _As_Py_ssize_t(end_obj); if (end == -1 && PyErr_Occurred()) { return NULL; } } } if (start < 0) start += self->size; if (start < 0) start = 0; else if (start > self->size) start = self->size; if (end < 0) end += self->size; if (end < 0) end = 0; else if (end > self->size) end = self->size; Py_ssize_t index; PyObject *result; CHECK_VALID(NULL); if (end < start) { result = PyLong_FromSsize_t(-1); } else if (reverse) { assert(0 <= start && start <= end && end <= self->size); if (_safe_PyBytes_ReverseFind(&index, self, self->data + start, end - start, view->buf, view->len, start) < 0) { result = NULL; } else { result = PyLong_FromSsize_t(index); } } else { assert(0 <= start && start <= end && end <= self->size); if (_safe_PyBytes_Find(&index, self, self->data + start, end - start, view->buf, view->len, start) < 0) { result = NULL; } else { result = PyLong_FromSsize_t(index); } } return result; } /*[clinic input] @critical_section mmap.mmap.find view: Py_buffer start: object = None end: object = None / [clinic start generated code]*/ static PyObject * mmap_mmap_find_impl(mmap_object *self, Py_buffer *view, PyObject *start, PyObject *end) /*[clinic end generated 
code: output=ef8878a322f00192 input=0135504494b52c2b]*/ { return mmap_gfind_lock_held(self, view, start, end, 0); } /*[clinic input] @critical_section mmap.mmap.rfind = mmap.mmap.find [clinic start generated code]*/ static PyObject * mmap_mmap_rfind_impl(mmap_object *self, Py_buffer *view, PyObject *start, PyObject *end) /*[clinic end generated code: output=73b918940d67c2b8 input=8aecdd1f70c06c62]*/ { return mmap_gfind_lock_held(self, view, start, end, 1); } static int is_writable(mmap_object *self) { if (self->access != ACCESS_READ) return 1; PyErr_Format(PyExc_TypeError, "mmap can't modify a readonly memory map."); return 0; } #if defined(MS_WINDOWS) || defined(HAVE_MREMAP) static int is_resizeable(mmap_object *self) { if (self->exports > 0) { PyErr_SetString(PyExc_BufferError, "mmap can't resize with extant buffers exported."); return 0; } if (!self->trackfd) { PyErr_SetString(PyExc_ValueError, "mmap can't resize with trackfd=False."); return 0; } if ((self->access == ACCESS_WRITE) || (self->access == ACCESS_DEFAULT)) return 1; PyErr_Format(PyExc_TypeError, "mmap can't resize a readonly or copy-on-write memory map."); return 0; } #endif /* MS_WINDOWS || HAVE_MREMAP */ /*[clinic input] @critical_section mmap.mmap.write bytes as data: Py_buffer / [clinic start generated code]*/ static PyObject * mmap_mmap_write_impl(mmap_object *self, Py_buffer *data) /*[clinic end generated code: output=9e97063efb6fb27b input=3f16fa79aa89d6f7]*/ { CHECK_VALID(NULL); if (!is_writable(self)) { return NULL; } if (self->pos > self->size || self->size - self->pos < data->len) { PyErr_SetString(PyExc_ValueError, "data out of range"); return NULL; } CHECK_VALID(NULL); PyObject *result; if (safe_memcpy(self->data + self->pos, data->buf, data->len) < 0) { result = NULL; } else { self->pos += data->len; result = PyLong_FromSsize_t(data->len); } return result; } /*[clinic input] @critical_section mmap.mmap.write_byte byte as value: unsigned_char / [clinic start generated code]*/ static PyObject * mmap_mmap_write_byte_impl(mmap_object *self, unsigned char value) /*[clinic end generated code: output=aa11adada9b17510 input=32740bfa174f0991]*/ { CHECK_VALID(NULL); if (!is_writable(self)) return NULL; CHECK_VALID(NULL); if (self->pos >= self->size) { PyErr_SetString(PyExc_ValueError, "write byte out of range"); return NULL; } if (safe_byte_copy(self->data + self->pos, (const char*)&value) < 0) { return NULL; } self->pos++; Py_RETURN_NONE; } /*[clinic input] @critical_section mmap.mmap.size [clinic start generated code]*/ static PyObject * mmap_mmap_size_impl(mmap_object *self) /*[clinic end generated code: output=c177e65e83a648ff input=f69c072efd2e1595]*/ { CHECK_VALID(NULL); #ifdef MS_WINDOWS if (self->file_handle != INVALID_HANDLE_VALUE) { DWORD low,high; long long size; low = GetFileSize(self->file_handle, &high); if (low == INVALID_FILE_SIZE) { /* It might be that the function appears to have failed, when indeed its size equals INVALID_FILE_SIZE */ DWORD error = GetLastError(); if (error != NO_ERROR) return PyErr_SetFromWindowsErr(error); } if (!high && low < LONG_MAX) return PyLong_FromLong((long)low); size = (((long long)high)<<32) + low; return PyLong_FromLongLong(size); } #endif /* MS_WINDOWS */ #ifdef UNIX if (self->fd != -1) { struct _Py_stat_struct status; if (_Py_fstat(self->fd, &status) == -1) return NULL; #ifdef HAVE_LARGEFILE_SUPPORT return PyLong_FromLongLong(status.st_size); #else return PyLong_FromLong(status.st_size); #endif } #endif /* UNIX */ else if (self->trackfd) { return 
PyLong_FromSsize_t(self->size); } else { PyErr_SetString(PyExc_ValueError, "can't get size with trackfd=False"); return NULL; } } /* This assumes that you want the entire file mapped, / and when recreating the map will make the new file / have the new size / / Is this really necessary? This could easily be done / from python by just closing and re-opening with the / new size? */ #if defined(MS_WINDOWS) || defined(HAVE_MREMAP) /*[clinic input] @critical_section mmap.mmap.resize newsize as new_size: Py_ssize_t / [clinic start generated code]*/ static PyObject * mmap_mmap_resize_impl(mmap_object *self, Py_ssize_t new_size) /*[clinic end generated code: output=6f262537ce9c2dcc input=b6b5dee52a41b79f]*/ { CHECK_VALID(NULL); if (!is_resizeable(self)) { return NULL; } if (new_size < 0 || PY_SSIZE_T_MAX - new_size < self->offset) { PyErr_SetString(PyExc_ValueError, "new size out of range"); return NULL; } { #ifdef MS_WINDOWS DWORD error = 0, file_resize_error = 0; char* old_data = self->data; LARGE_INTEGER offset, max_size; offset.QuadPart = self->offset; max_size.QuadPart = self->offset + new_size; /* close the file mapping */ CloseHandle(self->map_handle); /* if the file mapping still exists, it cannot be resized. */ if (self->tagname) { self->map_handle = OpenFileMappingW(FILE_MAP_WRITE, FALSE, self->tagname); if (self->map_handle) { PyErr_SetFromWindowsErr(ERROR_USER_MAPPED_FILE); return NULL; } } else { self->map_handle = NULL; } /* if it's not the paging file, unmap the view and resize the file */ if (self->file_handle != INVALID_HANDLE_VALUE) { if (!UnmapViewOfFile(self->data)) { return PyErr_SetFromWindowsErr(GetLastError()); }; self->data = NULL; /* resize the file */ if (!SetFilePointerEx(self->file_handle, max_size, NULL, FILE_BEGIN) || !SetEndOfFile(self->file_handle)) { /* resizing failed. try to remap the file */ file_resize_error = GetLastError(); max_size.QuadPart = self->size; new_size = self->size; } } /* create a new file mapping and map a new view */ /* FIXME: call CreateFileMappingW with wchar_t tagname */ self->map_handle = CreateFileMappingW( self->file_handle, NULL, PAGE_READWRITE, max_size.HighPart, max_size.LowPart, self->tagname); error = GetLastError(); /* ERROR_ALREADY_EXISTS implies that between our closing the handle above and calling CreateFileMapping here, someone's created a different mapping with the same name. There's nothing we can usefully do so we invalidate our mapping and error out. */ if (error == ERROR_ALREADY_EXISTS) { CloseHandle(self->map_handle); self->map_handle = NULL; } else if (self->map_handle != NULL) { self->data = MapViewOfFile(self->map_handle, FILE_MAP_WRITE, offset.HighPart, offset.LowPart, new_size); if (self->data != NULL) { /* copy the old view if using the paging file */ if (self->file_handle == INVALID_HANDLE_VALUE) { memcpy(self->data, old_data, self->size < new_size ? self->size : new_size); if (!UnmapViewOfFile(old_data)) { error = GetLastError(); } } self->size = new_size; } else { error = GetLastError(); CloseHandle(self->map_handle); self->map_handle = NULL; } } if (error) { return PyErr_SetFromWindowsErr(error); } /* It's possible for a resize to fail, typically because another mapping is still held against the same underlying file. 
Even if nothing has failed -- ie we're still returning a valid file mapping -- raise the error as an exception as the resize won't have happened */ if (file_resize_error) { PyErr_SetFromWindowsErr(file_resize_error); return NULL; } Py_RETURN_NONE; #endif /* MS_WINDOWS */ #ifdef UNIX void *newmap; #ifdef __linux__ if (self->fd == -1 && !(self->flags & MAP_PRIVATE) && new_size > self->size) { PyErr_Format(PyExc_ValueError, "mmap: can't expand a shared anonymous mapping on Linux"); return NULL; } #endif if (self->fd != -1 && ftruncate(self->fd, self->offset + new_size) == -1) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } #ifdef MREMAP_MAYMOVE newmap = mremap(self->data, self->size, new_size, MREMAP_MAYMOVE); #else #if defined(__NetBSD__) newmap = mremap(self->data, self->size, self->data, new_size, 0); #else newmap = mremap(self->data, self->size, new_size, 0); #endif /* __NetBSD__ */ #endif if (newmap == (void *)-1) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } self->data = newmap; self->size = new_size; Py_RETURN_NONE; #endif /* UNIX */ } } #endif /* MS_WINDOWS || HAVE_MREMAP */ /*[clinic input] @critical_section mmap.mmap.tell [clinic start generated code]*/ static PyObject * mmap_mmap_tell_impl(mmap_object *self) /*[clinic end generated code: output=6034958630e1b1d1 input=fd163acacf45c3a5]*/ { CHECK_VALID(NULL); return PyLong_FromSize_t(self->pos); } /*[clinic input] @critical_section mmap.mmap.flush offset: Py_ssize_t = 0 size: Py_ssize_t = -1 / * flags: int = 0 [clinic start generated code]*/ static PyObject * mmap_mmap_flush_impl(mmap_object *self, Py_ssize_t offset, Py_ssize_t size, int flags) /*[clinic end generated code: output=4225f4174dc75a53 input=42ba5fb716b6c294]*/ { CHECK_VALID(NULL); if (size == -1) { size = self->size - offset; } if (size < 0 || offset < 0 || self->size - offset < size) { PyErr_SetString(PyExc_ValueError, "flush values out of range"); return NULL; } if (self->access == ACCESS_READ || self->access == ACCESS_COPY) Py_RETURN_NONE; #if defined(MS_WINDOWS_DESKTOP) || defined(MS_WINDOWS_APP) || defined(MS_WINDOWS_SYSTEM) if (!FlushViewOfFile(self->data+offset, size)) { PyErr_SetFromWindowsErr(GetLastError()); return NULL; } Py_RETURN_NONE; #elif defined(UNIX) if (flags == 0) { flags = MS_SYNC; } if (-1 == msync(self->data + offset, size, flags)) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } Py_RETURN_NONE; #else PyErr_SetString(PyExc_ValueError, "flush not supported on this system"); return NULL; #endif } /*[clinic input] @critical_section mmap.mmap.seek pos as dist: Py_ssize_t whence as how: int = 0 / [clinic start generated code]*/ static PyObject * mmap_mmap_seek_impl(mmap_object *self, Py_ssize_t dist, int how) /*[clinic end generated code: output=00310494e8b8c592 input=e2fda5d081c3db22]*/ { CHECK_VALID(NULL); Py_ssize_t where; switch (how) { case 0: /* relative to start */ where = dist; break; case 1: /* relative to current position */ if (PY_SSIZE_T_MAX - self->pos < dist) goto onoutofrange; where = self->pos + dist; break; case 2: /* relative to end */ if (PY_SSIZE_T_MAX - self->size < dist) goto onoutofrange; where = self->size + dist; break; default: PyErr_SetString(PyExc_ValueError, "unknown seek type"); return NULL; } if (where > self->size || where < 0) goto onoutofrange; self->pos = where; return PyLong_FromSsize_t(self->pos); onoutofrange: PyErr_SetString(PyExc_ValueError, "seek out of range"); return NULL; } /*[clinic input] mmap.mmap.set_name name: str / [clinic start generated code]*/ static PyObject * 
mmap_mmap_set_name_impl(mmap_object *self, const char *name) /*[clinic end generated code: output=1edaf4fd51277760 input=6c7dd91cad205f07]*/ { #if defined(MAP_ANONYMOUS) && defined(__linux__) const char *prefix = "cpython:mmap:"; if (strlen(name) + strlen(prefix) > 79) { PyErr_SetString(PyExc_ValueError, "name is too long"); return NULL; } if (self->flags & MAP_ANONYMOUS) { char buf[80]; sprintf(buf, "%s%s", prefix, name); if (_PyAnnotateMemoryMap(self->data, self->size, buf) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } Py_RETURN_NONE; } else { /* cannot name non-anonymous mappings */ PyErr_SetString(PyExc_ValueError, "Cannot set annotation on non-anonymous mappings"); return NULL; } #else /* naming not supported on this platform */ PyErr_SetString(PyExc_NotImplementedError, "Annotation of mmap is not supported on this platform"); return NULL; #endif } /*[clinic input] mmap.mmap.seekable [clinic start generated code]*/ static PyObject * mmap_mmap_seekable_impl(mmap_object *self) /*[clinic end generated code: output=6311dc3ea300fa38 input=5132505f6e259001]*/ { Py_RETURN_TRUE; } /*[clinic input] @critical_section mmap.mmap.move dest: Py_ssize_t src: Py_ssize_t count as cnt: Py_ssize_t / [clinic start generated code]*/ static PyObject * mmap_mmap_move_impl(mmap_object *self, Py_ssize_t dest, Py_ssize_t src, Py_ssize_t cnt) /*[clinic end generated code: output=391f549a44181793 input=cf8cfe10d9f6b448]*/ { CHECK_VALID(NULL); if (!is_writable(self)) { return NULL; } else { /* bounds check the values */ if (dest < 0 || src < 0 || cnt < 0) goto bounds; if (self->size - dest < cnt || self->size - src < cnt) goto bounds; CHECK_VALID(NULL); if (safe_memmove(self->data + dest, self->data + src, cnt) < 0) { return NULL; }; Py_RETURN_NONE; bounds: PyErr_SetString(PyExc_ValueError, "source, destination, or count out of range"); return NULL; } } static PyObject * mmap_closed_get(PyObject *op, void *Py_UNUSED(closure)) { mmap_object *self = mmap_object_CAST(op); PyObject *result; Py_BEGIN_CRITICAL_SECTION(op); #ifdef MS_WINDOWS result = PyBool_FromLong(self->map_handle == NULL ? 1 : 0); #elif defined(UNIX) result = PyBool_FromLong(self->data == NULL ? 
1 : 0); #endif Py_END_CRITICAL_SECTION(); return result; } /*[clinic input] @critical_section mmap.mmap.__enter__ [clinic start generated code]*/ static PyObject * mmap_mmap___enter___impl(mmap_object *self) /*[clinic end generated code: output=92cfc59f4c4e2d26 input=a446541fbfe0b890]*/ { CHECK_VALID(NULL); return Py_NewRef(self); } /*[clinic input] @critical_section mmap.mmap.__exit__ exc_type: object exc_value: object traceback: object / [clinic start generated code]*/ static PyObject * mmap_mmap___exit___impl(mmap_object *self, PyObject *exc_type, PyObject *exc_value, PyObject *traceback) /*[clinic end generated code: output=bec7e3e319c1f07e input=5f28e91cf752bc64]*/ { return mmap_mmap_close_impl(self); } static PyObject * mmap__repr__method_lock_held(PyObject *op) { mmap_object *mobj = mmap_object_CAST(op); #ifdef MS_WINDOWS #define _Py_FORMAT_OFFSET "lld" if (mobj->map_handle == NULL) #elif defined(UNIX) # ifdef HAVE_LARGEFILE_SUPPORT # define _Py_FORMAT_OFFSET "lld" # else # define _Py_FORMAT_OFFSET "ld" # endif if (mobj->data == NULL) #endif { return PyUnicode_FromFormat("<%s closed=True>", Py_TYPE(op)->tp_name); } else { const char *access_str; switch (mobj->access) { case ACCESS_DEFAULT: access_str = "ACCESS_DEFAULT"; break; case ACCESS_READ: access_str = "ACCESS_READ"; break; case ACCESS_WRITE: access_str = "ACCESS_WRITE"; break; case ACCESS_COPY: access_str = "ACCESS_COPY"; break; default: Py_UNREACHABLE(); } return PyUnicode_FromFormat("<%s closed=False, access=%s, length=%zd, " "pos=%zd, offset=%" _Py_FORMAT_OFFSET ">", Py_TYPE(op)->tp_name, access_str, mobj->size, mobj->pos, mobj->offset); } } static PyObject * mmap__repr__method(PyObject *op) { PyObject *result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap__repr__method_lock_held(op); Py_END_CRITICAL_SECTION(); return result; } #ifdef MS_WINDOWS /*[clinic input] @critical_section mmap.mmap.__sizeof__ [clinic start generated code]*/ static PyObject * mmap_mmap___sizeof___impl(mmap_object *self) /*[clinic end generated code: output=1aed30daff807d09 input=8a648868a089553c]*/ { size_t res = _PyObject_SIZE(Py_TYPE(self)); if (self->tagname) { res += (wcslen(self->tagname) + 1) * sizeof(self->tagname[0]); } return PyLong_FromSize_t(res); } #endif #if defined(MS_WINDOWS) && defined(Py_DEBUG) /*[clinic input] @critical_section mmap.mmap._protect flNewProtect: unsigned_int(bitwise=True) start: Py_ssize_t length: Py_ssize_t / [clinic start generated code]*/ static PyObject * mmap_mmap__protect_impl(mmap_object *self, unsigned int flNewProtect, Py_ssize_t start, Py_ssize_t length) /*[clinic end generated code: output=a87271a34d1ad6cf input=9170498c5e1482da]*/ { DWORD flOldProtect; CHECK_VALID(NULL); if (!VirtualProtect((void *) (self->data + start), length, flNewProtect, &flOldProtect)) { PyErr_SetFromWindowsErr(GetLastError()); return NULL; } Py_RETURN_NONE; } #endif #ifdef HAVE_MADVISE /*[clinic input] @critical_section mmap.mmap.madvise option: int start: Py_ssize_t = 0 length as length_obj: object = None / [clinic start generated code]*/ static PyObject * mmap_mmap_madvise_impl(mmap_object *self, int option, Py_ssize_t start, PyObject *length_obj) /*[clinic end generated code: output=816be656f08c0e3c input=2d37f7a4c87f1053]*/ { Py_ssize_t length; CHECK_VALID(NULL); if (length_obj == Py_None) { length = self->size; } else { length = _As_Py_ssize_t(length_obj); if (length == -1 && PyErr_Occurred()) { return NULL; } } if (start < 0 || start >= self->size) { PyErr_SetString(PyExc_ValueError, "madvise start out of bounds"); return NULL; 
} if (length < 0) { PyErr_SetString(PyExc_ValueError, "madvise length invalid"); return NULL; } if (PY_SSIZE_T_MAX - start < length) { PyErr_SetString(PyExc_OverflowError, "madvise length too large"); return NULL; } if (start + length > self->size) { length = self->size - start; } CHECK_VALID(NULL); if (madvise(self->data + start, length, option) != 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } Py_RETURN_NONE; } #endif // HAVE_MADVISE static struct PyMemberDef mmap_object_members[] = { {"__weaklistoffset__", Py_T_PYSSIZET, offsetof(mmap_object, weakreflist), Py_READONLY}, {NULL}, }; static struct PyMethodDef mmap_object_methods[] = { MMAP_MMAP_CLOSE_METHODDEF MMAP_MMAP_FIND_METHODDEF MMAP_MMAP_RFIND_METHODDEF MMAP_MMAP_FLUSH_METHODDEF MMAP_MMAP_MADVISE_METHODDEF MMAP_MMAP_MOVE_METHODDEF MMAP_MMAP_READ_METHODDEF MMAP_MMAP_READ_BYTE_METHODDEF MMAP_MMAP_READLINE_METHODDEF MMAP_MMAP_RESIZE_METHODDEF MMAP_MMAP_SEEK_METHODDEF MMAP_MMAP_SEEKABLE_METHODDEF MMAP_MMAP_SET_NAME_METHODDEF MMAP_MMAP_SIZE_METHODDEF MMAP_MMAP_TELL_METHODDEF MMAP_MMAP_WRITE_METHODDEF MMAP_MMAP_WRITE_BYTE_METHODDEF MMAP_MMAP___ENTER___METHODDEF MMAP_MMAP___EXIT___METHODDEF MMAP_MMAP___SIZEOF___METHODDEF MMAP_MMAP__PROTECT_METHODDEF {NULL, NULL} /* sentinel */ }; static PyGetSetDef mmap_object_getset[] = { {"closed", mmap_closed_get, NULL, NULL}, {NULL} }; /* Functions for treating an mmap'ed file as a buffer */ static int mmap_buffer_getbuf_lock_held(PyObject *op, Py_buffer *view, int flags) { mmap_object *self = mmap_object_CAST(op); CHECK_VALID(-1); if (PyBuffer_FillInfo(view, op, self->data, self->size, (self->access == ACCESS_READ), flags) < 0) return -1; self->exports++; return 0; } static int mmap_buffer_getbuf(PyObject *op, Py_buffer *view, int flags) { int result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap_buffer_getbuf_lock_held(op, view, flags); Py_END_CRITICAL_SECTION(); return result; } static void mmap_buffer_releasebuf(PyObject *op, Py_buffer *Py_UNUSED(view)) { mmap_object *self = mmap_object_CAST(op); Py_BEGIN_CRITICAL_SECTION(self); self->exports--; Py_END_CRITICAL_SECTION(); } static Py_ssize_t mmap_length_lock_held(PyObject *op) { mmap_object *self = mmap_object_CAST(op); CHECK_VALID(-1); return self->size; } static Py_ssize_t mmap_length(PyObject *op) { Py_ssize_t result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap_length_lock_held(op); Py_END_CRITICAL_SECTION(); return result; } static PyObject * mmap_item_lock_held(PyObject *op, Py_ssize_t i) { mmap_object *self = mmap_object_CAST(op); CHECK_VALID(NULL); if (i < 0 || i >= self->size) { PyErr_SetString(PyExc_IndexError, "mmap index out of range"); return NULL; } char dest; if (safe_byte_copy(&dest, self->data + i) < 0) { return NULL; } return PyBytes_FromStringAndSize(&dest, 1); } static PyObject * mmap_item(PyObject *op, Py_ssize_t i) { PyObject *result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap_item_lock_held(op, i); Py_END_CRITICAL_SECTION(); return result; } static PyObject * mmap_subscript_lock_held(PyObject *op, PyObject *item) { mmap_object *self = mmap_object_CAST(op); CHECK_VALID(NULL); if (PyIndex_Check(item)) { Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError); if (i == -1 && PyErr_Occurred()) return NULL; if (i < 0) i += self->size; if (i < 0 || i >= self->size) { PyErr_SetString(PyExc_IndexError, "mmap index out of range"); return NULL; } CHECK_VALID(NULL); char dest; if (safe_byte_copy(&dest, self->data + i) < 0) { return NULL; } return PyLong_FromLong(Py_CHARMASK(dest)); } else if (PySlice_Check(item)) { Py_ssize_t 
start, stop, step, slicelen; if (PySlice_Unpack(item, &start, &stop, &step) < 0) { return NULL; } slicelen = PySlice_AdjustIndices(self->size, &start, &stop, step); CHECK_VALID(NULL); if (slicelen <= 0) return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES); else if (step == 1) return _safe_PyBytes_FromStringAndSize(self->data + start, slicelen); else { char *result_buf = (char *)PyMem_Malloc(slicelen); PyObject *result; if (result_buf == NULL) return PyErr_NoMemory(); if (safe_copy_to_slice(result_buf, self->data, start, step, slicelen) < 0) { result = NULL; } else { result = PyBytes_FromStringAndSize(result_buf, slicelen); } PyMem_Free(result_buf); return result; } } else { PyErr_SetString(PyExc_TypeError, "mmap indices must be integers"); return NULL; } } static PyObject * mmap_subscript(PyObject *op, PyObject *item) { PyObject *result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap_subscript_lock_held(op, item); Py_END_CRITICAL_SECTION(); return result; } static int mmap_ass_item_lock_held(PyObject *op, Py_ssize_t i, PyObject *v) { const char *buf; mmap_object *self = mmap_object_CAST(op); CHECK_VALID(-1); if (i < 0 || i >= self->size) { PyErr_SetString(PyExc_IndexError, "mmap index out of range"); return -1; } if (v == NULL) { PyErr_SetString(PyExc_TypeError, "mmap object doesn't support item deletion"); return -1; } if (! (PyBytes_Check(v) && PyBytes_Size(v)==1) ) { PyErr_SetString(PyExc_IndexError, "mmap assignment must be length-1 bytes()"); return -1; } if (!is_writable(self)) return -1; buf = PyBytes_AsString(v); if (safe_byte_copy(self->data + i, buf) < 0) { return -1; } return 0; } static int mmap_ass_item(PyObject *op, Py_ssize_t i, PyObject *v) { int result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap_ass_item_lock_held(op, i, v); Py_END_CRITICAL_SECTION(); return result; } static int mmap_ass_subscript_lock_held(PyObject *op, PyObject *item, PyObject *value) { mmap_object *self = mmap_object_CAST(op); CHECK_VALID(-1); if (!is_writable(self)) return -1; if (PyIndex_Check(item)) { Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError); Py_ssize_t v; if (i == -1 && PyErr_Occurred()) return -1; if (i < 0) i += self->size; if (i < 0 || i >= self->size) { PyErr_SetString(PyExc_IndexError, "mmap index out of range"); return -1; } if (value == NULL) { PyErr_SetString(PyExc_TypeError, "mmap doesn't support item deletion"); return -1; } if (!PyIndex_Check(value)) { PyErr_SetString(PyExc_TypeError, "mmap item value must be an int"); return -1; } v = PyNumber_AsSsize_t(value, PyExc_TypeError); if (v == -1 && PyErr_Occurred()) return -1; if (v < 0 || v > 255) { PyErr_SetString(PyExc_ValueError, "mmap item value must be " "in range(0, 256)"); return -1; } CHECK_VALID(-1); char v_char = (char) v; if (safe_byte_copy(self->data + i, &v_char) < 0) { return -1; } return 0; } else if (PySlice_Check(item)) { Py_ssize_t start, stop, step, slicelen; Py_buffer vbuf; if (PySlice_Unpack(item, &start, &stop, &step) < 0) { return -1; } slicelen = PySlice_AdjustIndices(self->size, &start, &stop, step); if (value == NULL) { PyErr_SetString(PyExc_TypeError, "mmap object doesn't support slice deletion"); return -1; } if (PyObject_GetBuffer(value, &vbuf, PyBUF_SIMPLE) < 0) return -1; if (vbuf.len != slicelen) { PyErr_SetString(PyExc_IndexError, "mmap slice assignment is wrong size"); PyBuffer_Release(&vbuf); return -1; } CHECK_VALID_OR_RELEASE(-1, vbuf); int result = 0; if (slicelen == 0) { } else if (step == 1) { if (safe_memcpy(self->data + start, vbuf.buf, slicelen) < 0) { result = -1; } } else { if 
(safe_copy_from_slice(self->data, (char *)vbuf.buf, start, step, slicelen) < 0) { result = -1; } } PyBuffer_Release(&vbuf); return result; } else { PyErr_SetString(PyExc_TypeError, "mmap indices must be integer"); return -1; } } static int mmap_ass_subscript(PyObject *op, PyObject *item, PyObject *value) { int result; Py_BEGIN_CRITICAL_SECTION(op); result = mmap_ass_subscript_lock_held(op, item, value); Py_END_CRITICAL_SECTION(); return result; } static PyObject * new_mmap_object(PyTypeObject *type, PyObject *args, PyObject *kwdict); PyDoc_STRVAR(mmap_doc, "Windows: mmap(fileno, length[, tagname[, access[, offset[, trackfd]]]])\n\ \n\ Maps length bytes from the file specified by the file handle fileno,\n\ and returns a mmap object. If length is larger than the current size\n\ of the file, the file is extended to contain length bytes. If length\n\ is 0, the maximum length of the map is the current size of the file,\n\ except that if the file is empty Windows raises an exception (you cannot\n\ create an empty mapping on Windows).\n\ \n\ Unix: mmap(fileno, length[, flags[, prot[, access[, offset[, trackfd]]]]])\n\ \n\ Maps length bytes from the file specified by the file descriptor fileno,\n\ and returns a mmap object. If length is 0, the maximum length of the map\n\ will be the current size of the file when mmap is called.\n\ flags specifies the nature of the mapping. MAP_PRIVATE creates a\n\ private copy-on-write mapping, so changes to the contents of the mmap\n\ object will be private to this process, and MAP_SHARED creates a mapping\n\ that's shared with all other processes mapping the same areas of the file.\n\ The default value is MAP_SHARED.\n\ \n\ To map anonymous memory, pass -1 as the fileno (both versions)."); static PyType_Slot mmap_object_slots[] = { {Py_tp_new, new_mmap_object}, {Py_tp_dealloc, mmap_object_dealloc}, {Py_tp_repr, mmap__repr__method}, {Py_tp_doc, (void *)mmap_doc}, {Py_tp_methods, mmap_object_methods}, {Py_tp_members, mmap_object_members}, {Py_tp_getset, mmap_object_getset}, {Py_tp_getattro, PyObject_GenericGetAttr}, {Py_tp_traverse, _PyObject_VisitType}, /* as sequence */ {Py_sq_length, mmap_length}, {Py_sq_item, mmap_item}, {Py_sq_ass_item, mmap_ass_item}, /* as mapping */ {Py_mp_length, mmap_length}, {Py_mp_subscript, mmap_subscript}, {Py_mp_ass_subscript, mmap_ass_subscript}, /* as buffer */ {Py_bf_getbuffer, mmap_buffer_getbuf}, {Py_bf_releasebuffer, mmap_buffer_releasebuf}, {0, NULL}, }; static PyType_Spec mmap_object_spec = { .name = "mmap.mmap", .basicsize = sizeof(mmap_object), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = mmap_object_slots, }; #ifdef UNIX #ifdef HAVE_LARGEFILE_SUPPORT #define _Py_PARSE_OFF_T "L" #else #define _Py_PARSE_OFF_T "l" #endif static PyObject * new_mmap_object(PyTypeObject *type, PyObject *args, PyObject *kwdict) { struct _Py_stat_struct status; int fstat_result = -1; mmap_object *m_obj; Py_ssize_t map_size; off_t offset = 0; int fd, flags = MAP_SHARED, prot = PROT_WRITE | PROT_READ; int devzero = -1; int access = (int)ACCESS_DEFAULT, trackfd = 1; static char *keywords[] = {"fileno", "length", "flags", "prot", "access", "offset", "trackfd", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwdict, "in|iii" _Py_PARSE_OFF_T "$p", keywords, &fd, &map_size, &flags, &prot, &access, &offset, &trackfd)) { return NULL; } if (map_size < 0) { PyErr_SetString(PyExc_OverflowError, "memory mapped length must be positive"); return NULL; } if (offset < 0) { 
PyErr_SetString(PyExc_OverflowError, "memory mapped offset must be positive"); return NULL; } if ((access != (int)ACCESS_DEFAULT) && ((flags != MAP_SHARED) || (prot != (PROT_WRITE | PROT_READ)))) return PyErr_Format(PyExc_ValueError, "mmap can't specify both access and flags, prot."); switch ((access_mode)access) { case ACCESS_READ: flags = MAP_SHARED; prot = PROT_READ; break; case ACCESS_WRITE: flags = MAP_SHARED; prot = PROT_READ | PROT_WRITE; break; case ACCESS_COPY: flags = MAP_PRIVATE; prot = PROT_READ | PROT_WRITE; break; case ACCESS_DEFAULT: /* map prot to access type */ if ((prot & PROT_READ) && (prot & PROT_WRITE)) { /* ACCESS_DEFAULT */ } else if (prot & PROT_WRITE) { access = ACCESS_WRITE; } else { access = ACCESS_READ; } break; default: return PyErr_Format(PyExc_ValueError, "mmap invalid access parameter."); } if (PySys_Audit("mmap.__new__", "ini" _Py_PARSE_OFF_T, fd, map_size, access, offset) < 0) { return NULL; } #ifdef __APPLE__ /* Issue #11277: fsync(2) is not enough on OS X - a special, OS X specific fcntl(2) is necessary to force DISKSYNC and get around mmap(2) bug */ if (fd != -1) (void)fcntl(fd, F_FULLFSYNC); #endif if (fd != -1) { Py_BEGIN_ALLOW_THREADS fstat_result = _Py_fstat_noraise(fd, &status); Py_END_ALLOW_THREADS } if (fd != -1 && fstat_result == 0 && S_ISREG(status.st_mode)) { if (map_size == 0) { if (status.st_size == 0) { PyErr_SetString(PyExc_ValueError, "cannot mmap an empty file"); return NULL; } if (offset >= status.st_size) { PyErr_SetString(PyExc_ValueError, "mmap offset is greater than file size"); return NULL; } if (status.st_size - offset > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_ValueError, "mmap length is too large"); return NULL; } map_size = (Py_ssize_t) (status.st_size - offset); } else if (offset > status.st_size || status.st_size - offset < map_size) { PyErr_SetString(PyExc_ValueError, "mmap length is greater than file size"); return NULL; } } m_obj = (mmap_object *)type->tp_alloc(type, 0); if (m_obj == NULL) {return NULL;} m_obj->data = NULL; m_obj->size = map_size; m_obj->pos = 0; m_obj->weakreflist = NULL; m_obj->exports = 0; m_obj->offset = offset; m_obj->trackfd = trackfd; if (fd == -1) { m_obj->fd = -1; /* Assume the caller wants to map anonymous memory. This is the same behaviour as Windows. mmap.mmap(-1, size) on both Windows and Unix map anonymous memory. 
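For example, mmap.mmap(-1, 4096) creates a 4096-byte anonymous mapping; on platforms with MAP_ANONYMOUS this becomes roughly mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0) below.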
*/ #ifdef MAP_ANONYMOUS /* BSD way to map anonymous memory */ flags |= MAP_ANONYMOUS; /* VxWorks only supports MAP_ANONYMOUS with MAP_PRIVATE flag */ #ifdef __VXWORKS__ flags &= ~MAP_SHARED; flags |= MAP_PRIVATE; #endif #else /* SVR4 method to map anonymous memory is to open /dev/zero */ fd = devzero = _Py_open("/dev/zero", O_RDWR); if (devzero == -1) { Py_DECREF(m_obj); return NULL; } #endif } else if (trackfd) { m_obj->fd = _Py_dup(fd); if (m_obj->fd == -1) { Py_DECREF(m_obj); return NULL; } } else { m_obj->fd = -1; } m_obj->flags = flags; Py_BEGIN_ALLOW_THREADS m_obj->data = mmap(NULL, map_size, prot, flags, fd, offset); Py_END_ALLOW_THREADS int saved_errno = errno; if (devzero != -1) { close(devzero); } if (m_obj->data == (char *)-1) { m_obj->data = NULL; Py_DECREF(m_obj); errno = saved_errno; PyErr_SetFromErrno(PyExc_OSError); return NULL; } #ifdef MAP_ANONYMOUS if (m_obj->flags & MAP_ANONYMOUS) { (void)_PyAnnotateMemoryMap(m_obj->data, map_size, "cpython:mmap"); } #endif m_obj->access = (access_mode)access; return (PyObject *)m_obj; } #endif /* UNIX */ #ifdef MS_WINDOWS /* A note on sizes and offsets: while the actual map size must hold in a Py_ssize_t, both the total file size and the start offset can be longer than a Py_ssize_t, so we use long long which is always 64-bit. */ static PyObject * new_mmap_object(PyTypeObject *type, PyObject *args, PyObject *kwdict) { mmap_object *m_obj; Py_ssize_t map_size; long long offset = 0, size; DWORD off_hi; /* upper 32 bits of offset */ DWORD off_lo; /* lower 32 bits of offset */ DWORD size_hi; /* upper 32 bits of size */ DWORD size_lo; /* lower 32 bits of size */ PyObject *tagname = Py_None; DWORD dwErr = 0; int fileno; HANDLE fh = INVALID_HANDLE_VALUE; int access = (access_mode)ACCESS_DEFAULT; int trackfd = 1; DWORD flProtect, dwDesiredAccess; static char *keywords[] = { "fileno", "length", "tagname", "access", "offset", "trackfd", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kwdict, "in|OiL$p", keywords, &fileno, &map_size, &tagname, &access, &offset, &trackfd)) { return NULL; } if (PySys_Audit("mmap.__new__", "iniL", fileno, map_size, access, offset) < 0) { return NULL; } switch((access_mode)access) { case ACCESS_READ: flProtect = PAGE_READONLY; dwDesiredAccess = FILE_MAP_READ; break; case ACCESS_DEFAULT: case ACCESS_WRITE: flProtect = PAGE_READWRITE; dwDesiredAccess = FILE_MAP_WRITE; break; case ACCESS_COPY: flProtect = PAGE_WRITECOPY; dwDesiredAccess = FILE_MAP_COPY; break; default: return PyErr_Format(PyExc_ValueError, "mmap invalid access parameter."); } if (map_size < 0) { PyErr_SetString(PyExc_OverflowError, "memory mapped length must be positive"); return NULL; } if (offset < 0) { PyErr_SetString(PyExc_OverflowError, "memory mapped offset must be positive"); return NULL; } /* assume -1 and 0 both mean invalid filedescriptor to 'anonymously' map memory. XXX: fileno == 0 is a valid fd, but was accepted prior to 2.5. XXX: Should this code be added? 
if (fileno == 0) PyErr_WarnEx(PyExc_DeprecationWarning, "don't use 0 for anonymous memory", 1); */ if (fileno != -1 && fileno != 0) { /* Ensure that fileno is within the CRT's valid range */ fh = _Py_get_osfhandle(fileno); if (fh == INVALID_HANDLE_VALUE) return NULL; /* Win9x appears to need us seeked to zero */ lseek(fileno, 0, SEEK_SET); } m_obj = (mmap_object *)type->tp_alloc(type, 0); if (m_obj == NULL) return NULL; /* Set every field to an invalid marker, so we can safely destruct the object in the face of failure */ m_obj->data = NULL; m_obj->file_handle = INVALID_HANDLE_VALUE; m_obj->map_handle = NULL; m_obj->tagname = NULL; m_obj->offset = offset; m_obj->trackfd = trackfd; if (fh != INVALID_HANDLE_VALUE) { if (trackfd) { /* It is necessary to duplicate the handle, so the Python code can close it on us */ if (!DuplicateHandle( GetCurrentProcess(), /* source process handle */ fh, /* handle to be duplicated */ GetCurrentProcess(), /* target proc handle */ &fh, /* result */ 0, /* access - ignored due to options value */ FALSE, /* inherited by child processes? */ DUPLICATE_SAME_ACCESS)) /* options */ { dwErr = GetLastError(); Py_DECREF(m_obj); PyErr_SetFromWindowsErr(dwErr); return NULL; } m_obj->file_handle = fh; } if (!map_size) { DWORD low,high; low = GetFileSize(fh, &high); /* low might just happen to have the value INVALID_FILE_SIZE; so we need to check the last error also. */ if (low == INVALID_FILE_SIZE && (dwErr = GetLastError()) != NO_ERROR) { Py_DECREF(m_obj); return PyErr_SetFromWindowsErr(dwErr); } size = (((long long) high) << 32) + low; if (size == 0) { PyErr_SetString(PyExc_ValueError, "cannot mmap an empty file"); Py_DECREF(m_obj); return NULL; } if (offset >= size) { PyErr_SetString(PyExc_ValueError, "mmap offset is greater than file size"); Py_DECREF(m_obj); return NULL; } if (size - offset > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_ValueError, "mmap length is too large"); Py_DECREF(m_obj); return NULL; } m_obj->size = (Py_ssize_t) (size - offset); } else { m_obj->size = map_size; size = offset + map_size; } } else { m_obj->size = map_size; size = offset + map_size; } /* set the initial position */ m_obj->pos = (size_t) 0; m_obj->weakreflist = NULL; m_obj->exports = 0; /* set the tag name */ if (!Py_IsNone(tagname)) { if (!PyUnicode_Check(tagname)) { Py_DECREF(m_obj); return PyErr_Format(PyExc_TypeError, "expected str or None for " "'tagname', not %.200s", Py_TYPE(tagname)->tp_name); } m_obj->tagname = PyUnicode_AsWideCharString(tagname, NULL); if (m_obj->tagname == NULL) { Py_DECREF(m_obj); return NULL; } } m_obj->access = (access_mode)access; size_hi = (DWORD)(size >> 32); size_lo = (DWORD)(size & 0xFFFFFFFF); off_hi = (DWORD)(offset >> 32); off_lo = (DWORD)(offset & 0xFFFFFFFF); /* For files, it would be sufficient to pass 0 as size. For anonymous maps, we have to pass the size explicitly. 
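(CreateFileMappingW interprets a zero size as 'use the current size of the file', which does not exist for a paging-file-backed mapping.)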
*/ m_obj->map_handle = CreateFileMappingW(fh, NULL, flProtect, size_hi, size_lo, m_obj->tagname); if (m_obj->map_handle != NULL) { m_obj->data = (char *) MapViewOfFile(m_obj->map_handle, dwDesiredAccess, off_hi, off_lo, m_obj->size); if (m_obj->data != NULL) return (PyObject *)m_obj; else { dwErr = GetLastError(); CloseHandle(m_obj->map_handle); m_obj->map_handle = NULL; } } else dwErr = GetLastError(); Py_DECREF(m_obj); PyErr_SetFromWindowsErr(dwErr); return NULL; } #endif /* MS_WINDOWS */ static int mmap_exec(PyObject *module) { if (PyModule_AddObjectRef(module, "error", PyExc_OSError) < 0) { return -1; } PyObject *mmap_object_type = PyType_FromModuleAndSpec(module, &mmap_object_spec, NULL); if (mmap_object_type == NULL) { return -1; } int rc = PyModule_AddType(module, (PyTypeObject *)mmap_object_type); Py_DECREF(mmap_object_type); if (rc < 0) { return -1; } #define ADD_INT_MACRO(module, constant) \ do { \ if (PyModule_AddIntConstant(module, #constant, constant) < 0) { \ return -1; \ } \ } while (0) #ifdef PROT_EXEC ADD_INT_MACRO(module, PROT_EXEC); #endif #ifdef PROT_READ ADD_INT_MACRO(module, PROT_READ); #endif #ifdef PROT_WRITE ADD_INT_MACRO(module, PROT_WRITE); #endif #ifdef MAP_SHARED ADD_INT_MACRO(module, MAP_SHARED); #endif #ifdef MAP_PRIVATE ADD_INT_MACRO(module, MAP_PRIVATE); #endif #ifdef MAP_DENYWRITE ADD_INT_MACRO(module, MAP_DENYWRITE); #endif #ifdef MAP_EXECUTABLE ADD_INT_MACRO(module, MAP_EXECUTABLE); #endif #ifdef MAP_ANONYMOUS if (PyModule_AddIntConstant(module, "MAP_ANON", MAP_ANONYMOUS) < 0 ) { return -1; } ADD_INT_MACRO(module, MAP_ANONYMOUS); #endif #ifdef MAP_POPULATE ADD_INT_MACRO(module, MAP_POPULATE); #endif #ifdef MAP_STACK // Mostly a no-op on Linux and NetBSD, but useful on OpenBSD // for stack usage (even on x86 arch) ADD_INT_MACRO(module, MAP_STACK); #endif #ifdef MAP_ALIGNED_SUPER ADD_INT_MACRO(module, MAP_ALIGNED_SUPER); #endif #ifdef MAP_CONCEAL ADD_INT_MACRO(module, MAP_CONCEAL); #endif #ifdef MAP_NORESERVE ADD_INT_MACRO(module, MAP_NORESERVE); #endif #ifdef MAP_NOEXTEND ADD_INT_MACRO(module, MAP_NOEXTEND); #endif #ifdef MAP_HASSEMAPHORE ADD_INT_MACRO(module, MAP_HASSEMAPHORE); #endif #ifdef MAP_NOCACHE ADD_INT_MACRO(module, MAP_NOCACHE); #endif #ifdef MAP_JIT ADD_INT_MACRO(module, MAP_JIT); #endif #ifdef MAP_RESILIENT_CODESIGN ADD_INT_MACRO(module, MAP_RESILIENT_CODESIGN); #endif #ifdef MAP_RESILIENT_MEDIA ADD_INT_MACRO(module, MAP_RESILIENT_MEDIA); #endif #ifdef MAP_32BIT ADD_INT_MACRO(module, MAP_32BIT); #endif #ifdef MAP_TRANSLATED_ALLOW_EXECUTE ADD_INT_MACRO(module, MAP_TRANSLATED_ALLOW_EXECUTE); #endif #ifdef MAP_UNIX03 ADD_INT_MACRO(module, MAP_UNIX03); #endif #ifdef MAP_TPRO ADD_INT_MACRO(module, MAP_TPRO); #endif if (PyModule_AddIntConstant(module, "PAGESIZE", (long)my_getpagesize()) < 0 ) { return -1; } if (PyModule_AddIntConstant(module, "ALLOCATIONGRANULARITY", (long)my_getallocationgranularity()) < 0 ) { return -1; } ADD_INT_MACRO(module, ACCESS_DEFAULT); ADD_INT_MACRO(module, ACCESS_READ); ADD_INT_MACRO(module, ACCESS_WRITE); ADD_INT_MACRO(module, ACCESS_COPY); #ifdef MS_INVALIDATE ADD_INT_MACRO(module, MS_INVALIDATE); #endif #ifdef MS_ASYNC ADD_INT_MACRO(module, MS_ASYNC); #endif #ifdef MS_SYNC ADD_INT_MACRO(module, MS_SYNC); #endif #ifdef HAVE_MADVISE // Conventional advice values #ifdef MADV_NORMAL ADD_INT_MACRO(module, MADV_NORMAL); #endif #ifdef MADV_RANDOM ADD_INT_MACRO(module, MADV_RANDOM); #endif #ifdef MADV_SEQUENTIAL ADD_INT_MACRO(module, MADV_SEQUENTIAL); #endif #ifdef MADV_WILLNEED ADD_INT_MACRO(module, MADV_WILLNEED); #endif 
#ifdef MADV_DONTNEED ADD_INT_MACRO(module, MADV_DONTNEED); #endif // Linux-specific advice values #ifdef MADV_REMOVE ADD_INT_MACRO(module, MADV_REMOVE); #endif #ifdef MADV_DONTFORK ADD_INT_MACRO(module, MADV_DONTFORK); #endif #ifdef MADV_DOFORK ADD_INT_MACRO(module, MADV_DOFORK); #endif #ifdef MADV_HWPOISON ADD_INT_MACRO(module, MADV_HWPOISON); #endif #ifdef MADV_MERGEABLE ADD_INT_MACRO(module, MADV_MERGEABLE); #endif #ifdef MADV_UNMERGEABLE ADD_INT_MACRO(module, MADV_UNMERGEABLE); #endif #ifdef MADV_SOFT_OFFLINE ADD_INT_MACRO(module, MADV_SOFT_OFFLINE); #endif #ifdef MADV_HUGEPAGE ADD_INT_MACRO(module, MADV_HUGEPAGE); #endif #ifdef MADV_NOHUGEPAGE ADD_INT_MACRO(module, MADV_NOHUGEPAGE); #endif #ifdef MADV_DONTDUMP ADD_INT_MACRO(module, MADV_DONTDUMP); #endif #ifdef MADV_DODUMP ADD_INT_MACRO(module, MADV_DODUMP); #endif #ifdef MADV_FREE // (Also present on FreeBSD and macOS.) ADD_INT_MACRO(module, MADV_FREE); #endif // FreeBSD-specific #ifdef MADV_NOSYNC ADD_INT_MACRO(module, MADV_NOSYNC); #endif #ifdef MADV_AUTOSYNC ADD_INT_MACRO(module, MADV_AUTOSYNC); #endif #ifdef MADV_NOCORE ADD_INT_MACRO(module, MADV_NOCORE); #endif #ifdef MADV_CORE ADD_INT_MACRO(module, MADV_CORE); #endif #ifdef MADV_PROTECT ADD_INT_MACRO(module, MADV_PROTECT); #endif // Darwin-specific #ifdef MADV_FREE_REUSABLE // (As MADV_FREE but reclaims more faithful for task_info/Activity Monitor...) ADD_INT_MACRO(module, MADV_FREE_REUSABLE); #endif #ifdef MADV_FREE_REUSE // (Reuse pages previously tagged as reusable) ADD_INT_MACRO(module, MADV_FREE_REUSE); #endif #endif // HAVE_MADVISE return 0; } static PyModuleDef_Slot mmap_slots[] = { {Py_mod_exec, mmap_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; static struct PyModuleDef mmapmodule = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "mmap", .m_size = 0, .m_slots = mmap_slots, }; PyMODINIT_FUNC PyInit_mmap(void) { return PyModuleDef_Init(&mmapmodule); } /* UNIX group file access module */ // Argument Clinic uses the internal C API #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "posixmodule.h" #include // ERANGE #include // getgrgid_r() #include // memcpy() #include // sysconf() #include "clinic/grpmodule.c.h" /*[clinic input] module grp [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=cade63f2ed1bd9f8]*/ static PyStructSequence_Field struct_group_type_fields[] = { {"gr_name", "group name"}, {"gr_passwd", "password"}, {"gr_gid", "group id"}, {"gr_mem", "group members"}, {0} }; PyDoc_STRVAR(struct_group__doc__, "grp.struct_group: Results from getgr*() routines.\n\n\ This object may be accessed either as a tuple of\n\ (gr_name,gr_passwd,gr_gid,gr_mem)\n\ or via the object attributes as named in the above tuple.\n"); static PyStructSequence_Desc struct_group_type_desc = { "grp.struct_group", struct_group__doc__, struct_group_type_fields, 4, }; typedef struct { PyTypeObject *StructGrpType; } grpmodulestate; static inline grpmodulestate* get_grp_state(PyObject *module) { void *state = PyModule_GetState(module); assert(state != NULL); return (grpmodulestate *)state; } static struct PyModuleDef grpmodule; /* Mutex to protect calls to getgrgid(), getgrnam(), and getgrent(). * These functions return pointer to static data structure, which * may be overwritten by any subsequent calls. 
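* Callers below therefore either use the reentrant *_r variants with their own buffers, or copy the result via mkgrent() before releasing group_db_mutex.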
*/ static PyMutex group_db_mutex = {0}; #define DEFAULT_BUFFER_SIZE 1024 static PyObject * mkgrent(PyObject *module, struct group *p) { int setIndex = 0; PyObject *v, *w; char **member; v = PyStructSequence_New(get_grp_state(module)->StructGrpType); if (v == NULL) return NULL; if ((w = PyList_New(0)) == NULL) { Py_DECREF(v); return NULL; } for (member = p->gr_mem; ; member++) { char *group_member; // member can be misaligned memcpy(&group_member, member, sizeof(group_member)); if (group_member == NULL) { break; } PyObject *x = PyUnicode_DecodeFSDefault(group_member); if (x == NULL || PyList_Append(w, x) != 0) { Py_XDECREF(x); Py_DECREF(w); Py_DECREF(v); return NULL; } Py_DECREF(x); } #define SET(i,val) PyStructSequence_SetItem(v, i, val) SET(setIndex++, PyUnicode_DecodeFSDefault(p->gr_name)); if (p->gr_passwd) SET(setIndex++, PyUnicode_DecodeFSDefault(p->gr_passwd)); else { SET(setIndex++, Py_None); Py_INCREF(Py_None); } SET(setIndex++, _PyLong_FromGid(p->gr_gid)); SET(setIndex++, w); #undef SET if (PyErr_Occurred()) { Py_DECREF(v); return NULL; } return v; } /*[clinic input] grp.getgrgid id: object Return the group database entry for the given numeric group ID. If id is not valid, raise KeyError. [clinic start generated code]*/ static PyObject * grp_getgrgid_impl(PyObject *module, PyObject *id) /*[clinic end generated code: output=30797c289504a1ba input=15fa0e2ccf5cda25]*/ { PyObject *retval = NULL; int nomem = 0; char *buf = NULL, *buf2 = NULL; gid_t gid; struct group *p; if (!_Py_Gid_Converter(id, &gid)) { return NULL; } #ifdef HAVE_GETGRGID_R int status; Py_ssize_t bufsize; /* Note: 'grp' will be used via pointer 'p' on getgrgid_r success. */ struct group grp; Py_BEGIN_ALLOW_THREADS bufsize = sysconf(_SC_GETGR_R_SIZE_MAX); if (bufsize == -1) { bufsize = DEFAULT_BUFFER_SIZE; } while (1) { buf2 = PyMem_RawRealloc(buf, bufsize); if (buf2 == NULL) { p = NULL; nomem = 1; break; } buf = buf2; status = getgrgid_r(gid, &grp, buf, bufsize, &p); if (status != 0) { p = NULL; } if (p != NULL || status != ERANGE) { break; } if (bufsize > (PY_SSIZE_T_MAX >> 1)) { nomem = 1; break; } bufsize <<= 1; } Py_END_ALLOW_THREADS #else PyMutex_Lock(&group_db_mutex); // The getgrgid() function need not be thread-safe. // https://pubs.opengroup.org/onlinepubs/9699919799/functions/getgrgid.html p = getgrgid(gid); #endif if (p == NULL) { #ifndef HAVE_GETGRGID_R PyMutex_Unlock(&group_db_mutex); #endif PyMem_RawFree(buf); if (nomem == 1) { return PyErr_NoMemory(); } PyObject *gid_obj = _PyLong_FromGid(gid); if (gid_obj == NULL) return NULL; PyErr_Format(PyExc_KeyError, "getgrgid(): gid not found: %S", gid_obj); Py_DECREF(gid_obj); return NULL; } retval = mkgrent(module, p); #ifdef HAVE_GETGRGID_R PyMem_RawFree(buf); #else PyMutex_Unlock(&group_db_mutex); #endif return retval; } /*[clinic input] grp.getgrnam name: unicode Return the group database entry for the given group name. If name is not valid, raise KeyError. [clinic start generated code]*/ static PyObject * grp_getgrnam_impl(PyObject *module, PyObject *name) /*[clinic end generated code: output=67905086f403c21c input=08ded29affa3c863]*/ { char *buf = NULL, *buf2 = NULL, *name_chars; int nomem = 0; struct group *p; PyObject *bytes, *retval = NULL; if ((bytes = PyUnicode_EncodeFSDefault(name)) == NULL) return NULL; /* check for embedded null bytes */ if (PyBytes_AsStringAndSize(bytes, &name_chars, NULL) == -1) goto out; #ifdef HAVE_GETGRNAM_R int status; Py_ssize_t bufsize; /* Note: 'grp' will be used via pointer 'p' on getgrnam_r success. 
*/ struct group grp; Py_BEGIN_ALLOW_THREADS bufsize = sysconf(_SC_GETGR_R_SIZE_MAX); if (bufsize == -1) { bufsize = DEFAULT_BUFFER_SIZE; } while(1) { buf2 = PyMem_RawRealloc(buf, bufsize); if (buf2 == NULL) { p = NULL; nomem = 1; break; } buf = buf2; status = getgrnam_r(name_chars, &grp, buf, bufsize, &p); if (status != 0) { p = NULL; } if (p != NULL || status != ERANGE) { break; } if (bufsize > (PY_SSIZE_T_MAX >> 1)) { nomem = 1; break; } bufsize <<= 1; } Py_END_ALLOW_THREADS #else PyMutex_Lock(&group_db_mutex); // The getgrnam() function need not be thread-safe. // https://pubs.opengroup.org/onlinepubs/9699919799/functions/getgrnam.html p = getgrnam(name_chars); #endif if (p == NULL) { #ifndef HAVE_GETGRNAM_R PyMutex_Unlock(&group_db_mutex); #endif if (nomem == 1) { PyErr_NoMemory(); } else { PyErr_Format(PyExc_KeyError, "getgrnam(): name not found: %R", name); } goto out; } retval = mkgrent(module, p); #ifndef HAVE_GETGRNAM_R PyMutex_Unlock(&group_db_mutex); #endif out: PyMem_RawFree(buf); Py_DECREF(bytes); return retval; } /*[clinic input] grp.getgrall Return a list of all available group entries, in arbitrary order. An entry whose name starts with '+' or '-' represents an instruction to use YP/NIS and may not be accessible via getgrnam or getgrgid. [clinic start generated code]*/ static PyObject * grp_getgrall_impl(PyObject *module) /*[clinic end generated code: output=585dad35e2e763d7 input=d7df76c825c367df]*/ { PyObject *d = PyList_New(0); if (d == NULL) { return NULL; } PyMutex_Lock(&group_db_mutex); setgrent(); struct group *p; while ((p = getgrent()) != NULL) { // gh-126316: Don't release the mutex around mkgrent() since // setgrent()/endgrent() are not reentrant / thread-safe. A deadlock // is unlikely since mkgrent() should not be able to call arbitrary // Python code. PyObject *v = mkgrent(module, p); if (v == NULL || PyList_Append(d, v) != 0) { Py_XDECREF(v); Py_CLEAR(d); goto done; } Py_DECREF(v); } done: endgrent(); PyMutex_Unlock(&group_db_mutex); return d; } static PyMethodDef grp_methods[] = { GRP_GETGRGID_METHODDEF GRP_GETGRNAM_METHODDEF GRP_GETGRALL_METHODDEF {NULL, NULL} }; PyDoc_STRVAR(grp__doc__, "Access to the Unix group database.\n\ \n\ Group entries are reported as 4-tuples containing the following fields\n\ from the group database, in order:\n\ \n\ gr_name - name of the group\n\ gr_passwd - group password (encrypted); often empty\n\ gr_gid - numeric ID of the group\n\ gr_mem - list of members\n\ \n\ The gid is an integer, name and password are strings. (Note that most\n\ users are not explicitly listed as members of the groups they are in\n\ according to the password database. 
Check both databases to get\n\ complete membership information.)"); static int grpmodule_exec(PyObject *module) { grpmodulestate *state = get_grp_state(module); state->StructGrpType = PyStructSequence_NewType(&struct_group_type_desc); if (state->StructGrpType == NULL) { return -1; } if (PyModule_AddType(module, state->StructGrpType) < 0) { return -1; } return 0; } static PyModuleDef_Slot grpmodule_slots[] = { {Py_mod_exec, grpmodule_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; static int grpmodule_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(get_grp_state(m)->StructGrpType); return 0; } static int grpmodule_clear(PyObject *m) { Py_CLEAR(get_grp_state(m)->StructGrpType); return 0; } static void grpmodule_free(void *m) { grpmodule_clear((PyObject *)m); } static struct PyModuleDef grpmodule = { PyModuleDef_HEAD_INIT, .m_name = "grp", .m_doc = grp__doc__, .m_size = sizeof(grpmodulestate), .m_methods = grp_methods, .m_slots = grpmodule_slots, .m_traverse = grpmodule_traverse, .m_clear = grpmodule_clear, .m_free = grpmodule_free, }; PyMODINIT_FUNC PyInit_grp(void) { return PyModuleDef_Init(&grpmodule); } /* zlibmodule.c -- gzip-compatible data compression */ /* See http://zlib.net/ */ /* Windows users: read Python's PCbuild\readme.txt */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_STORE_CHAR_RELAXED #include "zlib.h" #include "stdbool.h" #include <stddef.h> // offsetof() #if defined(ZLIB_VERNUM) && ZLIB_VERNUM < 0x1221 #error "At least zlib version 1.2.2.1 is required" #endif #if (SIZEOF_OFF_T == SIZEOF_SIZE_T) # define convert_to_z_off_t PyLong_AsSsize_t #elif (SIZEOF_OFF_T == SIZEOF_LONG_LONG) # define convert_to_z_off_t PyLong_AsLongLong #elif (SIZEOF_OFF_T == SIZEOF_LONG) # define convert_to_z_off_t PyLong_AsLong #else # error off_t does not match either size_t, long, or long long! #endif // Blocks output buffer wrappers #include "pycore_blocks_output_buffer.h" #if OUTPUT_BUFFER_MAX_BLOCK_SIZE > UINT32_MAX #error "The maximum block size accepted by zlib is UINT32_MAX." #endif /* On success, return value >= 0 On failure, return -1 */ static inline Py_ssize_t OutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer, Py_ssize_t max_length, Bytef **next_out, uint32_t *avail_out) { Py_ssize_t allocated; allocated = _BlocksOutputBuffer_InitAndGrow( buffer, max_length, (void**) next_out); *avail_out = (uint32_t) allocated; return allocated; } /* On success, return value >= 0 On failure, return -1 */ static inline Py_ssize_t OutputBuffer_Grow(_BlocksOutputBuffer *buffer, Bytef **next_out, uint32_t *avail_out) { Py_ssize_t allocated; allocated = _BlocksOutputBuffer_Grow( buffer, (void**) next_out, (Py_ssize_t) *avail_out); *avail_out = (uint32_t) allocated; return allocated; } static inline Py_ssize_t OutputBuffer_GetDataSize(_BlocksOutputBuffer *buffer, uint32_t avail_out) { return _BlocksOutputBuffer_GetDataSize(buffer, (Py_ssize_t) avail_out); } static inline PyObject * OutputBuffer_Finish(_BlocksOutputBuffer *buffer, uint32_t avail_out) { return _BlocksOutputBuffer_Finish(buffer, (Py_ssize_t) avail_out); } static inline void OutputBuffer_OnError(_BlocksOutputBuffer *buffer) { _BlocksOutputBuffer_OnError(buffer); } /* The max buffer size accepted by zlib is UINT32_MAX, the initial buffer size `init_size` may exceed it in a 64-bit build. These wrapper functions maintain an UINT32_MAX sliding window for the first block: 1.
OutputBuffer_WindowInitWithSize() 2. OutputBuffer_WindowGrow() 3. OutputBuffer_WindowFinish() 4. OutputBuffer_WindowOnError() ==== is the sliding window: 1. ====------ ^ next_posi, left_bytes is 6 2. ----====-- ^ next_posi, left_bytes is 2 3. --------== ^ next_posi, left_bytes is 0 */ typedef struct { Py_ssize_t left_bytes; Bytef *next_posi; } _Uint32Window; /* Initialize the buffer with an initial buffer size. On success, return value >= 0 On failure, return value < 0 */ static inline Py_ssize_t OutputBuffer_WindowInitWithSize(_BlocksOutputBuffer *buffer, _Uint32Window *window, Py_ssize_t init_size, Bytef **next_out, uint32_t *avail_out) { Py_ssize_t allocated = _BlocksOutputBuffer_InitWithSize( buffer, init_size, (void**) next_out); if (allocated >= 0) { // the UINT32_MAX sliding window Py_ssize_t window_size = Py_MIN((size_t)allocated, UINT32_MAX); *avail_out = (uint32_t) window_size; window->left_bytes = allocated - window_size; window->next_posi = *next_out + window_size; } return allocated; } /* Grow the buffer. On success, return value >= 0 On failure, return value < 0 */ static inline Py_ssize_t OutputBuffer_WindowGrow(_BlocksOutputBuffer *buffer, _Uint32Window *window, Bytef **next_out, uint32_t *avail_out) { Py_ssize_t allocated; /* ensure no gaps in the data. if inlined, this check could be optimized away.*/ if (*avail_out != 0) { PyErr_SetString(PyExc_SystemError, "*avail_out != 0 in OutputBuffer_WindowGrow()."); return -1; } // slide the UINT32_MAX sliding window if (window->left_bytes > 0) { Py_ssize_t window_size = Py_MIN((size_t)window->left_bytes, UINT32_MAX); *next_out = window->next_posi; *avail_out = (uint32_t) window_size; window->left_bytes -= window_size; window->next_posi += window_size; return window_size; } assert(window->left_bytes == 0); // only the first block may > UINT32_MAX allocated = _BlocksOutputBuffer_Grow( buffer, (void**) next_out, (Py_ssize_t) *avail_out); *avail_out = (uint32_t) allocated; return allocated; } /* Finish the buffer. On success, return a bytes object On failure, return NULL */ static inline PyObject * OutputBuffer_WindowFinish(_BlocksOutputBuffer *buffer, _Uint32Window *window, uint32_t avail_out) { Py_ssize_t real_avail_out = (Py_ssize_t) avail_out + window->left_bytes; return _BlocksOutputBuffer_Finish(buffer, real_avail_out); } static inline void OutputBuffer_WindowOnError(_BlocksOutputBuffer *buffer, _Uint32Window *window) { _BlocksOutputBuffer_OnError(buffer); } /* The following parameters are copied from zutil.h, version 0.95 */ #define DEFLATED 8 #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* Initial buffer size. */ #define DEF_BUF_SIZE (16*1024) #define DEF_MAX_INITIAL_BUF_SIZE (16 * 1024 * 1024) static PyModuleDef zlibmodule; typedef struct { PyTypeObject *Comptype; PyTypeObject *Decomptype; PyTypeObject *ZlibDecompressorType; PyObject *ZlibError; } zlibstate; static inline zlibstate* get_zlib_state(PyObject *module) { void *state = PyModule_GetState(module); assert(state != NULL); return (zlibstate *)state; } typedef struct { PyObject_HEAD z_stream zst; PyObject *unused_data; PyObject *unconsumed_tail; char eof; bool is_initialised; PyObject *zdict; PyMutex mutex; } compobject; #define _compobject_CAST(op) ((compobject *)op) static void zlib_error(zlibstate *state, z_stream zst, int err, const char *msg) { const char *zmsg = Z_NULL; /* In case of a version mismatch, zst.msg won't be initialized. Check for this case first, before looking at zst.msg. 
*/ if (err == Z_VERSION_ERROR) zmsg = "library version mismatch"; if (zmsg == Z_NULL) zmsg = zst.msg; if (zmsg == Z_NULL) { switch (err) { case Z_BUF_ERROR: zmsg = "incomplete or truncated stream"; break; case Z_STREAM_ERROR: zmsg = "inconsistent stream state"; break; case Z_DATA_ERROR: zmsg = "invalid input data"; break; } } if (zmsg == Z_NULL) PyErr_Format(state->ZlibError, "Error %d %s", err, msg); else PyErr_Format(state->ZlibError, "Error %d %s: %.200s", err, msg, zmsg); } /*[clinic input] module zlib class zlib.Compress "compobject *" "&Comptype" class zlib.Decompress "compobject *" "&Decomptype" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=093935115c3e3158]*/ static compobject * newcompobject(PyTypeObject *type) { compobject *self; assert(type != NULL); assert(type->tp_alloc != NULL); self = _compobject_CAST(type->tp_alloc(type, 0)); if (self == NULL) return NULL; self->eof = 0; self->is_initialised = 0; self->zdict = NULL; self->unused_data = Py_GetConstant(Py_CONSTANT_EMPTY_BYTES); if (self->unused_data == NULL) { Py_DECREF(self); return NULL; } self->unconsumed_tail = Py_GetConstant(Py_CONSTANT_EMPTY_BYTES); if (self->unconsumed_tail == NULL) { Py_DECREF(self); return NULL; } self->mutex = (PyMutex){0}; return self; } static void* PyZlib_Malloc(voidpf ctx, uInt items, uInt size) { if (size != 0 && items > (size_t)PY_SSIZE_T_MAX / size) return NULL; /* PyMem_Malloc() cannot be used: the GIL is not held when inflate() and deflate() are called */ return PyMem_RawMalloc((size_t)items * (size_t)size); } static void PyZlib_Free(voidpf ctx, void *ptr) { PyMem_RawFree(ptr); } static void arrange_input_buffer(z_stream *zst, Py_ssize_t *remains) { zst->avail_in = (uInt)Py_MIN((size_t)*remains, UINT_MAX); *remains -= zst->avail_in; } /*[clinic input] zlib.compress data: Py_buffer Binary data to be compressed. / level: int(c_default="Z_DEFAULT_COMPRESSION") = Z_DEFAULT_COMPRESSION Compression level, in 0-9 or -1. wbits: int(c_default="MAX_WBITS") = MAX_WBITS The window buffer size and container format. Returns a bytes object containing compressed data. [clinic start generated code]*/ static PyObject * zlib_compress_impl(PyObject *module, Py_buffer *data, int level, int wbits) /*[clinic end generated code: output=46bd152fadd66df2 input=c4d06ee5782a7e3f]*/ { PyObject *return_value; int flush; z_stream zst; _BlocksOutputBuffer buffer = {.writer = NULL}; zlibstate *state = get_zlib_state(module); Byte *ibuf = data->buf; Py_ssize_t ibuflen = data->len; if (OutputBuffer_InitAndGrow(&buffer, -1, &zst.next_out, &zst.avail_out) < 0) { goto error; } zst.opaque = NULL; zst.zalloc = PyZlib_Malloc; zst.zfree = PyZlib_Free; zst.next_in = ibuf; int err = deflateInit2(&zst, level, DEFLATED, wbits, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); switch (err) { case Z_OK: break; case Z_MEM_ERROR: PyErr_SetString(PyExc_MemoryError, "Out of memory while compressing data"); goto error; case Z_STREAM_ERROR: PyErr_SetString(state->ZlibError, "Bad compression level"); goto error; default: deflateEnd(&zst); zlib_error(state, zst, err, "while compressing data"); goto error; } do { arrange_input_buffer(&zst, &ibuflen); flush = ibuflen == 0 ? 
Z_FINISH : Z_NO_FLUSH; do { if (zst.avail_out == 0) { if (OutputBuffer_Grow(&buffer, &zst.next_out, &zst.avail_out) < 0) { deflateEnd(&zst); goto error; } } Py_BEGIN_ALLOW_THREADS err = deflate(&zst, flush); Py_END_ALLOW_THREADS if (err == Z_STREAM_ERROR) { deflateEnd(&zst); zlib_error(state, zst, err, "while compressing data"); goto error; } } while (zst.avail_out == 0); assert(zst.avail_in == 0); } while (flush != Z_FINISH); assert(err == Z_STREAM_END); err = deflateEnd(&zst); if (err == Z_OK) { return_value = OutputBuffer_Finish(&buffer, zst.avail_out); if (return_value == NULL) { goto error; } return return_value; } else zlib_error(state, zst, err, "while finishing compression"); error: OutputBuffer_OnError(&buffer); return NULL; } /*[clinic input] zlib.decompress data: Py_buffer Compressed data. / wbits: int(c_default="MAX_WBITS") = MAX_WBITS The window buffer size and container format. bufsize: Py_ssize_t(c_default="DEF_BUF_SIZE", allow_negative=False) = DEF_BUF_SIZE The initial output buffer size. Returns a bytes object containing the uncompressed data. [clinic start generated code]*/ static PyObject * zlib_decompress_impl(PyObject *module, Py_buffer *data, int wbits, Py_ssize_t bufsize) /*[clinic end generated code: output=77c7e35111dc8c42 input=530077065b3a2233]*/ { PyObject *return_value; Byte *ibuf; Py_ssize_t ibuflen; int err, flush; z_stream zst; _BlocksOutputBuffer buffer = {.writer = NULL}; _Uint32Window window; // output buffer's UINT32_MAX sliding window zlibstate *state = get_zlib_state(module); if (bufsize == 0) { bufsize = 1; } if (OutputBuffer_WindowInitWithSize(&buffer, &window, bufsize, &zst.next_out, &zst.avail_out) < 0) { goto error; } ibuf = data->buf; ibuflen = data->len; zst.opaque = NULL; zst.zalloc = PyZlib_Malloc; zst.zfree = PyZlib_Free; zst.avail_in = 0; zst.next_in = ibuf; err = inflateInit2(&zst, wbits); switch (err) { case Z_OK: break; case Z_MEM_ERROR: PyErr_SetString(PyExc_MemoryError, "Out of memory while decompressing data"); goto error; default: inflateEnd(&zst); zlib_error(state, zst, err, "while preparing to decompress data"); goto error; } do { arrange_input_buffer(&zst, &ibuflen); flush = ibuflen == 0 ? Z_FINISH : Z_NO_FLUSH; do { if (zst.avail_out == 0) { if (OutputBuffer_WindowGrow(&buffer, &window, &zst.next_out, &zst.avail_out) < 0) { inflateEnd(&zst); goto error; } } Py_BEGIN_ALLOW_THREADS err = inflate(&zst, flush); Py_END_ALLOW_THREADS switch (err) { case Z_OK: _Py_FALLTHROUGH; case Z_BUF_ERROR: _Py_FALLTHROUGH; case Z_STREAM_END: break; case Z_MEM_ERROR: inflateEnd(&zst); PyErr_SetString(PyExc_MemoryError, "Out of memory while decompressing data"); goto error; default: inflateEnd(&zst); zlib_error(state, zst, err, "while decompressing data"); goto error; } } while (zst.avail_out == 0); } while (err != Z_STREAM_END && ibuflen != 0); if (err != Z_STREAM_END) { inflateEnd(&zst); zlib_error(state, zst, err, "while decompressing data"); goto error; } err = inflateEnd(&zst); if (err != Z_OK) { zlib_error(state, zst, err, "while finishing decompression"); goto error; } return_value = OutputBuffer_WindowFinish(&buffer, &window, zst.avail_out); if (return_value != NULL) { return return_value; } error: OutputBuffer_WindowOnError(&buffer, &window); return NULL; } /*[clinic input] zlib.compressobj level: int(c_default="Z_DEFAULT_COMPRESSION") = Z_DEFAULT_COMPRESSION The compression level (an integer in the range 0-9 or -1; default is currently equivalent to 6). Higher compression levels are slower, but produce smaller results. 
method: int(c_default="DEFLATED") = DEFLATED The compression algorithm. If given, this must be DEFLATED. wbits: int(c_default="MAX_WBITS") = MAX_WBITS +9 to +15: The base-two logarithm of the window size. Include a zlib container. -9 to -15: Generate a raw stream. +25 to +31: Include a gzip container. memLevel: int(c_default="DEF_MEM_LEVEL") = DEF_MEM_LEVEL Controls the amount of memory used for internal compression state. Valid values range from 1 to 9. Higher values result in higher memory usage, faster compression, and smaller output. strategy: int(c_default="Z_DEFAULT_STRATEGY") = Z_DEFAULT_STRATEGY Used to tune the compression algorithm. Possible values are Z_DEFAULT_STRATEGY, Z_FILTERED, and Z_HUFFMAN_ONLY. zdict: Py_buffer = None The predefined compression dictionary - a sequence of bytes containing subsequences that are likely to occur in the input data. Return a compressor object. [clinic start generated code]*/ static PyObject * zlib_compressobj_impl(PyObject *module, int level, int method, int wbits, int memLevel, int strategy, Py_buffer *zdict) /*[clinic end generated code: output=8b5bed9c8fc3814d input=2fa3d026f90ab8d5]*/ { zlibstate *state = get_zlib_state(module); if (zdict->buf != NULL && (size_t)zdict->len > UINT_MAX) { PyErr_SetString(PyExc_OverflowError, "zdict length does not fit in an unsigned int"); return NULL; } compobject *self = newcompobject(state->Comptype); if (self == NULL) goto error; self->zst.opaque = NULL; self->zst.zalloc = PyZlib_Malloc; self->zst.zfree = PyZlib_Free; self->zst.next_in = NULL; self->zst.avail_in = 0; int err = deflateInit2(&self->zst, level, method, wbits, memLevel, strategy); switch (err) { case Z_OK: self->is_initialised = 1; if (zdict->buf == NULL) { goto success; } else { err = deflateSetDictionary(&self->zst, zdict->buf, (unsigned int)zdict->len); switch (err) { case Z_OK: goto success; case Z_STREAM_ERROR: PyErr_SetString(PyExc_ValueError, "Invalid dictionary"); goto error; default: PyErr_SetString(PyExc_ValueError, "deflateSetDictionary()"); goto error; } } case Z_MEM_ERROR: PyErr_SetString(PyExc_MemoryError, "Can't allocate memory for compression object"); goto error; case Z_STREAM_ERROR: PyErr_SetString(PyExc_ValueError, "Invalid initialization option"); goto error; default: zlib_error(state, self->zst, err, "while creating compression object"); goto error; } error: Py_CLEAR(self); success: return (PyObject *)self; } static int set_inflate_zdict(zlibstate *state, compobject *self) { Py_buffer zdict_buf; if (PyObject_GetBuffer(self->zdict, &zdict_buf, PyBUF_SIMPLE) == -1) { return -1; } if ((size_t)zdict_buf.len > UINT_MAX) { PyErr_SetString(PyExc_OverflowError, "zdict length does not fit in an unsigned int"); PyBuffer_Release(&zdict_buf); return -1; } int err; err = inflateSetDictionary(&self->zst, zdict_buf.buf, (unsigned int)zdict_buf.len); PyBuffer_Release(&zdict_buf); if (err != Z_OK) { zlib_error(state, self->zst, err, "while setting zdict"); return -1; } return 0; } /*[clinic input] zlib.decompressobj wbits: int(c_default="MAX_WBITS") = MAX_WBITS The window buffer size and container format. zdict: object(c_default="NULL") = b'' The predefined compression dictionary. This must be the same dictionary as used by the compressor that produced the input data. Return a decompressor object. 
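/* Illustrative sketch, separate from the module code above: how the wbits
 * ranges described in the compressobj()/decompressobj() docstrings map onto
 * raw zlib calls. deflateInit2()/inflateInit2() interpret windowBits = 15 as
 * "zlib container", -15 as "raw deflate stream", and 15 + 16 (= 31) as "gzip
 * container"; on the inflate side, 32 + 15 additionally enables automatic
 * zlib/gzip header detection. Error handling is elided for brevity, and the
 * function name is made up for this example. */
#include <zlib.h>
#include <string.h>

static void
init_streams_for_containers(void)
{
    z_stream zlib_fmt, raw_fmt, gzip_fmt;
    memset(&zlib_fmt, 0, sizeof(zlib_fmt));
    memset(&raw_fmt, 0, sizeof(raw_fmt));
    memset(&gzip_fmt, 0, sizeof(gzip_fmt));

    /* wbits = +15: deflate data wrapped in a zlib header and trailer. */
    deflateInit2(&zlib_fmt, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15, 8,
                 Z_DEFAULT_STRATEGY);
    /* wbits = -15: raw deflate stream, no header or checksum. */
    deflateInit2(&raw_fmt, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -15, 8,
                 Z_DEFAULT_STRATEGY);
    /* wbits = 15 + 16 = 31: gzip container. */
    deflateInit2(&gzip_fmt, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31, 8,
                 Z_DEFAULT_STRATEGY);

    deflateEnd(&zlib_fmt);
    deflateEnd(&raw_fmt);
    deflateEnd(&gzip_fmt);
}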
[clinic start generated code]*/ static PyObject * zlib_decompressobj_impl(PyObject *module, int wbits, PyObject *zdict) /*[clinic end generated code: output=3069b99994f36906 input=d3832b8511fc977b]*/ { zlibstate *state = get_zlib_state(module); if (zdict != NULL && !PyObject_CheckBuffer(zdict)) { PyErr_SetString(PyExc_TypeError, "zdict argument must support the buffer protocol"); return NULL; } compobject *self = newcompobject(state->Decomptype); if (self == NULL) return NULL; self->zst.opaque = NULL; self->zst.zalloc = PyZlib_Malloc; self->zst.zfree = PyZlib_Free; self->zst.next_in = NULL; self->zst.avail_in = 0; if (zdict != NULL) { self->zdict = Py_NewRef(zdict); } int err = inflateInit2(&self->zst, wbits); switch (err) { case Z_OK: self->is_initialised = 1; if (self->zdict != NULL && wbits < 0) { if (set_inflate_zdict(state, self) < 0) { Py_DECREF(self); return NULL; } } return (PyObject *)self; case Z_STREAM_ERROR: Py_DECREF(self); PyErr_SetString(PyExc_ValueError, "Invalid initialization option"); return NULL; case Z_MEM_ERROR: Py_DECREF(self); PyErr_SetString(PyExc_MemoryError, "Can't allocate memory for decompression object"); return NULL; default: zlib_error(state, self->zst, err, "while creating decompression object"); Py_DECREF(self); return NULL; } } static void compobject_dealloc_impl(PyObject *op, int (*dealloc)(z_streamp)) { PyTypeObject *type = Py_TYPE(op); PyObject_GC_UnTrack(op); compobject *self = _compobject_CAST(op); assert(!PyMutex_IsLocked(&self->mutex)); if (self->is_initialised) { (void)dealloc(&self->zst); } Py_XDECREF(self->unused_data); Py_XDECREF(self->unconsumed_tail); Py_XDECREF(self->zdict); type->tp_free(self); Py_DECREF(type); } static int compobject_traverse(PyObject *op, visitproc visit, void *arg) { compobject *self = _compobject_CAST(op); Py_VISIT(Py_TYPE(op)); Py_VISIT(self->zdict); return 0; } static void Comp_dealloc(PyObject *op) { compobject_dealloc_impl(op, &deflateEnd); } static void Decomp_dealloc(PyObject *op) { compobject_dealloc_impl(op, &inflateEnd); } /*[clinic input] zlib.Compress.compress cls: defining_class data: Py_buffer Binary data to be compressed. / Returns a bytes object containing compressed data. After calling this function, some of the input data may still be stored in internal buffers for later processing. Call the flush() method to clear these buffers. 
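/* Hedged sketch (the helper name compress_chunks is invented for this
 * example): the streaming pattern that Compress.compress() and
 * Compress.flush() wrap. Pieces of input are fed with Z_NO_FLUSH, so
 * deflate() may retain data in its internal buffers; only the final
 * Z_FINISH pass guarantees that all pending output is emitted. Output
 * handling is reduced to a fixed scratch buffer and most error checks are
 * elided for brevity. */
#include <zlib.h>
#include <string.h>

static int
compress_chunks(unsigned char **chunks, size_t *lens, int n)
{
    z_stream zst;
    unsigned char out[16 * 1024];
    memset(&zst, 0, sizeof(zst));
    if (deflateInit(&zst, Z_DEFAULT_COMPRESSION) != Z_OK) {
        return -1;
    }
    for (int i = 0; i < n; i++) {
        zst.next_in = chunks[i];
        zst.avail_in = (uInt)lens[i];
        /* Like Compress.compress(): consume this piece, keep the stream open. */
        do {
            zst.next_out = out;
            zst.avail_out = sizeof(out);
            deflate(&zst, Z_NO_FLUSH);
            /* ... write out[0 .. sizeof(out) - zst.avail_out] somewhere ... */
        } while (zst.avail_out == 0);
    }
    /* Like Compress.flush(): finish the stream, then free zlib's state. */
    int err;
    do {
        zst.next_out = out;
        zst.avail_out = sizeof(out);
        err = deflate(&zst, Z_FINISH);
    } while (err == Z_OK);
    deflateEnd(&zst);
    return err == Z_STREAM_END ? 0 : -1;
}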
[clinic start generated code]*/ static PyObject * zlib_Compress_compress_impl(compobject *self, PyTypeObject *cls, Py_buffer *data) /*[clinic end generated code: output=6731b3f0ff357ca6 input=04d00f65ab01d260]*/ { PyObject *return_value; int err; _BlocksOutputBuffer buffer = {.writer = NULL}; zlibstate *state = PyType_GetModuleState(cls); PyMutex_Lock(&self->mutex); self->zst.next_in = data->buf; Py_ssize_t ibuflen = data->len; if (OutputBuffer_InitAndGrow(&buffer, -1, &self->zst.next_out, &self->zst.avail_out) < 0) { goto error; } do { arrange_input_buffer(&self->zst, &ibuflen); do { if (self->zst.avail_out == 0) { if (OutputBuffer_Grow(&buffer, &self->zst.next_out, &self->zst.avail_out) < 0) { goto error; } } Py_BEGIN_ALLOW_THREADS err = deflate(&self->zst, Z_NO_FLUSH); Py_END_ALLOW_THREADS if (err == Z_STREAM_ERROR) { zlib_error(state, self->zst, err, "while compressing data"); goto error; } } while (self->zst.avail_out == 0); assert(self->zst.avail_in == 0); } while (ibuflen != 0); return_value = OutputBuffer_Finish(&buffer, self->zst.avail_out); if (return_value != NULL) { goto success; } error: OutputBuffer_OnError(&buffer); return_value = NULL; success: PyMutex_Unlock(&self->mutex); return return_value; } /* Helper for objdecompress() and flush(). Saves any unconsumed input data in self->unused_data or self->unconsumed_tail, as appropriate. */ static int save_unconsumed_input(compobject *self, Py_buffer *data, int err) { if (err == Z_STREAM_END) { /* The end of the compressed data has been reached. Store the leftover input data in self->unused_data. */ if (self->zst.avail_in > 0) { Py_ssize_t old_size = PyBytes_GET_SIZE(self->unused_data); Py_ssize_t left_size; left_size = (Byte *)data->buf + data->len - self->zst.next_in; if (left_size > (PY_SSIZE_T_MAX - old_size)) { PyErr_NoMemory(); return -1; } PyBytesWriter *writer = PyBytesWriter_Create(old_size + left_size); if (writer == NULL) { return -1; } char *new_data = PyBytesWriter_GetData(writer); memcpy(new_data, PyBytes_AS_STRING(self->unused_data), old_size); memcpy(new_data + old_size, self->zst.next_in, left_size); PyObject *new_unused_data = PyBytesWriter_Finish(writer); if (new_unused_data == NULL) { return -1; } Py_SETREF(self->unused_data, new_unused_data); self->zst.avail_in = 0; } } if (self->zst.avail_in > 0 || PyBytes_GET_SIZE(self->unconsumed_tail)) { /* This code handles two distinct cases: 1. Output limit was reached. Save leftover input in unconsumed_tail. 2. All input data was consumed. Clear unconsumed_tail. */ Py_ssize_t left_size = (Byte *)data->buf + data->len - self->zst.next_in; PyObject *new_data = PyBytes_FromStringAndSize( (char *)self->zst.next_in, left_size); if (new_data == NULL) return -1; Py_SETREF(self->unconsumed_tail, new_data); } return 0; } /*[clinic input] @permit_long_docstring_body zlib.Decompress.decompress cls: defining_class data: Py_buffer The binary data to decompress. / max_length: Py_ssize_t(allow_negative=False) = 0 The maximum allowable length of the decompressed data. Unconsumed input data will be stored in the unconsumed_tail attribute. Return a bytes object containing the decompressed version of the data. After calling this function, some of the input data may still be stored in internal buffers for later processing. Call the flush() method to clear these buffers. 
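/* Rough sketch (decompress_bounded is a made-up name, not module API) of the
 * behaviour the max_length / unconsumed_tail machinery documented above
 * implements at the zlib level: give inflate() a bounded output buffer, and
 * whatever input it did not consume (zst.avail_in bytes starting at
 * zst.next_in) is exactly what Decompress.decompress() stashes in
 * unconsumed_tail for a later call. */
#include <zlib.h>
#include <string.h>

static int
decompress_bounded(const unsigned char *in, size_t in_len,
                   unsigned char *out, size_t max_length,
                   size_t *produced, size_t *unconsumed)
{
    z_stream zst;
    memset(&zst, 0, sizeof(zst));
    if (inflateInit(&zst) != Z_OK) {
        return -1;
    }
    zst.next_in = (unsigned char *)in;
    zst.avail_in = (uInt)in_len;
    zst.next_out = out;
    zst.avail_out = (uInt)max_length;

    int err = inflate(&zst, Z_SYNC_FLUSH);
    *produced = max_length - zst.avail_out;   /* bytes of output actually written */
    *unconsumed = zst.avail_in;               /* analogue of unconsumed_tail */
    inflateEnd(&zst);
    return (err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR) ? 0 : -1;
}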
[clinic start generated code]*/ static PyObject * zlib_Decompress_decompress_impl(compobject *self, PyTypeObject *cls, Py_buffer *data, Py_ssize_t max_length) /*[clinic end generated code: output=b024a93c2c922d57 input=77de124bd2a2ecc0]*/ { int err = Z_OK; Py_ssize_t ibuflen; PyObject *return_value; _BlocksOutputBuffer buffer = {.writer = NULL}; PyObject *module = PyType_GetModule(cls); if (module == NULL) return NULL; zlibstate *state = get_zlib_state(module); if (max_length == 0) { max_length = -1; } PyMutex_Lock(&self->mutex); self->zst.next_in = data->buf; ibuflen = data->len; if (OutputBuffer_InitAndGrow(&buffer, max_length, &self->zst.next_out, &self->zst.avail_out) < 0) { goto abort; } do { arrange_input_buffer(&self->zst, &ibuflen); do { if (self->zst.avail_out == 0) { if (OutputBuffer_GetDataSize(&buffer, self->zst.avail_out) == max_length) { goto save; } if (OutputBuffer_Grow(&buffer, &self->zst.next_out, &self->zst.avail_out) < 0) { goto abort; } } Py_BEGIN_ALLOW_THREADS err = inflate(&self->zst, Z_SYNC_FLUSH); Py_END_ALLOW_THREADS switch (err) { case Z_OK: _Py_FALLTHROUGH; case Z_BUF_ERROR: _Py_FALLTHROUGH; case Z_STREAM_END: break; default: if (err == Z_NEED_DICT && self->zdict != NULL) { if (set_inflate_zdict(state, self) < 0) { goto abort; } else break; } goto save; } } while (self->zst.avail_out == 0 || err == Z_NEED_DICT); } while (err != Z_STREAM_END && ibuflen != 0); save: if (save_unconsumed_input(self, data, err) < 0) goto abort; if (err == Z_STREAM_END) { /* This is the logical place to call inflateEnd, but the old behaviour of only calling it on flush() is preserved. */ FT_ATOMIC_STORE_CHAR_RELAXED(self->eof, 1); } else if (err != Z_OK && err != Z_BUF_ERROR) { /* We will only get Z_BUF_ERROR if the output buffer was full but there wasn't more output when we tried again, so it is not an error condition. */ zlib_error(state, self->zst, err, "while decompressing data"); goto abort; } return_value = OutputBuffer_Finish(&buffer, self->zst.avail_out); if (return_value != NULL) { goto success; } abort: OutputBuffer_OnError(&buffer); return_value = NULL; success: PyMutex_Unlock(&self->mutex); return return_value; } /*[clinic input] zlib.Compress.flush cls: defining_class mode: int(c_default="Z_FINISH") = zlib.Z_FINISH One of the constants Z_SYNC_FLUSH, Z_FULL_FLUSH, Z_FINISH. If mode == Z_FINISH, the compressor object can no longer be used after calling the flush() method. Otherwise, more data can still be compressed. / Return a bytes object containing any remaining compressed data. [clinic start generated code]*/ static PyObject * zlib_Compress_flush_impl(compobject *self, PyTypeObject *cls, int mode) /*[clinic end generated code: output=c7efd13efd62add2 input=286146e29442eb6c]*/ { int err; PyObject *return_value; _BlocksOutputBuffer buffer = {.writer = NULL}; zlibstate *state = PyType_GetModuleState(cls); /* Flushing with Z_NO_FLUSH is a no-op, so there's no point in doing any work at all; just return an empty string. 
*/ if (mode == Z_NO_FLUSH) { return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES); } PyMutex_Lock(&self->mutex); self->zst.avail_in = 0; if (OutputBuffer_InitAndGrow(&buffer, -1, &self->zst.next_out, &self->zst.avail_out) < 0) { goto error; } do { if (self->zst.avail_out == 0) { if (OutputBuffer_Grow(&buffer, &self->zst.next_out, &self->zst.avail_out) < 0) { goto error; } } Py_BEGIN_ALLOW_THREADS err = deflate(&self->zst, mode); Py_END_ALLOW_THREADS if (err == Z_STREAM_ERROR) { zlib_error(state, self->zst, err, "while flushing"); goto error; } } while (self->zst.avail_out == 0); assert(self->zst.avail_in == 0); /* If mode is Z_FINISH, we also have to call deflateEnd() to free various data structures. Note we should only get Z_STREAM_END when mode is Z_FINISH, but checking both for safety*/ if (err == Z_STREAM_END && mode == Z_FINISH) { err = deflateEnd(&self->zst); if (err != Z_OK) { zlib_error(state, self->zst, err, "while finishing compression"); goto error; } else self->is_initialised = 0; /* We will only get Z_BUF_ERROR if the output buffer was full but there wasn't more output when we tried again, so it is not an error condition. */ } else if (err != Z_OK && err != Z_BUF_ERROR) { zlib_error(state, self->zst, err, "while flushing"); goto error; } return_value = OutputBuffer_Finish(&buffer, self->zst.avail_out); if (return_value != NULL) { goto success; } error: OutputBuffer_OnError(&buffer); return_value = NULL; success: PyMutex_Unlock(&self->mutex); return return_value; } #ifdef HAVE_ZLIB_COPY /*[clinic input] zlib.Compress.copy cls: defining_class Return a copy of the compression object. [clinic start generated code]*/ static PyObject * zlib_Compress_copy_impl(compobject *self, PyTypeObject *cls) /*[clinic end generated code: output=c4d2cfb4b0d7350b input=235497e482d40986]*/ { zlibstate *state = PyType_GetModuleState(cls); compobject *return_value = newcompobject(state->Comptype); if (!return_value) return NULL; /* Copy the zstream state * We use mutex to make this thread-safe */ PyMutex_Lock(&self->mutex); int err = deflateCopy(&return_value->zst, &self->zst); switch (err) { case Z_OK: break; case Z_STREAM_ERROR: PyErr_SetString(PyExc_ValueError, "Inconsistent stream state"); goto error; case Z_MEM_ERROR: PyErr_SetString(PyExc_MemoryError, "Can't allocate memory for compression object"); goto error; default: zlib_error(state, self->zst, err, "while copying compression object"); goto error; } Py_XSETREF(return_value->unused_data, Py_NewRef(self->unused_data)); Py_XSETREF(return_value->unconsumed_tail, Py_NewRef(self->unconsumed_tail)); Py_XSETREF(return_value->zdict, Py_XNewRef(self->zdict)); return_value->eof = self->eof; /* Mark it as being initialized */ return_value->is_initialised = 1; PyMutex_Unlock(&self->mutex); return (PyObject *)return_value; error: PyMutex_Unlock(&self->mutex); Py_XDECREF(return_value); return NULL; } /*[clinic input] zlib.Compress.__copy__ cls: defining_class [clinic start generated code]*/ static PyObject * zlib_Compress___copy___impl(compobject *self, PyTypeObject *cls) /*[clinic end generated code: output=074613db332cb668 input=5c0188367ab0fe64]*/ { return zlib_Compress_copy_impl(self, cls); } /*[clinic input] zlib.Compress.__deepcopy__ cls: defining_class memo: object / [clinic start generated code]*/ static PyObject * zlib_Compress___deepcopy___impl(compobject *self, PyTypeObject *cls, PyObject *memo) /*[clinic end generated code: output=24b3aed785f54033 input=c90347319a514430]*/ { return zlib_Compress_copy_impl(self, cls); } /*[clinic input] 
zlib.Decompress.copy cls: defining_class Return a copy of the decompression object. [clinic start generated code]*/ static PyObject * zlib_Decompress_copy_impl(compobject *self, PyTypeObject *cls) /*[clinic end generated code: output=a7ddc016e1d0a781 input=20ef3aa208282ff2]*/ { zlibstate *state = PyType_GetModuleState(cls); compobject *return_value = newcompobject(state->Decomptype); if (!return_value) return NULL; /* Copy the zstream state * We use mutex to make this thread-safe */ PyMutex_Lock(&self->mutex); int err = inflateCopy(&return_value->zst, &self->zst); switch (err) { case Z_OK: break; case Z_STREAM_ERROR: PyErr_SetString(PyExc_ValueError, "Inconsistent stream state"); goto error; case Z_MEM_ERROR: PyErr_SetString(PyExc_MemoryError, "Can't allocate memory for decompression object"); goto error; default: zlib_error(state, self->zst, err, "while copying decompression object"); goto error; } Py_XSETREF(return_value->unused_data, Py_NewRef(self->unused_data)); Py_XSETREF(return_value->unconsumed_tail, Py_NewRef(self->unconsumed_tail)); Py_XSETREF(return_value->zdict, Py_XNewRef(self->zdict)); return_value->eof = self->eof; /* Mark it as being initialized */ return_value->is_initialised = 1; PyMutex_Unlock(&self->mutex); return (PyObject *)return_value; error: PyMutex_Unlock(&self->mutex); Py_XDECREF(return_value); return NULL; } /*[clinic input] zlib.Decompress.__copy__ cls: defining_class [clinic start generated code]*/ static PyObject * zlib_Decompress___copy___impl(compobject *self, PyTypeObject *cls) /*[clinic end generated code: output=cf1e6473744f53fa input=cc3143067b622bdf]*/ { return zlib_Decompress_copy_impl(self, cls); } /*[clinic input] zlib.Decompress.__deepcopy__ cls: defining_class memo: object / [clinic start generated code]*/ static PyObject * zlib_Decompress___deepcopy___impl(compobject *self, PyTypeObject *cls, PyObject *memo) /*[clinic end generated code: output=34f7b719a0c0d51b input=fc13b9c58622544e]*/ { return zlib_Decompress_copy_impl(self, cls); } #endif /*[clinic input] zlib.Decompress.flush cls: defining_class length: Py_ssize_t(c_default="DEF_BUF_SIZE") = zlib.DEF_BUF_SIZE the initial size of the output buffer. / Return a bytes object containing any remaining decompressed data. [clinic start generated code]*/ static PyObject * zlib_Decompress_flush_impl(compobject *self, PyTypeObject *cls, Py_ssize_t length) /*[clinic end generated code: output=4532fc280bd0f8f2 input=42f1f4b75230e2cd]*/ { int err, flush; Py_buffer data; PyObject *return_value; Py_ssize_t ibuflen; _BlocksOutputBuffer buffer = {.writer = NULL}; _Uint32Window window; // output buffer's UINT32_MAX sliding window PyObject *module = PyType_GetModule(cls); if (module == NULL) { return NULL; } zlibstate *state = get_zlib_state(module); if (length <= 0) { PyErr_SetString(PyExc_ValueError, "length must be greater than zero"); return NULL; } PyMutex_Lock(&self->mutex); if (PyObject_GetBuffer(self->unconsumed_tail, &data, PyBUF_SIMPLE) == -1) { PyMutex_Unlock(&self->mutex); return NULL; } self->zst.next_in = data.buf; ibuflen = data.len; if (OutputBuffer_WindowInitWithSize(&buffer, &window, length, &self->zst.next_out, &self->zst.avail_out) < 0) { goto abort; } do { arrange_input_buffer(&self->zst, &ibuflen); flush = ibuflen == 0 ? 
Z_FINISH : Z_NO_FLUSH; do { if (self->zst.avail_out == 0) { if (OutputBuffer_WindowGrow(&buffer, &window, &self->zst.next_out, &self->zst.avail_out) < 0) { goto abort; } } Py_BEGIN_ALLOW_THREADS err = inflate(&self->zst, flush); Py_END_ALLOW_THREADS switch (err) { case Z_OK: _Py_FALLTHROUGH; case Z_BUF_ERROR: _Py_FALLTHROUGH; case Z_STREAM_END: break; default: goto save; } } while (self->zst.avail_out == 0 || err == Z_NEED_DICT); } while (err != Z_STREAM_END && ibuflen != 0); save: if (save_unconsumed_input(self, &data, err) < 0) { goto abort; } /* If at end of stream, clean up any memory allocated by zlib. */ if (err == Z_STREAM_END) { FT_ATOMIC_STORE_CHAR_RELAXED(self->eof, 1); self->is_initialised = 0; err = inflateEnd(&self->zst); if (err != Z_OK) { zlib_error(state, self->zst, err, "while finishing decompression"); goto abort; } } return_value = OutputBuffer_WindowFinish(&buffer, &window, self->zst.avail_out); if (return_value != NULL) { goto success; } abort: OutputBuffer_WindowOnError(&buffer, &window); return_value = NULL; success: PyBuffer_Release(&data); PyMutex_Unlock(&self->mutex); return return_value; } typedef struct { PyObject_HEAD z_stream zst; PyObject *zdict; PyMutex mutex; PyObject *unused_data; uint8_t *input_buffer; Py_ssize_t input_buffer_size; /* zst>avail_in is only 32 bit, so we store the true length separately. Conversion and looping is encapsulated in decompress_buf() */ Py_ssize_t avail_in_real; bool is_initialised; char eof; /* Py_T_BOOL expects a char */ char needs_input; } ZlibDecompressor; #define ZlibDecompressor_CAST(op) ((ZlibDecompressor *)(op)) /*[clinic input] class zlib._ZlibDecompressor "ZlibDecompressor *" "&ZlibDecompressorType" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=49151d1d703e6bcc]*/ static void ZlibDecompressor_dealloc(PyObject *op) { PyTypeObject *type = Py_TYPE(op); PyObject_GC_UnTrack(op); ZlibDecompressor *self = ZlibDecompressor_CAST(op); assert(!PyMutex_IsLocked(&self->mutex)); if (self->is_initialised) { inflateEnd(&self->zst); } PyMem_Free(self->input_buffer); Py_CLEAR(self->unused_data); Py_CLEAR(self->zdict); type->tp_free(self); Py_DECREF(type); } static int ZlibDecompressor_traverse(PyObject *op, visitproc visit, void *arg) { ZlibDecompressor *self = ZlibDecompressor_CAST(op); Py_VISIT(Py_TYPE(op)); Py_VISIT(self->zdict); return 0; } static int set_inflate_zdict_ZlibDecompressor(zlibstate *state, ZlibDecompressor *self) { Py_buffer zdict_buf; if (PyObject_GetBuffer(self->zdict, &zdict_buf, PyBUF_SIMPLE) == -1) { return -1; } if ((size_t)zdict_buf.len > UINT_MAX) { PyErr_SetString(PyExc_OverflowError, "zdict length does not fit in an unsigned int"); PyBuffer_Release(&zdict_buf); return -1; } int err; err = inflateSetDictionary(&self->zst, zdict_buf.buf, (unsigned int)zdict_buf.len); PyBuffer_Release(&zdict_buf); if (err != Z_OK) { zlib_error(state, self->zst, err, "while setting zdict"); return -1; } return 0; } static Py_ssize_t arrange_output_buffer_with_maximum(uint32_t *avail_out, uint8_t **next_out, PyObject **buffer, Py_ssize_t length, Py_ssize_t max_length) { Py_ssize_t occupied; if (*buffer == NULL) { if (!(*buffer = PyBytes_FromStringAndSize(NULL, length))) return -1; occupied = 0; } else { occupied = *next_out - (uint8_t *)PyBytes_AS_STRING(*buffer); if (length == occupied) { Py_ssize_t new_length; assert(length <= max_length); /* can not scale the buffer over max_length */ if (length == max_length) return -2; if (length <= (max_length >> 1)) new_length = length << 1; 
else new_length = max_length; if (_PyBytes_Resize(buffer, new_length) < 0) return -1; length = new_length; } } *avail_out = (uint32_t)Py_MIN((size_t)(length - occupied), UINT32_MAX); *next_out = (uint8_t *)PyBytes_AS_STRING(*buffer) + occupied; return length; } /* Decompress data of length self->avail_in_real in self->state.next_in. The output buffer is allocated dynamically and returned. If the max_length is of sufficiently low size, max_length is allocated immediately. At most max_length bytes are returned, so some of the input may not be consumed. self->state.next_in and self->avail_in_real are updated to reflect the consumed input. */ static PyObject* decompress_buf(ZlibDecompressor *self, Py_ssize_t max_length) { /* data_size is strictly positive, but because we repeatedly have to compare against max_length and PyBytes_GET_SIZE we declare it as signed */ PyObject *return_value = NULL; Py_ssize_t hard_limit; Py_ssize_t obuflen; zlibstate *state = PyType_GetModuleState(Py_TYPE(self)); int err = Z_OK; /* When sys.maxsize is passed as default use DEF_BUF_SIZE as start buffer. In this particular case the data may not necessarily be very big, so it is better to grow dynamically.*/ if ((max_length < 0) || max_length == PY_SSIZE_T_MAX) { hard_limit = PY_SSIZE_T_MAX; obuflen = DEF_BUF_SIZE; } else { /* Assume that decompressor is used in file decompression with a fixed block size of max_length. In that case we will reach max_length almost always (except at the end of the file). So it makes sense to allocate max_length. */ hard_limit = max_length; obuflen = max_length; if (obuflen > DEF_MAX_INITIAL_BUF_SIZE){ // Safeguard against memory overflow. obuflen = DEF_MAX_INITIAL_BUF_SIZE; } } do { arrange_input_buffer(&(self->zst), &(self->avail_in_real)); do { obuflen = arrange_output_buffer_with_maximum(&(self->zst.avail_out), &(self->zst.next_out), &return_value, obuflen, hard_limit); if (obuflen == -1){ PyErr_SetString(PyExc_MemoryError, "Insufficient memory for buffer allocation"); goto error; } else if (obuflen == -2) { break; } Py_BEGIN_ALLOW_THREADS err = inflate(&self->zst, Z_SYNC_FLUSH); Py_END_ALLOW_THREADS switch (err) { case Z_OK: _Py_FALLTHROUGH; case Z_BUF_ERROR: _Py_FALLTHROUGH; case Z_STREAM_END: break; default: if (err == Z_NEED_DICT) { goto error; } else { break; } } } while (self->zst.avail_out == 0); } while(err != Z_STREAM_END && self->avail_in_real != 0); if (err == Z_STREAM_END) { FT_ATOMIC_STORE_CHAR_RELAXED(self->eof, 1); self->is_initialised = 0; /* Unlike the Decompress object we call inflateEnd here as there are no backwards compatibility issues */ err = inflateEnd(&self->zst); if (err != Z_OK) { zlib_error(state, self->zst, err, "while finishing decompression"); goto error; } } else if (err != Z_OK && err != Z_BUF_ERROR) { zlib_error(state, self->zst, err, "while decompressing data"); goto error; } self->avail_in_real += self->zst.avail_in; if (_PyBytes_Resize(&return_value, self->zst.next_out - (uint8_t *)PyBytes_AS_STRING(return_value)) != 0) { goto error; } goto success; error: Py_CLEAR(return_value); success: return return_value; } static PyObject * decompress(ZlibDecompressor *self, uint8_t *data, size_t len, Py_ssize_t max_length) { bool input_buffer_in_use; PyObject *result; /* Prepend unconsumed input if necessary */ if (self->zst.next_in != NULL) { size_t avail_now, avail_total; /* Number of bytes we can append to input buffer */ avail_now = (self->input_buffer + self->input_buffer_size) - (self->zst.next_in + self->avail_in_real); /* Number of bytes we can 
append if we move existing contents to beginning of buffer (overwriting consumed input) */ avail_total = self->input_buffer_size - self->avail_in_real; if (avail_total < len) { size_t offset = self->zst.next_in - self->input_buffer; uint8_t *tmp; size_t new_size = self->input_buffer_size + len - avail_now; /* Assign to temporary variable first, so we don't lose address of allocated buffer if realloc fails */ tmp = PyMem_Realloc(self->input_buffer, new_size); if (tmp == NULL) { PyErr_SetNone(PyExc_MemoryError); return NULL; } self->input_buffer = tmp; self->input_buffer_size = new_size; self->zst.next_in = self->input_buffer + offset; } else if (avail_now < len) { memmove(self->input_buffer, self->zst.next_in, self->avail_in_real); self->zst.next_in = self->input_buffer; } memcpy((void*)(self->zst.next_in + self->avail_in_real), data, len); self->avail_in_real += len; input_buffer_in_use = 1; } else { self->zst.next_in = data; self->avail_in_real = len; input_buffer_in_use = 0; } result = decompress_buf(self, max_length); if(result == NULL) { self->zst.next_in = NULL; return NULL; } if (self->eof) { FT_ATOMIC_STORE_CHAR_RELAXED(self->needs_input, 0); if (self->avail_in_real > 0) { PyObject *unused_data = PyBytes_FromStringAndSize( (char *)self->zst.next_in, self->avail_in_real); if (unused_data == NULL) { goto error; } Py_XSETREF(self->unused_data, unused_data); } } else if (self->avail_in_real == 0) { self->zst.next_in = NULL; FT_ATOMIC_STORE_CHAR_RELAXED(self->needs_input, 1); } else { FT_ATOMIC_STORE_CHAR_RELAXED(self->needs_input, 0); /* If we did not use the input buffer, we now have to copy the tail from the caller's buffer into the input buffer */ if (!input_buffer_in_use) { /* Discard buffer if it's too small (resizing it may needlessly copy the current contents) */ if (self->input_buffer != NULL && self->input_buffer_size < self->avail_in_real) { PyMem_Free(self->input_buffer); self->input_buffer = NULL; } /* Allocate if necessary */ if (self->input_buffer == NULL) { self->input_buffer = PyMem_Malloc(self->avail_in_real); if (self->input_buffer == NULL) { PyErr_SetNone(PyExc_MemoryError); goto error; } self->input_buffer_size = self->avail_in_real; } /* Copy tail */ memcpy(self->input_buffer, self->zst.next_in, self->avail_in_real); self->zst.next_in = self->input_buffer; } } return result; error: Py_XDECREF(result); return NULL; } /*[clinic input] @permit_long_docstring_body zlib._ZlibDecompressor.decompress data: Py_buffer max_length: Py_ssize_t=-1 Decompress *data*, returning uncompressed data as bytes. If *max_length* is nonnegative, returns at most *max_length* bytes of decompressed data. If this limit is reached and further output can be produced, *self.needs_input* will be set to ``False``. In this case, the next call to *decompress()* may provide *data* as b'' to obtain more of the output. If all of the input data was decompressed and returned (either because this was less than *max_length* bytes, or because *max_length* was negative), *self.needs_input* will be set to True. Attempting to decompress data after the end of stream is reached raises an EOFError. Any data found after the end of the stream is ignored and saved in the unused_data attribute. 
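/* Sketch of the rule that the needs_input flag documented above encodes,
 * restated in terms of a raw z_stream after an inflate() call with a bounded
 * output buffer. The helper name and exact formulation are illustrative, not
 * the module's implementation. */
#include <zlib.h>
#include <stdbool.h>

static bool
needs_more_input(const z_stream *zst, int last_err)
{
    /* Once the stream has ended, no further input will ever be needed. */
    if (last_err == Z_STREAM_END) {
        return false;
    }
    /* If the previous call stopped because the output buffer filled up
     * (avail_out == 0), more output can still be produced from data zlib
     * already holds, so the caller should drain output first, e.g. by
     * calling decompress(b'', ...). New input is only required once all
     * buffered input has been consumed. */
    return zst->avail_out != 0 && zst->avail_in == 0;
}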
[clinic start generated code]*/ static PyObject * zlib__ZlibDecompressor_decompress_impl(ZlibDecompressor *self, Py_buffer *data, Py_ssize_t max_length) /*[clinic end generated code: output=ac00dcf73e843e99 input=c9278e791be1152b]*/ { PyObject *result = NULL; PyMutex_Lock(&self->mutex); if (self->eof) { PyErr_SetString(PyExc_EOFError, "End of stream already reached"); } else { result = decompress(self, data->buf, data->len, max_length); } PyMutex_Unlock(&self->mutex); return result; } /*[clinic input] @classmethod zlib._ZlibDecompressor.__new__ wbits: int(c_default='MAX_WBITS') = MAX_WBITS zdict: object(c_default='NULL') = b'' The predefined compression dictionary. This is a sequence of bytes (such as a bytes object) containing subsequences that are expected to occur frequently in the data that is to be compressed. Those subsequences that are expected to be most common should come at the end of the dictionary. This must be the same dictionary as used by the compressor that produced the input data. Create a decompressor object for decompressing data incrementally. [clinic start generated code]*/ static PyObject * zlib__ZlibDecompressor_impl(PyTypeObject *type, int wbits, PyObject *zdict) /*[clinic end generated code: output=1065607df0d33baa input=9ebad0be6de226e2]*/ { assert(type != NULL && type->tp_alloc != NULL); zlibstate *state = PyType_GetModuleState(type); ZlibDecompressor *self = ZlibDecompressor_CAST(type->tp_alloc(type, 0)); if (self == NULL) { return NULL; } self->eof = 0; self->needs_input = 1; self->avail_in_real = 0; self->input_buffer = NULL; self->input_buffer_size = 0; self->zdict = Py_XNewRef(zdict); self->zst.opaque = NULL; self->zst.zalloc = PyZlib_Malloc; self->zst.zfree = PyZlib_Free; self->zst.next_in = NULL; self->zst.avail_in = 0; self->unused_data = Py_GetConstant(Py_CONSTANT_EMPTY_BYTES); self->mutex = (PyMutex){0}; int err = inflateInit2(&(self->zst), wbits); switch (err) { case Z_OK: self->is_initialised = 1; if (self->zdict != NULL && wbits < 0) { if (set_inflate_zdict_ZlibDecompressor(state, self) < 0) { Py_DECREF(self); return NULL; } } return (PyObject *)self; case Z_STREAM_ERROR: Py_DECREF(self); PyErr_SetString(PyExc_ValueError, "Invalid initialization option"); return NULL; case Z_MEM_ERROR: Py_DECREF(self); PyErr_SetString(PyExc_MemoryError, "Can't allocate memory for decompression object"); return NULL; default: zlib_error(state, self->zst, err, "while creating decompression object"); Py_DECREF(self); return NULL; } } #include "clinic/zlibmodule.c.h" static PyMethodDef comp_methods[] = { ZLIB_COMPRESS_COMPRESS_METHODDEF ZLIB_COMPRESS_FLUSH_METHODDEF ZLIB_COMPRESS_COPY_METHODDEF ZLIB_COMPRESS___COPY___METHODDEF ZLIB_COMPRESS___DEEPCOPY___METHODDEF {NULL, NULL} }; static PyMethodDef Decomp_methods[] = { ZLIB_DECOMPRESS_DECOMPRESS_METHODDEF ZLIB_DECOMPRESS_FLUSH_METHODDEF ZLIB_DECOMPRESS_COPY_METHODDEF ZLIB_DECOMPRESS___COPY___METHODDEF ZLIB_DECOMPRESS___DEEPCOPY___METHODDEF {NULL, NULL} }; static PyMethodDef ZlibDecompressor_methods[] = { ZLIB__ZLIBDECOMPRESSOR_DECOMPRESS_METHODDEF {NULL} }; static PyObject * Decomp_unused_data_get(PyObject *op, void *Py_UNUSED(ignored)) { compobject *self = _compobject_CAST(op); PyMutex_Lock(&self->mutex); assert(self->unused_data != NULL); PyObject *result = Py_NewRef(self->unused_data); PyMutex_Unlock(&self->mutex); return result; } static PyObject * Decomp_unconsumed_tail_get(PyObject *op, void *Py_UNUSED(ignored)) { compobject *self = _compobject_CAST(op); PyMutex_Lock(&self->mutex); assert(self->unconsumed_tail != 
NULL); PyObject *result = Py_NewRef(self->unconsumed_tail); PyMutex_Unlock(&self->mutex); return result; } static PyGetSetDef Decomp_getset[] = { {"unused_data", Decomp_unused_data_get, NULL, NULL}, {"unconsumed_tail", Decomp_unconsumed_tail_get, NULL, NULL}, {NULL}, }; #define COMP_OFF(x) offsetof(compobject, x) static PyMemberDef Decomp_members[] = { {"eof", Py_T_BOOL, COMP_OFF(eof), Py_READONLY}, {NULL}, }; PyDoc_STRVAR(ZlibDecompressor_eof__doc__, "True if the end-of-stream marker has been reached."); PyDoc_STRVAR(ZlibDecompressor_unused_data__doc__, "Data found after the end of the compressed stream."); PyDoc_STRVAR(ZlibDecompressor_needs_input_doc, "True if more input is needed before more decompressed data can be produced."); static PyObject * ZlibDecompressor_unused_data_get(PyObject *op, void *Py_UNUSED(ignored)) { ZlibDecompressor *self = ZlibDecompressor_CAST(op); PyMutex_Lock(&self->mutex); assert(self->unused_data != NULL); PyObject *result = Py_NewRef(self->unused_data); PyMutex_Unlock(&self->mutex); return result; } static PyGetSetDef ZlibDecompressor_getset[] = { {"unused_data", ZlibDecompressor_unused_data_get, NULL, ZlibDecompressor_unused_data__doc__}, {NULL}, }; static PyMemberDef ZlibDecompressor_members[] = { {"eof", Py_T_BOOL, offsetof(ZlibDecompressor, eof), Py_READONLY, ZlibDecompressor_eof__doc__}, {"needs_input", Py_T_BOOL, offsetof(ZlibDecompressor, needs_input), Py_READONLY, ZlibDecompressor_needs_input_doc}, {NULL}, }; /*[clinic input] zlib.adler32 data: Py_buffer value: unsigned_int(bitwise=True) = 1 Starting value of the checksum. / Compute an Adler-32 checksum of data. The returned checksum is an integer. [clinic start generated code]*/ static PyObject * zlib_adler32_impl(PyObject *module, Py_buffer *data, unsigned int value) /*[clinic end generated code: output=422106f5ca8c92c0 input=6ff4557872160e88]*/ { /* Releasing the GIL for very small buffers is inefficient and may lower performance */ if (data->len > 1024*5) { unsigned char *buf = data->buf; Py_ssize_t len = data->len; Py_BEGIN_ALLOW_THREADS /* Avoid truncation of length for very large buffers. adler32() takes length as an unsigned int, which may be narrower than Py_ssize_t. */ while ((size_t)len > UINT_MAX) { value = adler32(value, buf, UINT_MAX); buf += (size_t) UINT_MAX; len -= (size_t) UINT_MAX; } value = adler32(value, buf, (unsigned int)len); Py_END_ALLOW_THREADS } else { value = adler32(value, data->buf, (unsigned int)data->len); } return PyLong_FromUnsignedLong(value & 0xffffffffU); } /*[clinic input] zlib.adler32_combine -> unsigned_int adler1: unsigned_int(bitwise=True) Adler-32 checksum for sequence A adler2: unsigned_int(bitwise=True) Adler-32 checksum for sequence B len2: object(subclass_of='&PyLong_Type') Length of sequence B / Combine two Adler-32 checksums into one. Given the Adler-32 checksum 'adler1' of a sequence A and the Adler-32 checksum 'adler2' of a sequence B of length 'len2', return the Adler-32 checksum of A and B concatenated. 
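/* Small self-contained illustration (not module code) of what
 * zlib.adler32_combine() computes: the checksum of "spam" + "eggs" can be
 * derived from the two per-part checksums plus the length of the second
 * part, without rescanning the data. Adler-32 starts from the value 1, as
 * the default above reflects. */
#include <zlib.h>
#include <string.h>
#include <assert.h>

static void
adler32_combine_demo(void)
{
    const unsigned char a[] = "spam";
    const unsigned char b[] = "eggs";
    uLong adler_a = adler32(1L, a, (uInt)strlen((const char *)a));
    uLong adler_b = adler32(1L, b, (uInt)strlen((const char *)b));

    /* Checksum of the concatenation, computed directly... */
    uLong whole = adler32(adler_a, b, (uInt)strlen((const char *)b));
    /* ...matches the combined value. */
    uLong combined = adler32_combine(adler_a, adler_b,
                                     (z_off_t)strlen((const char *)b));
    assert(whole == combined);
    (void)whole; (void)combined;
}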
[clinic start generated code]*/ static unsigned int zlib_adler32_combine_impl(PyObject *module, unsigned int adler1, unsigned int adler2, PyObject *len2) /*[clinic end generated code: output=61842cefb16afb1b input=51bb045c95130c6f]*/ { #if defined(Z_WANT64) z_off64_t len = convert_to_z_off_t(len2); #else z_off_t len = convert_to_z_off_t(len2); #endif if (PyErr_Occurred()) { return (unsigned int)-1; } return adler32_combine(adler1, adler2, len); } /*[clinic input] zlib.crc32 -> unsigned_int data: Py_buffer value: unsigned_int(bitwise=True) = 0 Starting value of the checksum. / Compute a CRC-32 checksum of data. The returned checksum is an integer. [clinic start generated code]*/ static unsigned int zlib_crc32_impl(PyObject *module, Py_buffer *data, unsigned int value) /*[clinic end generated code: output=b217562e4fe6d6a6 input=1229cb2fb5ea948a]*/ { /* Releasing the GIL for very small buffers is inefficient and may lower performance */ if (data->len > 1024*5) { unsigned char *buf = data->buf; Py_ssize_t len = data->len; Py_BEGIN_ALLOW_THREADS /* Avoid truncation of length for very large buffers. crc32() takes length as an unsigned int, which may be narrower than Py_ssize_t. We further limit size due to bugs in Apple's macOS zlib. See https://github.com/python/cpython/issues/105967. */ #define ZLIB_CRC_CHUNK_SIZE 0x40000000 #if ZLIB_CRC_CHUNK_SIZE > INT_MAX # error "unsupported less than 32-bit platform?" #endif while ((size_t)len > ZLIB_CRC_CHUNK_SIZE) { value = crc32(value, buf, ZLIB_CRC_CHUNK_SIZE); buf += (size_t) ZLIB_CRC_CHUNK_SIZE; len -= (size_t) ZLIB_CRC_CHUNK_SIZE; } #undef ZLIB_CRC_CHUNK_SIZE value = crc32(value, buf, (unsigned int)len); Py_END_ALLOW_THREADS } else { value = crc32(value, data->buf, (unsigned int)data->len); } return value; } /*[clinic input] zlib.crc32_combine -> unsigned_int crc1: unsigned_int(bitwise=True) CRC-32 checksum for sequence A crc2: unsigned_int(bitwise=True) CRC-32 checksum for sequence B len2: object(subclass_of='&PyLong_Type') Length of sequence B / Combine two CRC-32 checksums into one. Given the CRC-32 checksum 'crc1' of a sequence A and the CRC-32 checksum 'crc2' of a sequence B of length 'len2', return the CRC-32 checksum of A and B concatenated. 
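/* Companion sketch for crc32()/crc32_combine() (illustrative helper only):
 * a CRC-32 can be built up incrementally over chunks, which is what the
 * ZLIB_CRC_CHUNK_SIZE loop above relies on, and crc32_combine() merges two
 * independently computed checksums given only the length of the second
 * input. CRC-32 starts from 0. */
#include <zlib.h>
#include <string.h>
#include <assert.h>

static void
crc32_combine_demo(void)
{
    const unsigned char part1[] = "hello, ";
    const unsigned char part2[] = "world";
    size_t len1 = strlen((const char *)part1);
    size_t len2 = strlen((const char *)part2);

    /* Incremental: keep feeding chunks into the running checksum. */
    uLong running = crc32(0L, part1, (uInt)len1);
    running = crc32(running, part2, (uInt)len2);

    /* Combine: the two per-part checksums plus len2 give the same result. */
    uLong crc1 = crc32(0L, part1, (uInt)len1);
    uLong crc2 = crc32(0L, part2, (uInt)len2);
    uLong combined = crc32_combine(crc1, crc2, (z_off_t)len2);
    assert(running == combined);
    (void)running; (void)combined;
}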
[clinic start generated code]*/ static unsigned int zlib_crc32_combine_impl(PyObject *module, unsigned int crc1, unsigned int crc2, PyObject *len2) /*[clinic end generated code: output=c4def907c602e6eb input=9c8a065d9040dc66]*/ { #if defined(Z_WANT64) z_off64_t len = convert_to_z_off_t(len2); #else z_off_t len = convert_to_z_off_t(len2); #endif if (PyErr_Occurred()) { return (unsigned int)-1; } return crc32_combine(crc1, crc2, len); } static PyObject * zlib_getattr(PyObject *self, PyObject *args) { PyObject *name; if (!PyArg_UnpackTuple(args, "__getattr__", 1, 1, &name)) { return NULL; } if (PyUnicode_Check(name) && PyUnicode_EqualToUTF8(name, "__version__")) { if (PyErr_WarnEx(PyExc_DeprecationWarning, "'__version__' is deprecated and slated for removal in Python 3.20", 1) < 0) { return NULL; } return PyUnicode_FromString("1.0"); } PyErr_Format(PyExc_AttributeError, "module 'zlib' has no attribute %R", name); return NULL; } static PyMethodDef zlib_methods[] = { ZLIB_ADLER32_METHODDEF ZLIB_ADLER32_COMBINE_METHODDEF ZLIB_COMPRESS_METHODDEF ZLIB_COMPRESSOBJ_METHODDEF ZLIB_CRC32_METHODDEF ZLIB_CRC32_COMBINE_METHODDEF ZLIB_DECOMPRESS_METHODDEF ZLIB_DECOMPRESSOBJ_METHODDEF {"__getattr__", zlib_getattr, METH_VARARGS, "Module __getattr__"}, {NULL, NULL} }; static PyType_Slot Comptype_slots[] = { {Py_tp_dealloc, Comp_dealloc}, {Py_tp_traverse, compobject_traverse}, {Py_tp_methods, comp_methods}, {0, 0}, }; static PyType_Spec Comptype_spec = { .name = "zlib.Compress", .basicsize = sizeof(compobject), .flags = ( Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC ), .slots= Comptype_slots, }; static PyType_Slot Decomptype_slots[] = { {Py_tp_dealloc, Decomp_dealloc}, {Py_tp_traverse, compobject_traverse}, {Py_tp_methods, Decomp_methods}, {Py_tp_members, Decomp_members}, {Py_tp_getset, Decomp_getset}, {0, 0}, }; static PyType_Spec Decomptype_spec = { .name = "zlib.Decompress", .basicsize = sizeof(compobject), .flags = ( Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC ), .slots = Decomptype_slots, }; static PyType_Slot ZlibDecompressor_type_slots[] = { {Py_tp_dealloc, ZlibDecompressor_dealloc}, {Py_tp_traverse, ZlibDecompressor_traverse}, {Py_tp_members, ZlibDecompressor_members}, {Py_tp_getset, ZlibDecompressor_getset}, {Py_tp_new, zlib__ZlibDecompressor}, {Py_tp_doc, (char *)zlib__ZlibDecompressor__doc__}, {Py_tp_methods, ZlibDecompressor_methods}, {0, 0}, }; static PyType_Spec ZlibDecompressor_type_spec = { .name = "zlib._ZlibDecompressor", .basicsize = sizeof(ZlibDecompressor), // Calling PyType_GetModuleState() on a subclass is not safe. // ZlibDecompressor_type_spec does not have Py_TPFLAGS_BASETYPE flag // which prevents to create a subclass. // So calling PyType_GetModuleState() in this file is always safe. 
.flags = ( Py_TPFLAGS_DEFAULT | Py_TPFLAGS_IMMUTABLETYPE | Py_TPFLAGS_HAVE_GC ), .slots = ZlibDecompressor_type_slots, }; PyDoc_STRVAR(zlib_module_documentation, "The functions in this module allow compression and decompression using the\n" "zlib library, which is based on GNU zip.\n" "\n" "adler32(string[, start]) -- Compute an Adler-32 checksum.\n" "adler32_combine(adler1, adler2, len2, /) -- Combine two Adler-32 checksums.\n" "compress(data[, level]) -- Compress data, with compression level 0-9 or -1.\n" "compressobj([level[, ...]]) -- Return a compressor object.\n" "crc32(string[, start]) -- Compute a CRC-32 checksum.\n" "crc32_combine(crc1, crc2, len2, /) -- Combine two CRC-32 checksums.\n" "decompress(string,[wbits],[bufsize]) -- Decompresses a compressed string.\n" "decompressobj([wbits[, zdict]]) -- Return a decompressor object.\n" "\n" "'wbits' is window buffer size and container format.\n" "Compressor objects support compress() and flush() methods; decompressor\n" "objects support decompress() and flush()."); static int zlib_clear(PyObject *mod) { zlibstate *state = get_zlib_state(mod); Py_CLEAR(state->Comptype); Py_CLEAR(state->Decomptype); Py_CLEAR(state->ZlibDecompressorType); Py_CLEAR(state->ZlibError); return 0; } static int zlib_traverse(PyObject *mod, visitproc visit, void *arg) { zlibstate *state = get_zlib_state(mod); Py_VISIT(state->Comptype); Py_VISIT(state->Decomptype); Py_VISIT(state->ZlibDecompressorType); Py_VISIT(state->ZlibError); return 0; } static void zlib_free(void *mod) { zlib_clear((PyObject *)mod); } static int zlib_exec(PyObject *mod) { zlibstate *state = get_zlib_state(mod); state->Comptype = (PyTypeObject *)PyType_FromModuleAndSpec( mod, &Comptype_spec, NULL); if (state->Comptype == NULL) { return -1; } state->Decomptype = (PyTypeObject *)PyType_FromModuleAndSpec( mod, &Decomptype_spec, NULL); if (state->Decomptype == NULL) { return -1; } state->ZlibDecompressorType = (PyTypeObject *)PyType_FromModuleAndSpec( mod, &ZlibDecompressor_type_spec, NULL); if (state->ZlibDecompressorType == NULL) { return -1; } state->ZlibError = PyErr_NewException("zlib.error", NULL, NULL); if (PyModule_AddObjectRef(mod, "error", state->ZlibError) < 0) { return -1; } if (PyModule_AddObjectRef(mod, "_ZlibDecompressor", (PyObject *)state->ZlibDecompressorType) < 0) { return -1; } #define ZLIB_ADD_INT_MACRO(c) \ do { \ if ((PyModule_AddIntConstant(mod, #c, c)) < 0) { \ return -1; \ } \ } while(0) ZLIB_ADD_INT_MACRO(MAX_WBITS); ZLIB_ADD_INT_MACRO(DEFLATED); ZLIB_ADD_INT_MACRO(DEF_MEM_LEVEL); ZLIB_ADD_INT_MACRO(DEF_BUF_SIZE); // compression levels ZLIB_ADD_INT_MACRO(Z_NO_COMPRESSION); ZLIB_ADD_INT_MACRO(Z_BEST_SPEED); ZLIB_ADD_INT_MACRO(Z_BEST_COMPRESSION); ZLIB_ADD_INT_MACRO(Z_DEFAULT_COMPRESSION); // compression strategies ZLIB_ADD_INT_MACRO(Z_FILTERED); ZLIB_ADD_INT_MACRO(Z_HUFFMAN_ONLY); #ifdef Z_RLE // 1.2.0.1 ZLIB_ADD_INT_MACRO(Z_RLE); #endif #ifdef Z_FIXED // 1.2.2.2 ZLIB_ADD_INT_MACRO(Z_FIXED); #endif ZLIB_ADD_INT_MACRO(Z_DEFAULT_STRATEGY); // allowed flush values ZLIB_ADD_INT_MACRO(Z_NO_FLUSH); ZLIB_ADD_INT_MACRO(Z_PARTIAL_FLUSH); ZLIB_ADD_INT_MACRO(Z_SYNC_FLUSH); ZLIB_ADD_INT_MACRO(Z_FULL_FLUSH); ZLIB_ADD_INT_MACRO(Z_FINISH); #ifdef Z_BLOCK // 1.2.0.5 for inflate, 1.2.3.4 for deflate ZLIB_ADD_INT_MACRO(Z_BLOCK); #endif #ifdef Z_TREES // 1.2.3.4, only for inflate ZLIB_ADD_INT_MACRO(Z_TREES); #endif if (PyModule_Add(mod, "ZLIB_VERSION", PyUnicode_FromString(ZLIB_VERSION)) < 0) { return -1; } if (PyModule_Add(mod, "ZLIB_RUNTIME_VERSION", PyUnicode_FromString(zlibVersion())) 
< 0) { return -1; } #ifdef ZLIBNG_VERSION if (PyModule_Add(mod, "ZLIBNG_VERSION", PyUnicode_FromString(ZLIBNG_VERSION)) < 0) { return -1; } #endif return 0; } static PyModuleDef_Slot zlib_slots[] = { {Py_mod_exec, zlib_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; static struct PyModuleDef zlibmodule = { PyModuleDef_HEAD_INIT, .m_name = "zlib", .m_doc = zlib_module_documentation, .m_size = sizeof(zlibstate), .m_methods = zlib_methods, .m_slots = zlib_slots, .m_traverse = zlib_traverse, .m_clear = zlib_clear, .m_free = zlib_free, }; PyMODINIT_FUNC PyInit_zlib(void) { return PyModuleDef_Init(&zlibmodule); } /* This module makes GNU readline available to Python. It has ideas * contributed by Lee Busby, LLNL, and William Magro, Cornell Theory * Center. The completer interface was inspired by Lele Gaifax. More * recently, it was largely rewritten by Guido van Rossum. */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif /* Standard definitions */ #include "Python.h" #include "pycore_pyatomic_ft_wrappers.h" #include "pycore_pylifecycle.h" // _Py_SetLocaleFromEnv() #include // errno #include // SIGWINCH #include // free() #include // strdup() #ifdef HAVE_SYS_SELECT_H # include // select() #endif #if defined(HAVE_SETLOCALE) /* GNU readline() mistakenly sets the LC_CTYPE locale. * This is evil. Only the user or the app's main() should do this! * We must save and restore the locale around the rl_initialize() call. */ #define SAVE_LOCALE # include // setlocale() #endif #ifdef SAVE_LOCALE # define RESTORE_LOCALE(sl) { setlocale(LC_CTYPE, sl); free(sl); } #else # define RESTORE_LOCALE(sl) #endif #ifdef WITH_EDITLINE # include #else /* GNU readline definitions */ # undef HAVE_CONFIG_H /* Else readline/chardefs.h includes strings.h */ # include # include #endif #ifdef HAVE_RL_COMPLETION_MATCHES #define completion_matches(x, y) \ rl_completion_matches((x), ((rl_compentry_func_t *)(y))) #else #if defined(_RL_FUNCTION_TYPEDEF) extern char **completion_matches(char *, rl_compentry_func_t *); #else #if !defined(__APPLE__) extern char **completion_matches(char *, CPFunction *); #endif #endif #endif /* * It is possible to link the readline module to the readline * emulation library of editline/libedit. * * This emulation library is not 100% API compatible with the "real" readline * and cannot be detected at compile-time, * hence we use a runtime check to detect if the Python readline module is * linked to libedit. * * Currently there is one known API incompatibility: * - 'get_history' has a 1-based index with GNU readline, and a 0-based * index with older versions of libedit's emulation. * - Note that replace_history and remove_history use a 0-based index * with both implementations. */ static int using_libedit_emulation = 0; static const char libedit_version_tag[] = "EditLine wrapper"; static int8_t libedit_history_start = 0; static int8_t libedit_append_replace_history_offset = 0; #ifdef HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK static void on_completion_display_matches_hook(char **matches, int num_matches, int max_length); #endif /* Memory allocated for rl_completer_word_break_characters (see issue #17289 for the motivation). 
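 *
 * Illustrative sketch only, mirroring readline_set_completer_delims()
 * below (new_delims stands in for the caller's encoded delimiter string):
 * the module keeps its own strdup() copy and frees only that copy on
 * replacement, so it never free()s memory that some other module may have
 * assigned to rl_completer_word_break_characters in the meantime:
 *
 *     char *copy = strdup(new_delims);              // module-owned copy
 *     if (copy != NULL) {
 *         free(completer_word_break_characters);    // drop our old copy
 *         completer_word_break_characters = copy;
 *         rl_completer_word_break_characters = copy;
 *     }
 *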
*/ static char *completer_word_break_characters; typedef struct { /* Specify hook functions in Python */ PyObject *completion_display_matches_hook; PyObject *startup_hook; PyObject *pre_input_hook; PyObject *completer; /* Specify a word completer in Python */ PyObject *begidx; PyObject *endidx; } readlinestate; static inline readlinestate* get_readline_state(PyObject *module) { void *state = PyModule_GetState(module); assert(state != NULL); return (readlinestate *)state; } /*[clinic input] module readline [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=ad49da781b9c8721]*/ static int readline_clear(PyObject *m) { readlinestate *state = get_readline_state(m); Py_CLEAR(state->completion_display_matches_hook); Py_CLEAR(state->startup_hook); Py_CLEAR(state->pre_input_hook); Py_CLEAR(state->completer); Py_CLEAR(state->begidx); Py_CLEAR(state->endidx); return 0; } static int readline_traverse(PyObject *m, visitproc visit, void *arg) { readlinestate *state = get_readline_state(m); Py_VISIT(state->completion_display_matches_hook); Py_VISIT(state->startup_hook); Py_VISIT(state->pre_input_hook); Py_VISIT(state->completer); Py_VISIT(state->begidx); Py_VISIT(state->endidx); return 0; } static void readline_free(void *m) { readline_clear((PyObject *)m); } static PyModuleDef readlinemodule; static inline readlinestate* get_hook_module_state(void) { PyObject *mod = PyState_FindModule(&readlinemodule); if (mod == NULL){ PyErr_Clear(); return NULL; } Py_INCREF(mod); readlinestate *state = get_readline_state(mod); Py_DECREF(mod); return state; } /* Convert to/from multibyte C strings */ static PyObject * encode(PyObject *b) { return PyUnicode_EncodeLocale(b, "surrogateescape"); } static PyObject * decode(const char *s) { return PyUnicode_DecodeLocale(s, "surrogateescape"); } /* Explicitly disable bracketed paste in the interactive interpreter, even if it's set in the inputrc, is enabled by default (eg GNU Readline 8.1), or a user calls readline.read_init_file(). The Python REPL has not implemented bracketed paste support. Also, bracketed mode writes the "\x1b[?2004h" escape sequence into stdout which causes test failures in applications that don't support it. It can still be explicitly enabled by calling readline.parse_and_bind("set enable-bracketed-paste on"). See bpo-42819 for more details. This should be removed if bracketed paste mode is implemented (bpo-39820). */ static void disable_bracketed_paste(void) { if (!using_libedit_emulation) { rl_variable_bind ("enable-bracketed-paste", "off"); } } /* Exported function to send one line to readline's init file parser */ /*[clinic input] @critical_section readline.parse_and_bind string: object / Execute the init line provided in the string argument. 
[clinic start generated code]*/ static PyObject * readline_parse_and_bind_impl(PyObject *module, PyObject *string) /*[clinic end generated code: output=828d9b6630d434f5 input=cefdc0f9f62f9fcc]*/ { char *copy; PyObject *encoded = encode(string); if (encoded == NULL) { return NULL; } /* Make a copy -- rl_parse_and_bind() modifies its argument */ /* Bernard Herzog */ copy = PyMem_Malloc(1 + PyBytes_GET_SIZE(encoded)); if (copy == NULL) { Py_DECREF(encoded); return PyErr_NoMemory(); } strcpy(copy, PyBytes_AS_STRING(encoded)); Py_DECREF(encoded); rl_parse_and_bind(copy); PyMem_Free(copy); /* Free the copy */ Py_RETURN_NONE; } /* Exported function to parse a readline init file */ /*[clinic input] @critical_section readline.read_init_file filename as filename_obj: object = None / Execute a readline initialization file. The default filename is the last filename used. [clinic start generated code]*/ static PyObject * readline_read_init_file_impl(PyObject *module, PyObject *filename_obj) /*[clinic end generated code: output=8e059b676142831e input=62b767adfab6cc15]*/ { PyObject *filename_bytes; if (filename_obj != Py_None) { if (!PyUnicode_FSConverter(filename_obj, &filename_bytes)) return NULL; if (PySys_Audit("open", "OCi", filename_obj, 'r', 0) < 0) { Py_DECREF(filename_bytes); return NULL; } errno = rl_read_init_file(PyBytes_AS_STRING(filename_bytes)); Py_DECREF(filename_bytes); } else { /* We have the choice to either try to exactly reproduce the * logic to find the filename, ignore it, or provide a dummy value. * In contrast to the history file manipulations, there's no * clear default to choose. */ if (PySys_Audit("open", "sCi", "", 'r', 0) < 0) { return NULL; } errno = rl_read_init_file(NULL); } if (errno) return PyErr_SetFromErrno(PyExc_OSError); disable_bracketed_paste(); Py_RETURN_NONE; } /* Exported function to load a readline history file */ /*[clinic input] @critical_section readline.read_history_file filename as filename_obj: object = None / Load a readline history file. The default filename is ~/.history. [clinic start generated code]*/ static PyObject * readline_read_history_file_impl(PyObject *module, PyObject *filename_obj) /*[clinic end generated code: output=66a951836fb54fbb input=5d86fd7813172a67]*/ { PyObject *filename_bytes; if (filename_obj != Py_None) { if (!PyUnicode_FSConverter(filename_obj, &filename_bytes)) return NULL; if (PySys_Audit("open", "OCi", filename_obj, 'r', 0) < 0) { Py_DECREF(filename_bytes); return NULL; } errno = read_history(PyBytes_AS_STRING(filename_bytes)); Py_DECREF(filename_bytes); } else { /* Use the documented default filename here, * even though readline expands it differently internally. */ if (PySys_Audit("open", "sCi", "~/.history", 'r', 0) < 0) { return NULL; } errno = read_history(NULL); } if (errno) return PyErr_SetFromErrno(PyExc_OSError); Py_RETURN_NONE; } static int _history_length = -1; /* do not truncate history by default */ /* Exported function to save a readline history file */ /*[clinic input] @critical_section readline.write_history_file filename as filename_obj: object = None / Save a readline history file. The default filename is ~/.history.
[clinic start generated code]*/ static PyObject * readline_write_history_file_impl(PyObject *module, PyObject *filename_obj) /*[clinic end generated code: output=fbcad13d8ef59ae6 input=34aaada95120cfaa]*/ { PyObject *filename_bytes; const char *filename; int err; if (filename_obj != Py_None) { if (!PyUnicode_FSConverter(filename_obj, &filename_bytes)) return NULL; filename = PyBytes_AS_STRING(filename_bytes); if (PySys_Audit("open", "OCi", filename_obj, 'w', 0) < 0) { Py_DECREF(filename_bytes); return NULL; } } else { filename_bytes = NULL; filename = NULL; /* Use the documented default filename here, * even though readline expands it different internally. */ if (PySys_Audit("open", "sCi", "~/.history", 'w', 0) < 0) { return NULL; } } errno = err = write_history(filename); int history_length = FT_ATOMIC_LOAD_INT_RELAXED(_history_length); if (!err && history_length >= 0) history_truncate_file(filename, history_length); Py_XDECREF(filename_bytes); errno = err; if (errno) return PyErr_SetFromErrno(PyExc_OSError); Py_RETURN_NONE; } #ifdef HAVE_RL_APPEND_HISTORY /* Exported function to save part of a readline history file */ /*[clinic input] @critical_section readline.append_history_file nelements: int filename as filename_obj: object = None / Append the last nelements items of the history list to file. The default filename is ~/.history. [clinic start generated code]*/ static PyObject * readline_append_history_file_impl(PyObject *module, int nelements, PyObject *filename_obj) /*[clinic end generated code: output=5df06fc9da56e4e4 input=78a6061a8d3a0275]*/ { if (nelements < 0) { PyErr_SetString(PyExc_ValueError, "nelements must be non-negative"); return NULL; } PyObject *filename_bytes; const char *filename; int err; if (filename_obj != Py_None) { if (!PyUnicode_FSConverter(filename_obj, &filename_bytes)) return NULL; filename = PyBytes_AS_STRING(filename_bytes); if (PySys_Audit("open", "OCi", filename_obj, 'a', 0) < 0) { Py_DECREF(filename_bytes); return NULL; } } else { filename_bytes = NULL; filename = NULL; /* Use the documented default filename here, * even though readline expands it different internally. */ if (PySys_Audit("open", "sCi", "~/.history", 'a', 0) < 0) { return NULL; } } errno = err = append_history( nelements - libedit_append_replace_history_offset, filename); int history_length = FT_ATOMIC_LOAD_INT_RELAXED(_history_length); if (!err && history_length >= 0) history_truncate_file(filename, history_length); Py_XDECREF(filename_bytes); errno = err; if (errno) return PyErr_SetFromErrno(PyExc_OSError); Py_RETURN_NONE; } #endif /* Set history length */ /*[clinic input] readline.set_history_length length: int / Set the maximal number of lines which will be written to the history file. A negative length is used to inhibit history truncation. [clinic start generated code]*/ static PyObject * readline_set_history_length_impl(PyObject *module, int length) /*[clinic end generated code: output=e161a53e45987dc7 input=b8901bf16488b760]*/ { FT_ATOMIC_STORE_INT_RELAXED(_history_length, length); Py_RETURN_NONE; } /* Get history length */ /*[clinic input] readline.get_history_length Return the maximum number of lines that will be written to the history file. 
[clinic start generated code]*/ static PyObject * readline_get_history_length_impl(PyObject *module) /*[clinic end generated code: output=83a2eeae35b6d2b9 input=5dce2eeba4327817]*/ { int history_length = FT_ATOMIC_LOAD_INT_RELAXED(_history_length); return PyLong_FromLong(history_length); } /* Generic hook function setter */ static PyObject * set_hook(const char *funcname, PyObject **hook_var, PyObject *function) { if (function == Py_None) { Py_CLEAR(*hook_var); } else if (PyCallable_Check(function)) { Py_XSETREF(*hook_var, Py_NewRef(function)); } else { PyErr_Format(PyExc_TypeError, "set_%.50s(func): argument not callable", funcname); return NULL; } Py_RETURN_NONE; } /*[clinic input] @critical_section readline.set_completion_display_matches_hook function: object = None / Set or remove the completion display function. The function is called as function(substitution, [matches], longest_match_length) once each time matches need to be displayed. [clinic start generated code]*/ static PyObject * readline_set_completion_display_matches_hook_impl(PyObject *module, PyObject *function) /*[clinic end generated code: output=516e5cb8db75a328 input=ea4191e4a07d28d3]*/ { readlinestate *state = get_readline_state(module); PyObject *result = set_hook("completion_display_matches_hook", &state->completion_display_matches_hook, function); #ifdef HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK /* We cannot set this hook globally, since it replaces the default completion display. */ rl_completion_display_matches_hook = state->completion_display_matches_hook ? #if defined(HAVE_RL_COMPDISP_FUNC_T) (rl_compdisp_func_t *)on_completion_display_matches_hook : 0; #else (VFunction *)on_completion_display_matches_hook : 0; #endif #endif return result; } /*[clinic input] @critical_section readline.set_startup_hook function: object = None / Set or remove the function invoked by the rl_startup_hook callback. The function is called with no arguments just before readline prints the first prompt. [clinic start generated code]*/ static PyObject * readline_set_startup_hook_impl(PyObject *module, PyObject *function) /*[clinic end generated code: output=02cd0e0c4fa082ad input=11fce34992f1125e]*/ { readlinestate *state = get_readline_state(module); return set_hook("startup_hook", &state->startup_hook, function); } #ifdef HAVE_RL_PRE_INPUT_HOOK /* Set pre-input hook */ /*[clinic input] @critical_section readline.set_pre_input_hook function: object = None / Set or remove the function invoked by the rl_pre_input_hook callback. The function is called with no arguments after the first prompt has been printed and just before readline starts reading input characters. [clinic start generated code]*/ static PyObject * readline_set_pre_input_hook_impl(PyObject *module, PyObject *function) /*[clinic end generated code: output=fe1a96505096f464 input=96d3d5ff4a0c7c28]*/ { readlinestate *state = get_readline_state(module); return set_hook("pre_input_hook", &state->pre_input_hook, function); } /* Get pre-input hook */ /*[clinic input] readline.get_pre_input_hook Get the current pre-input hook function. 
[clinic start generated code]*/ static PyObject * readline_get_pre_input_hook_impl(PyObject *module) /*[clinic end generated code: output=ad56b77a8e8981ca input=fb1e1b1fbd94e4e5]*/ { readlinestate *state = get_readline_state(module); if (state->pre_input_hook == NULL) { Py_RETURN_NONE; } return Py_NewRef(state->pre_input_hook); } #endif /* Get the completion type for the scope of the tab-completion */ /*[clinic input] readline.get_completion_type Get the type of completion being attempted. [clinic start generated code]*/ static PyObject * readline_get_completion_type_impl(PyObject *module) /*[clinic end generated code: output=5c54d58a04997c07 input=04b92bc7a82dac91]*/ { return PyLong_FromLong(rl_completion_type); } /* Get the beginning index for the scope of the tab-completion */ /*[clinic input] readline.get_begidx Get the beginning index of the completion scope. [clinic start generated code]*/ static PyObject * readline_get_begidx_impl(PyObject *module) /*[clinic end generated code: output=362616ee8ed1b2b1 input=e083b81c8eb4bac3]*/ { readlinestate *state = get_readline_state(module); return Py_NewRef(state->begidx); } /* Get the ending index for the scope of the tab-completion */ /*[clinic input] readline.get_endidx Get the ending index of the completion scope. [clinic start generated code]*/ static PyObject * readline_get_endidx_impl(PyObject *module) /*[clinic end generated code: output=7f763350b12d7517 input=d4c7e34a625fd770]*/ { readlinestate *state = get_readline_state(module); return Py_NewRef(state->endidx); } /* Set the tab-completion word-delimiters that readline uses */ /*[clinic input] @critical_section readline.set_completer_delims string: object / Set the word delimiters for completion. [clinic start generated code]*/ static PyObject * readline_set_completer_delims_impl(PyObject *module, PyObject *string) /*[clinic end generated code: output=017e48e9704a2f64 input=6c87bb1cbed7fcf1]*/ { char *break_chars; PyObject *encoded = encode(string); if (encoded == NULL) { return NULL; } /* Keep a reference to the allocated memory in the module state in case some other module modifies rl_completer_word_break_characters (see issue #17289). */ break_chars = strdup(PyBytes_AS_STRING(encoded)); Py_DECREF(encoded); if (break_chars) { free(completer_word_break_characters); completer_word_break_characters = break_chars; #ifdef WITH_EDITLINE rl_basic_word_break_characters = break_chars; #else if (using_libedit_emulation) { rl_basic_word_break_characters = break_chars; } #endif rl_completer_word_break_characters = break_chars; Py_RETURN_NONE; } else return PyErr_NoMemory(); } /* _py_free_history_entry_lock_held: Utility function to free a history entry. */ #if defined(RL_READLINE_VERSION) && RL_READLINE_VERSION >= 0x0500 /* Readline version >= 5.0 introduced a timestamp field into the history entry structure; this needs to be freed to avoid a memory leak. This version of readline also introduced the handy 'free_history_entry' function, which takes care of the timestamp. */ static void _py_free_history_entry_lock_held(HIST_ENTRY *entry) { histdata_t data = free_history_entry(entry); free(data); } #else /* No free_history_entry function; free everything manually. */ static void _py_free_history_entry_lock_held(HIST_ENTRY *entry) { if (entry->line) free((void *)entry->line); if (entry->data) free(entry->data); free(entry); } #endif /*[clinic input] @critical_section readline.remove_history_item pos as entry_number: int / Remove history item given by its zero-based position. 
[clinic start generated code]*/ static PyObject * readline_remove_history_item_impl(PyObject *module, int entry_number) /*[clinic end generated code: output=ab114f029208c7e8 input=847d7cc7e7c25852]*/ { HIST_ENTRY *entry; if (entry_number < 0) { PyErr_SetString(PyExc_ValueError, "History index cannot be negative"); return NULL; } entry = remove_history(entry_number); if (!entry) { PyErr_Format(PyExc_ValueError, "No history item at position %d", entry_number); return NULL; } /* free memory allocated for the history entry */ _py_free_history_entry_lock_held(entry); Py_RETURN_NONE; } /*[clinic input] @critical_section readline.replace_history_item pos as entry_number: int line: unicode / Replaces history item given by its position with contents of line. pos is zero-based. [clinic start generated code]*/ static PyObject * readline_replace_history_item_impl(PyObject *module, int entry_number, PyObject *line) /*[clinic end generated code: output=f8cec2770ca125eb input=b44c8dcdc2dd87fe]*/ { PyObject *encoded; HIST_ENTRY *old_entry; if (entry_number < 0) { PyErr_SetString(PyExc_ValueError, "History index cannot be negative"); return NULL; } encoded = encode(line); if (encoded == NULL) { return NULL; } old_entry = replace_history_entry( entry_number + libedit_append_replace_history_offset, PyBytes_AS_STRING(encoded), (void *)NULL); Py_DECREF(encoded); if (!old_entry) { PyErr_Format(PyExc_ValueError, "No history item at position %d", entry_number); return NULL; } /* free memory allocated for the old history entry */ _py_free_history_entry_lock_held(old_entry); Py_RETURN_NONE; } /* Add a line to the history buffer */ /*[clinic input] @critical_section readline.add_history string: object / Add an item to the history buffer. [clinic start generated code]*/ static PyObject * readline_add_history_impl(PyObject *module, PyObject *string) /*[clinic end generated code: output=89047062042ac344 input=faa7053b8612513b]*/ { PyObject *encoded = encode(string); if (encoded == NULL) { return NULL; } add_history(PyBytes_AS_STRING(encoded)); Py_DECREF(encoded); Py_RETURN_NONE; } static int should_auto_add_history = 1; /* Enable or disable automatic history */ /*[clinic input] readline.set_auto_history enabled as _should_auto_add_history: bool / Enables or disables automatic history. [clinic start generated code]*/ static PyObject * readline_set_auto_history_impl(PyObject *module, int _should_auto_add_history) /*[clinic end generated code: output=619c6968246fd82b input=3d413073a1a03355]*/ { should_auto_add_history = _should_auto_add_history; Py_RETURN_NONE; } /* Get the tab-completion word-delimiters that readline uses */ /*[clinic input] @critical_section readline.get_completer_delims Get the word delimiters for completion. [clinic start generated code]*/ static PyObject * readline_get_completer_delims_impl(PyObject *module) /*[clinic end generated code: output=6b060280fa68ef43 input=80583cdf8176bcdd]*/ { return decode(rl_completer_word_break_characters); } /* Set the completer function */ /*[clinic input] @critical_section readline.set_completer function: object = None / Set or remove the completer function. The function is called as function(text, state), for state in 0, 1, 2, ..., until it returns a non-string. It should return the next possible completion starting with 'text'. 
[clinic start generated code]*/ static PyObject * readline_set_completer_impl(PyObject *module, PyObject *function) /*[clinic end generated code: output=171a2a60f81d3204 input=97f539d8d0bfcb95]*/ { readlinestate *state = get_readline_state(module); return set_hook("completer", &state->completer, function); } /*[clinic input] readline.get_completer Get the current completer function. [clinic start generated code]*/ static PyObject * readline_get_completer_impl(PyObject *module) /*[clinic end generated code: output=6e6bbd8226d14475 input=6457522e56d70d13]*/ { readlinestate *state = get_readline_state(module); if (state->completer == NULL) { Py_RETURN_NONE; } return Py_NewRef(state->completer); } /* Private function to get current length of history. XXX It may be * possible to replace this with a direct use of history_length instead, * but it's not clear whether BSD's libedit keeps history_length up to date. * See issue #8065.*/ static int _py_get_history_length_lock_held(void) { HISTORY_STATE *hist_st = history_get_history_state(); int length = hist_st->length; /* the history docs don't say so, but the address of hist_st changes each time history_get_history_state is called which makes me think it's freshly malloc'd memory... on the other hand, the address of the last line stays the same as long as history isn't extended, so it appears to be malloc'd but managed by the history package... */ free(hist_st); return length; } /* Exported function to get any element of history */ /*[clinic input] @critical_section readline.get_history_item index as idx: int / Return the current contents of history item at one-based index. [clinic start generated code]*/ static PyObject * readline_get_history_item_impl(PyObject *module, int idx) /*[clinic end generated code: output=83d3e53ea5f34b3d input=2835b50c7bde705f]*/ { HIST_ENTRY *hist_ent; if (using_libedit_emulation) { /* Older versions of libedit's readline emulation * use 0-based indexes, while readline and newer * versions of libedit use 1-based indexes. */ int length = _py_get_history_length_lock_held(); idx = idx - 1 + libedit_history_start; /* * Apple's readline emulation crashes when * the index is out of range, therefore * test for that and fail gracefully. */ if (idx < (0 + libedit_history_start) || idx >= (length + libedit_history_start)) { Py_RETURN_NONE; } } if ((hist_ent = history_get(idx))) return decode(hist_ent->line); else { Py_RETURN_NONE; } } /* Exported function to get current length of history */ /*[clinic input] @critical_section readline.get_current_history_length Return the current (not the maximum) length of history. [clinic start generated code]*/ static PyObject * readline_get_current_history_length_impl(PyObject *module) /*[clinic end generated code: output=436b294f12ba1e3f input=22e9fd0abbc2fd8d]*/ { return PyLong_FromLong((long)_py_get_history_length_lock_held()); } /* Exported function to read the current line buffer */ /*[clinic input] @critical_section readline.get_line_buffer Return the current contents of the line buffer. [clinic start generated code]*/ static PyObject * readline_get_line_buffer_impl(PyObject *module) /*[clinic end generated code: output=d22f9025ecad80e4 input=8e02e0fe081feece]*/ { return decode(rl_line_buffer); } #ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER /* Exported function to clear the current history */ /*[clinic input] @critical_section readline.clear_history Clear the current readline history. 
[clinic start generated code]*/ static PyObject * readline_clear_history_impl(PyObject *module) /*[clinic end generated code: output=1f2dbb0dfa5d5ebb input=b2c6b11551593053]*/ { clear_history(); Py_RETURN_NONE; } #endif /* Exported function to insert text into the line buffer */ /*[clinic input] @critical_section readline.insert_text string: object / Insert text into the line buffer at the cursor position. [clinic start generated code]*/ static PyObject * readline_insert_text_impl(PyObject *module, PyObject *string) /*[clinic end generated code: output=4bf4e176f68750e0 input=2f401f4316df33c2]*/ { PyObject *encoded = encode(string); if (encoded == NULL) { return NULL; } rl_insert_text(PyBytes_AS_STRING(encoded)); Py_DECREF(encoded); Py_RETURN_NONE; } /* Redisplay the line buffer */ /*[clinic input] @permit_long_summary @critical_section readline.redisplay Change what's displayed on the screen to reflect contents of the line buffer. [clinic start generated code]*/ static PyObject * readline_redisplay_impl(PyObject *module) /*[clinic end generated code: output=a8b9725827c3c34b input=fb6ce76959c6f0ec]*/ { rl_redisplay(); Py_RETURN_NONE; } #include "clinic/readline.c.h" /* Table of functions exported by the module */ static struct PyMethodDef readline_methods[] = { READLINE_PARSE_AND_BIND_METHODDEF READLINE_GET_LINE_BUFFER_METHODDEF READLINE_INSERT_TEXT_METHODDEF READLINE_REDISPLAY_METHODDEF READLINE_READ_INIT_FILE_METHODDEF READLINE_READ_HISTORY_FILE_METHODDEF READLINE_WRITE_HISTORY_FILE_METHODDEF #ifdef HAVE_RL_APPEND_HISTORY READLINE_APPEND_HISTORY_FILE_METHODDEF #endif READLINE_GET_HISTORY_ITEM_METHODDEF READLINE_GET_CURRENT_HISTORY_LENGTH_METHODDEF READLINE_SET_HISTORY_LENGTH_METHODDEF READLINE_GET_HISTORY_LENGTH_METHODDEF READLINE_SET_COMPLETER_METHODDEF READLINE_GET_COMPLETER_METHODDEF READLINE_GET_COMPLETION_TYPE_METHODDEF READLINE_GET_BEGIDX_METHODDEF READLINE_GET_ENDIDX_METHODDEF READLINE_SET_COMPLETER_DELIMS_METHODDEF READLINE_SET_AUTO_HISTORY_METHODDEF READLINE_ADD_HISTORY_METHODDEF READLINE_REMOVE_HISTORY_ITEM_METHODDEF READLINE_REPLACE_HISTORY_ITEM_METHODDEF READLINE_GET_COMPLETER_DELIMS_METHODDEF READLINE_SET_COMPLETION_DISPLAY_MATCHES_HOOK_METHODDEF READLINE_SET_STARTUP_HOOK_METHODDEF #ifdef HAVE_RL_PRE_INPUT_HOOK READLINE_SET_PRE_INPUT_HOOK_METHODDEF READLINE_GET_PRE_INPUT_HOOK_METHODDEF #endif #ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER READLINE_CLEAR_HISTORY_METHODDEF #endif {0, 0} }; /* C function to call the Python hooks. 
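 *
 * Rough shape of the bridge (illustrative only, condensed from on_hook()
 * and setup_readline() below): readline invokes a plain C callback, which
 * forwards to the stored Python callable and coerces the result back to
 * the int readline expects; a Python return value of None behaves like 0.
 *
 *     rl_startup_hook = on_startup_hook;                  // plain C callback
 *     PyObject *r = PyObject_CallNoArgs(state->startup_hook);
 *     int result = (r == Py_None) ? 0 : PyLong_AsInt(r);
 *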
*/ static int on_hook(PyObject *func) { int result = 0; if (func != NULL) { PyObject *r; r = PyObject_CallNoArgs(func); if (r == NULL) goto error; if (r == Py_None) result = 0; else { result = PyLong_AsInt(r); if (result == -1 && PyErr_Occurred()) goto error; } Py_DECREF(r); goto done; error: PyErr_Clear(); Py_XDECREF(r); done: return result; } return result; } static int #if defined(_RL_FUNCTION_TYPEDEF) || !defined(Py_RL_STARTUP_HOOK_TAKES_ARGS) on_startup_hook(void) #else on_startup_hook(const char *Py_UNUSED(text), int Py_UNUSED(state)) #endif { int r; PyGILState_STATE gilstate = PyGILState_Ensure(); readlinestate *state = get_hook_module_state(); if (state == NULL) { PyGILState_Release(gilstate); return -1; } r = on_hook(state->startup_hook); PyGILState_Release(gilstate); return r; } #ifdef HAVE_RL_PRE_INPUT_HOOK static int #if defined(_RL_FUNCTION_TYPEDEF) || !defined(Py_RL_STARTUP_HOOK_TAKES_ARGS) on_pre_input_hook(void) #else on_pre_input_hook(const char *Py_UNUSED(text), int Py_UNUSED(state)) #endif { int r; PyGILState_STATE gilstate = PyGILState_Ensure(); readlinestate *state = get_hook_module_state(); if (state == NULL) { PyGILState_Release(gilstate); return -1; } r = on_hook(state->pre_input_hook); PyGILState_Release(gilstate); return r; } #endif /* C function to call the Python completion_display_matches */ #ifdef HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK static void on_completion_display_matches_hook(char **matches, int num_matches, int max_length) { int i; PyObject *sub, *m=NULL, *s=NULL, *r=NULL; PyGILState_STATE gilstate = PyGILState_Ensure(); readlinestate *state = get_hook_module_state(); if (state == NULL) { PyGILState_Release(gilstate); return; } m = PyList_New(num_matches); if (m == NULL) goto error; for (i = 0; i < num_matches; i++) { s = decode(matches[i+1]); if (s == NULL) goto error; PyList_SET_ITEM(m, i, s); } sub = decode(matches[0]); r = PyObject_CallFunction(state->completion_display_matches_hook, "NNi", sub, m, max_length); m=NULL; if (r == NULL || (r != Py_None && PyLong_AsLong(r) == -1 && PyErr_Occurred())) { goto error; } Py_CLEAR(r); if (0) { error: PyErr_Clear(); Py_XDECREF(m); Py_XDECREF(r); } PyGILState_Release(gilstate); } #endif #ifdef HAVE_RL_RESIZE_TERMINAL static volatile sig_atomic_t sigwinch_received; static PyOS_sighandler_t sigwinch_ohandler; static void readline_sigwinch_handler(int signum) { sigwinch_received = 1; if (sigwinch_ohandler && sigwinch_ohandler != SIG_IGN && sigwinch_ohandler != SIG_DFL) sigwinch_ohandler(signum); #ifndef HAVE_SIGACTION /* If the handler was installed with signal() rather than sigaction(), we need to reinstall it. */ PyOS_setsig(SIGWINCH, readline_sigwinch_handler); #endif } #endif /* C function to call the Python completer. 
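 *
 * Protocol recap (illustrative only, condensed from on_completion() below;
 * the real code checks every call for errors and releases the temporaries):
 * the Python completer is called as completer(text, state) for
 * state = 0, 1, 2, ... until it returns a non-string, and readline frees
 * each C result itself, so the bridge hands back malloc()-compatible copies:
 *
 *     PyObject *r = PyObject_CallFunction(completer, "Ni", decode(text), state);
 *     result = (r == Py_None) ? NULL : strdup(PyBytes_AS_STRING(encode(r)));
 *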
*/ static char * on_completion(const char *text, int state) { char *result = NULL; PyGILState_STATE gilstate = PyGILState_Ensure(); readlinestate *module_state = get_hook_module_state(); if (module_state == NULL) { PyGILState_Release(gilstate); return NULL; } if (module_state->completer != NULL) { PyObject *r = NULL, *t; rl_attempted_completion_over = 1; t = decode(text); r = PyObject_CallFunction(module_state->completer, "Ni", t, state); if (r == NULL) goto error; if (r == Py_None) { result = NULL; } else { PyObject *encoded = encode(r); if (encoded == NULL) goto error; result = strdup(PyBytes_AS_STRING(encoded)); Py_DECREF(encoded); } Py_DECREF(r); goto done; error: PyErr_Clear(); Py_XDECREF(r); done: PyGILState_Release(gilstate); return result; } PyGILState_Release(gilstate); return result; } /* A more flexible constructor that saves the "begidx" and "endidx" * before calling the normal completer */ static char ** flex_complete(const char *text, int start, int end) { char **result; char saved; size_t start_size, end_size; wchar_t *s; PyGILState_STATE gilstate = PyGILState_Ensure(); readlinestate *state = get_hook_module_state(); #ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER rl_completion_append_character ='\0'; #endif #ifdef HAVE_RL_COMPLETION_SUPPRESS_APPEND rl_completion_suppress_append = 0; #endif saved = rl_line_buffer[start]; rl_line_buffer[start] = 0; s = Py_DecodeLocale(rl_line_buffer, &start_size); rl_line_buffer[start] = saved; if (s == NULL) { goto done; } PyMem_RawFree(s); saved = rl_line_buffer[end]; rl_line_buffer[end] = 0; s = Py_DecodeLocale(rl_line_buffer + start, &end_size); rl_line_buffer[end] = saved; if (s == NULL) { goto done; } PyMem_RawFree(s); start = (int)start_size; end = start + (int)end_size; done: if (state) { Py_XDECREF(state->begidx); Py_XDECREF(state->endidx); state->begidx = PyLong_FromLong((long) start); state->endidx = PyLong_FromLong((long) end); } result = completion_matches((char *)text, *on_completion); PyGILState_Release(gilstate); return result; } /* Helper to initialize GNU readline properly. Return -1 on memory allocation failure, return 0 on success. */ static int setup_readline(readlinestate *mod_state) { #ifdef SAVE_LOCALE char *saved_locale = strdup(setlocale(LC_CTYPE, NULL)); if (!saved_locale) { return -1; } #endif /* The name must be defined before initialization */ rl_readline_name = "python"; /* the libedit readline emulation resets key bindings etc * when calling rl_initialize. So call it upfront */ if (using_libedit_emulation) rl_initialize(); /* Detect if libedit's readline emulation uses 0-based * indexing or 1-based indexing. */ add_history("1"); if (history_get(1) == NULL) { libedit_history_start = 0; } else { libedit_history_start = 1; } /* Some libedit implementations use 1 based indexing on * replace_history_entry where libreadline uses 0 based. * The API our module presents is supposed to be 0 based. * It's a mad mad mad mad world. 
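 *
 * What the probe below does, informally: with "1" and "2" in the history,
 * replace_history_entry(1, "X", NULL) is issued and the first entry is
 * inspected.  If it is still "1", replacement was 0-based (the GNU readline
 * behaviour) and no offset is needed; if it became "X", the library used a
 * 1-based index and every index this module passes on gets bumped, as in
 * readline_replace_history_item_impl() above:
 *
 *     replace_history_entry(pos + libedit_append_replace_history_offset,
 *                           line, NULL);
 *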
*/ { add_history("2"); HIST_ENTRY *old_entry = replace_history_entry(1, "X", NULL); _py_free_history_entry_lock_held(old_entry); HIST_ENTRY *item = history_get(libedit_history_start); if (item && item->line && strcmp(item->line, "X")) { libedit_append_replace_history_offset = 0; } else { libedit_append_replace_history_offset = 1; } } clear_history(); using_history(); /* Force rebind of TAB to insert-tab */ rl_bind_key('\t', rl_insert); /* Bind both ESC-TAB and ESC-ESC to the completion function */ rl_bind_key_in_map ('\t', rl_complete, emacs_meta_keymap); rl_bind_key_in_map ('\033', rl_complete, emacs_meta_keymap); #ifdef HAVE_RL_RESIZE_TERMINAL /* Set up signal handler for window resize */ sigwinch_ohandler = PyOS_setsig(SIGWINCH, readline_sigwinch_handler); #endif /* Set our hook functions */ rl_startup_hook = on_startup_hook; #ifdef HAVE_RL_PRE_INPUT_HOOK rl_pre_input_hook = on_pre_input_hook; #endif /* Set our completion function */ rl_attempted_completion_function = flex_complete; /* Set Python word break characters */ completer_word_break_characters = strdup(" \t\n`~!@#$%^&*()-=+[{]}\\|;:'\",<>/?"); /* All nonalphanums except '.' */ #ifdef WITH_EDITLINE // libedit uses rl_basic_word_break_characters instead of // rl_completer_word_break_characters as complete delimiter rl_basic_word_break_characters = completer_word_break_characters; #else if (using_libedit_emulation) { rl_basic_word_break_characters = completer_word_break_characters; } #endif rl_completer_word_break_characters = completer_word_break_characters; mod_state->begidx = PyLong_FromLong(0L); mod_state->endidx = PyLong_FromLong(0L); if (!using_libedit_emulation) { if (!isatty(STDOUT_FILENO)) { /* Issue #19884: stdout is not a terminal. Disable meta modifier keys to not write the ANSI sequence "\033[1034h" into stdout. On terminals supporting 8 bit characters like TERM=xterm-256color (which is now the default Fedora since Fedora 18), the meta key is used to enable support of 8 bit characters (ANSI sequence "\033[1034h"). With libedit, this call makes readline() crash. */ rl_variable_bind ("enable-meta-key", "off"); } } /* Initialize (allows .inputrc to override) * * XXX: A bug in the readline-2.2 library causes a memory leak * inside this function. Nothing we can do about it. */ if (using_libedit_emulation) rl_read_init_file(NULL); else rl_initialize(); disable_bracketed_paste(); RESTORE_LOCALE(saved_locale) return 0; } /* Wrapper around GNU readline that handles signals differently. */ static char *completed_input_string; static void rlhandler(char *text) { completed_input_string = text; rl_callback_handler_remove(); } static char * readline_until_enter_or_signal(const char *prompt, int *signal) { // Defined in Parser/myreadline.c extern PyThreadState *_PyOS_ReadlineTState; char * not_done_reading = ""; fd_set selectset; *signal = 0; #ifdef HAVE_RL_CATCH_SIGNAL rl_catch_signals = 0; #endif rl_callback_handler_install (prompt, rlhandler); FD_ZERO(&selectset); completed_input_string = not_done_reading; while (completed_input_string == not_done_reading) { int has_input = 0, err = 0; while (!has_input) { struct timeval timeout = {0, 100000}; // 100 ms (0.1 seconds) /* [Bug #1552726] Only limit the pause if an input hook has been defined. 
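 *
 * Concretely (an illustrative restatement of the surrounding code): with no
 * input hook installed, select() blocks until rl_instream is readable; with
 * a hook installed it wakes every 100 ms so the hook can run between
 * keystrokes:
 *
 *     timeoutp = PyOS_InputHook ? &timeout : NULL;   // NULL means block
 *     select(fileno(rl_instream) + 1, &selectset, NULL, NULL, timeoutp);
 *     if (PyOS_InputHook) PyOS_InputHook();
 *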
*/ struct timeval *timeoutp = NULL; if (PyOS_InputHook) timeoutp = &timeout; #ifdef HAVE_RL_RESIZE_TERMINAL /* Update readline's view of the window size after SIGWINCH */ if (sigwinch_received) { sigwinch_received = 0; rl_resize_terminal(); } #endif FD_SET(fileno(rl_instream), &selectset); /* select resets selectset if no input was available */ has_input = select(fileno(rl_instream) + 1, &selectset, NULL, NULL, timeoutp); err = errno; if(PyOS_InputHook) PyOS_InputHook(); } if (has_input > 0) { rl_callback_read_char(); } else if (err == EINTR) { int s; PyEval_RestoreThread(_PyOS_ReadlineTState); s = PyErr_CheckSignals(); PyEval_SaveThread(); if (s < 0) { rl_free_line_state(); #if defined(RL_READLINE_VERSION) && RL_READLINE_VERSION >= 0x0700 rl_callback_sigcleanup(); #endif rl_cleanup_after_signal(); rl_callback_handler_remove(); *signal = 1; completed_input_string = NULL; } } } return completed_input_string; } static char * call_readline(FILE *sys_stdin, FILE *sys_stdout, const char *prompt) { size_t n; char *p; int signal; #ifdef SAVE_LOCALE char *saved_locale = strdup(setlocale(LC_CTYPE, NULL)); if (!saved_locale) Py_FatalError("not enough memory to save locale"); _Py_SetLocaleFromEnv(LC_CTYPE); #endif if (sys_stdin != rl_instream || sys_stdout != rl_outstream) { rl_instream = sys_stdin; rl_outstream = sys_stdout; #ifdef HAVE_RL_COMPLETION_APPEND_CHARACTER rl_prep_terminal (1); #endif } p = readline_until_enter_or_signal(prompt, &signal); /* we got an interrupt signal */ if (signal) { RESTORE_LOCALE(saved_locale) return NULL; } /* We got an EOF, return an empty string. */ if (p == NULL) { p = PyMem_RawMalloc(1); if (p != NULL) *p = '\0'; RESTORE_LOCALE(saved_locale) return p; } /* we have a valid line */ n = strlen(p); if (should_auto_add_history && n > 0) { const char *line; int length = _py_get_history_length_lock_held(); if (length > 0) { HIST_ENTRY *hist_ent; if (using_libedit_emulation) { /* handle older 0-based or newer 1-based indexing */ hist_ent = history_get(length + libedit_history_start - 1); } else hist_ent = history_get(length); line = hist_ent ? hist_ent->line : ""; } else line = ""; if (strcmp(p, line)) add_history(p); } /* Copy the malloc'ed buffer into a PyMem_Malloc'ed one and release the original. 
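 *
 * (In the code below the copy is actually made with PyMem_RawMalloc(), and
 * a trailing newline is appended to terminate the returned line.
 * Illustrative shape, for input "x" with n == 1:
 *
 *     p = PyMem_RawMalloc(n + 2);
 *     memcpy(p, q, n); p[n] = '\n'; p[n+1] = '\0';   // p is now "x\n"
 *     free(q);                       // release readline's malloc'ed buffer
 * )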
*/ char *q = p; p = PyMem_RawMalloc(n+2); if (p != NULL) { memcpy(p, q, n); p[n] = '\n'; p[n+1] = '\0'; } free(q); RESTORE_LOCALE(saved_locale) return p; } /* Initialize the module */ PyDoc_STRVAR(doc_module, "Importing this module enables command line editing using GNU readline."); PyDoc_STRVAR(doc_module_le, "Importing this module enables command line editing using libedit readline."); static struct PyModuleDef readlinemodule = { PyModuleDef_HEAD_INIT, "readline", doc_module, sizeof(readlinestate), readline_methods, NULL, readline_traverse, readline_clear, readline_free }; PyMODINIT_FUNC PyInit_readline(void) { const char *backend = "readline"; PyObject *m; readlinestate *mod_state; if (strncmp(rl_library_version, libedit_version_tag, strlen(libedit_version_tag)) == 0) { using_libedit_emulation = 1; } if (using_libedit_emulation) { readlinemodule.m_doc = doc_module_le; backend = "editline"; } m = PyModule_Create(&readlinemodule); if (m == NULL) return NULL; #ifdef Py_GIL_DISABLED PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); #endif if (PyModule_AddIntConstant(m, "_READLINE_VERSION", RL_READLINE_VERSION) < 0) { goto error; } if (PyModule_AddIntConstant(m, "_READLINE_RUNTIME_VERSION", rl_readline_version) < 0) { goto error; } if (PyModule_AddStringConstant(m, "_READLINE_LIBRARY_VERSION", rl_library_version) < 0) { goto error; } if (PyModule_AddStringConstant(m, "backend", backend) < 0) { goto error; } mod_state = (readlinestate *) PyModule_GetState(m); if (mod_state == NULL){ goto error; } PyOS_ReadlineFunctionPointer = call_readline; if (setup_readline(mod_state) < 0) { PyErr_NoMemory(); goto error; } if (PyErr_Occurred()){ goto error; } return m; error: Py_DECREF(m); return NULL; } /* interpreters module */ /* low-level access to interpreter primitives */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "pycore_crossinterp.h" // _PyXIData_t #include "pycore_interp.h" // _PyInterpreterState_LookUpID() #include "pycore_pystate.h" // _PyInterpreterState_GetIDObject() #ifdef MS_WINDOWS #ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN #endif #include // SwitchToThread() #elif defined(HAVE_SCHED_H) #include // sched_yield() #endif #define REGISTERS_HEAP_TYPES #define HAS_FALLBACK #define HAS_UNBOUND_ITEMS #include "_interpreters_common.h" #undef HAS_UNBOUND_ITEMS #undef HAS_FALLBACK #undef REGISTERS_HEAP_TYPES /* This module has the following process-global state: _globals (static struct globals): mutex (PyMutex) module_count (int) channels (struct _channels): numopen (int64_t) next_id; (int64_t) mutex (PyThread_type_lock) head (linked list of struct _channelref *): cid (int64_t) objcount (Py_ssize_t) next (struct _channelref *): ... chan (struct _channel *): open (int) mutex (PyThread_type_lock) closing (struct _channel_closing *): ref (struct _channelref *): ... ends (struct _channelends *): numsendopen (int64_t) numrecvopen (int64_t) send (struct _channelend *): interpid (int64_t) open (int) next (struct _channelend *) recv (struct _channelend *): ... queue (struct _channelqueue *): count (int64_t) first (struct _channelitem *): next (struct _channelitem *): ... data (_PyXIData_t *): data (void *) obj (PyObject *) interpid (int64_t) new_object (xid_newobjfunc) free (xid_freefunc) last (struct _channelitem *): ... 
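
   (Illustrative aside, not part of the state listing above.)  Each channel's
   queue is a plain singly linked list guarded by the channel's mutex;
   sending appends at the tail and receiving pops at the head, as in
   _channelqueue_put() further below:

       queue->count += 1;
       if (queue->first == NULL) queue->first = item;
       else queue->last->next = item;
       queue->last = item;
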
The above state includes the following allocations by the module: * 1 top-level mutex (to protect the rest of the state) * for each channel: * 1 struct _channelref * 1 struct _channel * 0-1 struct _channel_closing * 1 struct _channelends * 2 struct _channelend * 1 struct _channelqueue * for each item in each channel: * 1 struct _channelitem * 1 _PyXIData_t The only objects in that global state are the references held by each channel's queue, which are safely managed via the _PyXIData_*() API.. The module does not create any objects that are shared globally. */ #define MODULE_NAME _interpchannels #define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) #define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) #define GLOBAL_MALLOC(TYPE) \ PyMem_RawMalloc(sizeof(TYPE)) #define GLOBAL_FREE(VAR) \ PyMem_RawFree(VAR) #define XID_IGNORE_EXC 1 #define XID_FREE 2 static int _release_xid_data(_PyXIData_t *data, int flags) { int ignoreexc = flags & XID_IGNORE_EXC; PyObject *exc; if (ignoreexc) { exc = PyErr_GetRaisedException(); } int res; if (flags & XID_FREE) { res = _PyXIData_ReleaseAndRawFree(data); } else { res = _PyXIData_Release(data); } if (res < 0) { /* The owning interpreter is already destroyed. */ if (ignoreexc) { // XXX Emit a warning? PyErr_Clear(); } } if (flags & XID_FREE) { /* Either way, we free the data. */ } if (ignoreexc) { PyErr_SetRaisedException(exc); } return res; } static PyInterpreterState * _get_current_interp(void) { // PyInterpreterState_Get() aborts if lookup fails, so don't need // to check the result for NULL. return PyInterpreterState_Get(); } static PyObject * _get_current_module(void) { PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); if (name == NULL) { return NULL; } PyObject *mod = PyImport_GetModule(name); Py_DECREF(name); if (mod == NULL) { return NULL; } assert(mod != Py_None); return mod; } static PyObject * get_module_from_owned_type(PyTypeObject *cls) { assert(cls != NULL); return _get_current_module(); // XXX Use the more efficient API now that we use heap types: //return PyType_GetModule(cls); } static struct PyModuleDef moduledef; static PyObject * get_module_from_type(PyTypeObject *cls) { assert(cls != NULL); return _get_current_module(); // XXX Use the more efficient API now that we use heap types: //return PyType_GetModuleByDef(cls, &moduledef); } static PyObject * add_new_exception(PyObject *mod, const char *name, PyObject *base) { assert(!PyObject_HasAttrStringWithError(mod, name)); PyObject *exctype = PyErr_NewException(name, base, NULL); if (exctype == NULL) { return NULL; } int res = PyModule_AddType(mod, (PyTypeObject *)exctype); if (res < 0) { Py_DECREF(exctype); return NULL; } return exctype; } #define ADD_NEW_EXCEPTION(MOD, NAME, BASE) \ add_new_exception(MOD, MODULE_NAME_STR "." Py_STRINGIFY(NAME), BASE) static int wait_for_lock(PyThread_type_lock mutex, PY_TIMEOUT_T timeout) { PyLockStatus res = PyThread_acquire_lock_timed_with_retries(mutex, timeout); if (res == PY_LOCK_INTR) { /* KeyboardInterrupt, etc. 
*/ assert(PyErr_Occurred()); return -1; } else if (res == PY_LOCK_FAILURE) { assert(!PyErr_Occurred()); assert(timeout > 0); PyErr_SetString(PyExc_TimeoutError, "timed out"); return -1; } assert(res == PY_LOCK_ACQUIRED); PyThread_release_lock(mutex); return 0; } static int ensure_highlevel_module_loaded(void) { PyObject *highlevel = PyImport_ImportModule("concurrent.interpreters._channels"); if (highlevel == NULL) { PyErr_Clear(); highlevel = PyImport_ImportModule("test.support.channels"); if (highlevel == NULL) { return -1; } } Py_DECREF(highlevel); return 0; } /* module state *************************************************************/ typedef struct { /* Added at runtime by interpreters module. */ PyTypeObject *send_channel_type; PyTypeObject *recv_channel_type; /* heap types */ PyTypeObject *ChannelInfoType; PyTypeObject *ChannelIDType; /* exceptions */ PyObject *ChannelError; PyObject *ChannelNotFoundError; PyObject *ChannelClosedError; PyObject *ChannelEmptyError; PyObject *ChannelNotEmptyError; } module_state; static inline module_state * get_module_state(PyObject *mod) { assert(mod != NULL); module_state *state = PyModule_GetState(mod); assert(state != NULL); return state; } static module_state * _get_current_module_state(void) { PyObject *mod = _get_current_module(); if (mod == NULL) { mod = PyImport_ImportModule(MODULE_NAME_STR); if (mod == NULL) { return NULL; } } module_state *state = get_module_state(mod); Py_DECREF(mod); return state; } static int traverse_module_state(module_state *state, visitproc visit, void *arg) { /* external types */ Py_VISIT(state->send_channel_type); Py_VISIT(state->recv_channel_type); /* heap types */ Py_VISIT(state->ChannelInfoType); Py_VISIT(state->ChannelIDType); /* exceptions */ Py_VISIT(state->ChannelError); Py_VISIT(state->ChannelNotFoundError); Py_VISIT(state->ChannelClosedError); Py_VISIT(state->ChannelEmptyError); Py_VISIT(state->ChannelNotEmptyError); return 0; } static void clear_xid_types(module_state *state) { /* external types */ if (state->send_channel_type != NULL) { (void)clear_xid_class(state->send_channel_type); Py_CLEAR(state->send_channel_type); } if (state->recv_channel_type != NULL) { (void)clear_xid_class(state->recv_channel_type); Py_CLEAR(state->recv_channel_type); } /* heap types */ if (state->ChannelIDType != NULL) { (void)clear_xid_class(state->ChannelIDType); Py_CLEAR(state->ChannelIDType); } } static int clear_module_state(module_state *state) { clear_xid_types(state); /* heap types */ Py_CLEAR(state->ChannelInfoType); /* exceptions */ Py_CLEAR(state->ChannelError); Py_CLEAR(state->ChannelNotFoundError); Py_CLEAR(state->ChannelClosedError); Py_CLEAR(state->ChannelEmptyError); Py_CLEAR(state->ChannelNotEmptyError); return 0; } /* channel-specific code ****************************************************/ #define CHANNEL_SEND 1 #define CHANNEL_BOTH 0 #define CHANNEL_RECV -1 /* channel errors */ #define ERR_CHANNEL_NOT_FOUND -2 #define ERR_CHANNEL_CLOSED -3 #define ERR_CHANNEL_INTERP_CLOSED -4 #define ERR_CHANNEL_EMPTY -5 #define ERR_CHANNEL_NOT_EMPTY -6 #define ERR_CHANNEL_MUTEX_INIT -7 #define ERR_CHANNELS_MUTEX_INIT -8 #define ERR_NO_NEXT_CHANNEL_ID -9 #define ERR_CHANNEL_CLOSED_WAITING -10 static int exceptions_init(PyObject *mod) { module_state *state = get_module_state(mod); if (state == NULL) { return -1; } #define ADD(NAME, BASE) \ do { \ assert(state->NAME == NULL); \ state->NAME = ADD_NEW_EXCEPTION(mod, NAME, BASE); \ if (state->NAME == NULL) { \ return -1; \ } \ } while (0) // A channel-related operation 
failed. ADD(ChannelError, PyExc_RuntimeError); // An operation tried to use a channel that doesn't exist. ADD(ChannelNotFoundError, state->ChannelError); // An operation tried to use a closed channel. ADD(ChannelClosedError, state->ChannelError); // An operation tried to pop from an empty channel. ADD(ChannelEmptyError, state->ChannelError); // An operation tried to close a non-empty channel. ADD(ChannelNotEmptyError, state->ChannelError); #undef ADD return 0; } static int handle_channel_error(int err, PyObject *mod, int64_t cid) { if (err == 0) { assert(!PyErr_Occurred()); return 0; } assert(err < 0); module_state *state = get_module_state(mod); assert(state != NULL); if (err == ERR_CHANNEL_NOT_FOUND) { PyErr_Format(state->ChannelNotFoundError, "channel %" PRId64 " not found", cid); } else if (err == ERR_CHANNEL_CLOSED) { PyErr_Format(state->ChannelClosedError, "channel %" PRId64 " is closed", cid); } else if (err == ERR_CHANNEL_CLOSED_WAITING) { PyErr_Format(state->ChannelClosedError, "channel %" PRId64 " has closed", cid); } else if (err == ERR_CHANNEL_INTERP_CLOSED) { PyErr_Format(state->ChannelClosedError, "channel %" PRId64 " is already closed", cid); } else if (err == ERR_CHANNEL_EMPTY) { PyErr_Format(state->ChannelEmptyError, "channel %" PRId64 " is empty", cid); } else if (err == ERR_CHANNEL_NOT_EMPTY) { PyErr_Format(state->ChannelNotEmptyError, "channel %" PRId64 " may not be closed " "if not empty (try force=True)", cid); } else if (err == ERR_CHANNEL_MUTEX_INIT) { PyErr_SetString(state->ChannelError, "can't initialize mutex for new channel"); } else if (err == ERR_CHANNELS_MUTEX_INIT) { PyErr_SetString(state->ChannelError, "can't initialize mutex for channel management"); } else if (err == ERR_NO_NEXT_CHANNEL_ID) { PyErr_SetString(state->ChannelError, "failed to get a channel ID"); } else { assert(PyErr_Occurred()); } return 1; } /* the channel queue */ typedef uintptr_t _channelitem_id_t; typedef struct wait_info { PyThread_type_lock mutex; enum { WAITING_NO_STATUS = 0, WAITING_ACQUIRED = 1, WAITING_RELEASING = 2, WAITING_RELEASED = 3, } status; int received; _channelitem_id_t itemid; } _waiting_t; static int _waiting_init(_waiting_t *waiting) { PyThread_type_lock mutex = PyThread_allocate_lock(); if (mutex == NULL) { PyErr_NoMemory(); return -1; } *waiting = (_waiting_t){ .mutex = mutex, .status = WAITING_NO_STATUS, }; return 0; } static void _waiting_clear(_waiting_t *waiting) { assert(waiting->status != WAITING_ACQUIRED && waiting->status != WAITING_RELEASING); if (waiting->mutex != NULL) { PyThread_free_lock(waiting->mutex); waiting->mutex = NULL; } } static _channelitem_id_t _waiting_get_itemid(_waiting_t *waiting) { return waiting->itemid; } static void _waiting_acquire(_waiting_t *waiting) { assert(waiting->status == WAITING_NO_STATUS); PyThread_acquire_lock(waiting->mutex, NOWAIT_LOCK); waiting->status = WAITING_ACQUIRED; } static void _waiting_release(_waiting_t *waiting, int received) { assert(waiting->mutex != NULL); assert(waiting->status == WAITING_ACQUIRED); assert(!waiting->received); waiting->status = WAITING_RELEASING; if (waiting->received != received) { assert(received == 1); waiting->received = received; } waiting->status = WAITING_RELEASED; PyThread_release_lock(waiting->mutex); } static void _waiting_finish_releasing(_waiting_t *waiting) { while (waiting->status == WAITING_RELEASING) { #ifdef MS_WINDOWS SwitchToThread(); #elif defined(HAVE_SCHED_H) sched_yield(); #endif } } struct _channelitem; typedef struct _channelitem { /* The interpreter that added 
the item to the queue. The actual bound interpid is found in item->data. This is necessary because item->data might be NULL, meaning the interpreter has been destroyed. */ int64_t interpid; _PyXIData_t *data; _waiting_t *waiting; unboundop_t unboundop; struct _channelitem *next; } _channelitem; static inline _channelitem_id_t _channelitem_ID(_channelitem *item) { return (_channelitem_id_t)item; } static void _channelitem_init(_channelitem *item, int64_t interpid, _PyXIData_t *data, _waiting_t *waiting, unboundop_t unboundop) { if (interpid < 0) { interpid = _get_interpid(data); } else { assert(data == NULL || _PyXIData_INTERPID(data) < 0 || interpid == _PyXIData_INTERPID(data)); } *item = (_channelitem){ .interpid = interpid, .data = data, .waiting = waiting, .unboundop = unboundop, }; if (waiting != NULL) { waiting->itemid = _channelitem_ID(item); } } static void _channelitem_clear_data(_channelitem *item, int removed) { if (item->data != NULL) { // It was allocated in channel_send(). (void)_release_xid_data(item->data, XID_IGNORE_EXC | XID_FREE); item->data = NULL; } if (item->waiting != NULL && removed) { if (item->waiting->status == WAITING_ACQUIRED) { _waiting_release(item->waiting, 0); } item->waiting = NULL; } } static void _channelitem_clear(_channelitem *item) { item->next = NULL; _channelitem_clear_data(item, 1); } static _channelitem * _channelitem_new(int64_t interpid, _PyXIData_t *data, _waiting_t *waiting, unboundop_t unboundop) { _channelitem *item = GLOBAL_MALLOC(_channelitem); if (item == NULL) { PyErr_NoMemory(); return NULL; } _channelitem_init(item, interpid, data, waiting, unboundop); return item; } static void _channelitem_free(_channelitem *item) { _channelitem_clear(item); GLOBAL_FREE(item); } static void _channelitem_free_all(_channelitem *item) { while (item != NULL) { _channelitem *last = item; item = item->next; _channelitem_free(last); } } static void _channelitem_popped(_channelitem *item, _PyXIData_t **p_data, _waiting_t **p_waiting, int *p_unboundop) { assert(item->waiting == NULL || item->waiting->status == WAITING_ACQUIRED); *p_data = item->data; *p_waiting = item->waiting; *p_unboundop = item->unboundop; // We clear them here, so they won't be released in _channelitem_clear(). item->data = NULL; item->waiting = NULL; _channelitem_free(item); } static int _channelitem_clear_interpreter(_channelitem *item) { assert(item->interpid >= 0); if (item->data == NULL) { // Its interpreter was already cleared (or it was never bound). // For UNBOUND_REMOVE it should have been freed at that time. assert(item->unboundop != UNBOUND_REMOVE); return 0; } assert(_PyXIData_INTERPID(item->data) == item->interpid); switch (item->unboundop) { case UNBOUND_REMOVE: // The caller must free/clear it. return 1; case UNBOUND_ERROR: case UNBOUND_REPLACE: // We won't need the cross-interpreter data later // so we completely throw it away. 
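/* Summary of the unbound-op handling here (illustrative, matching the
 * cases above and below):
 *   UNBOUND_REMOVE                  -> return 1: the caller unlinks and frees the item;
 *   UNBOUND_ERROR / UNBOUND_REPLACE -> the item stays queued but its
 *   cross-interpreter data is dropped, which is what the next call does:
 *       _channelitem_clear_data(item, 0);  // removed == 0: don't release a waiting sender
 */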
_channelitem_clear_data(item, 0); return 0; default: Py_FatalError("not reachable"); return -1; } } typedef struct _channelqueue { int64_t count; _channelitem *first; _channelitem *last; } _channelqueue; static _channelqueue * _channelqueue_new(void) { _channelqueue *queue = GLOBAL_MALLOC(_channelqueue); if (queue == NULL) { PyErr_NoMemory(); return NULL; } queue->count = 0; queue->first = NULL; queue->last = NULL; return queue; } static void _channelqueue_clear(_channelqueue *queue) { _channelitem_free_all(queue->first); queue->count = 0; queue->first = NULL; queue->last = NULL; } static void _channelqueue_free(_channelqueue *queue) { _channelqueue_clear(queue); GLOBAL_FREE(queue); } static int _channelqueue_put(_channelqueue *queue, int64_t interpid, _PyXIData_t *data, _waiting_t *waiting, unboundop_t unboundop) { _channelitem *item = _channelitem_new(interpid, data, waiting, unboundop); if (item == NULL) { return -1; } queue->count += 1; if (queue->first == NULL) { queue->first = item; } else { queue->last->next = item; } queue->last = item; if (waiting != NULL) { _waiting_acquire(waiting); } return 0; } static int _channelqueue_get(_channelqueue *queue, _PyXIData_t **p_data, _waiting_t **p_waiting, int *p_unboundop) { _channelitem *item = queue->first; if (item == NULL) { return ERR_CHANNEL_EMPTY; } queue->first = item->next; if (queue->last == item) { queue->last = NULL; } queue->count -= 1; _channelitem_popped(item, p_data, p_waiting, p_unboundop); return 0; } static int _channelqueue_find(_channelqueue *queue, _channelitem_id_t itemid, _channelitem **p_item, _channelitem **p_prev) { _channelitem *prev = NULL; _channelitem *item = NULL; if (queue->first != NULL) { if (_channelitem_ID(queue->first) == itemid) { item = queue->first; } else { prev = queue->first; while (prev->next != NULL) { if (_channelitem_ID(prev->next) == itemid) { item = prev->next; break; } prev = prev->next; } if (item == NULL) { prev = NULL; } } } if (p_item != NULL) { *p_item = item; } if (p_prev != NULL) { *p_prev = prev; } return (item != NULL); } static void _channelqueue_remove(_channelqueue *queue, _channelitem_id_t itemid, _PyXIData_t **p_data, _waiting_t **p_waiting) { _channelitem *prev = NULL; _channelitem *item = NULL; int found = _channelqueue_find(queue, itemid, &item, &prev); if (!found) { return; } assert(item->waiting != NULL); assert(!item->waiting->received); if (prev == NULL) { assert(queue->first == item); queue->first = item->next; } else { assert(queue->first != item); assert(prev->next == item); prev->next = item->next; } item->next = NULL; if (queue->last == item) { queue->last = prev; } queue->count -= 1; unboundop_t unboundop; _channelitem_popped(item, p_data, p_waiting, &unboundop); } static void _channelqueue_clear_interpreter(_channelqueue *queue, int64_t interpid) { _channelitem *prev = NULL; _channelitem *next = queue->first; while (next != NULL) { _channelitem *item = next; next = item->next; int remove = (item->interpid == interpid) ? 
_channelitem_clear_interpreter(item) : 0; if (remove) { _channelitem_free(item); if (prev == NULL) { queue->first = next; } else { prev->next = next; } queue->count -= 1; } else { prev = item; } } } /* channel-interpreter associations */ struct _channelend; typedef struct _channelend { struct _channelend *next; int64_t interpid; int open; } _channelend; static _channelend * _channelend_new(int64_t interpid) { _channelend *end = GLOBAL_MALLOC(_channelend); if (end == NULL) { PyErr_NoMemory(); return NULL; } end->next = NULL; end->interpid = interpid; end->open = 1; return end; } static void _channelend_free(_channelend *end) { GLOBAL_FREE(end); } static void _channelend_free_all(_channelend *end) { while (end != NULL) { _channelend *last = end; end = end->next; _channelend_free(last); } } static _channelend * _channelend_find(_channelend *first, int64_t interpid, _channelend **pprev) { _channelend *prev = NULL; _channelend *end = first; while (end != NULL) { if (end->interpid == interpid) { break; } prev = end; end = end->next; } if (pprev != NULL) { *pprev = prev; } return end; } typedef struct _channelassociations { // Note that the list entries are never removed for interpreter // for which the channel is closed. This should not be a problem in // practice. Also, a channel isn't automatically closed when an // interpreter is destroyed. int64_t numsendopen; int64_t numrecvopen; _channelend *send; _channelend *recv; } _channelends; static _channelends * _channelends_new(void) { _channelends *ends = GLOBAL_MALLOC(_channelends); if (ends== NULL) { return NULL; } ends->numsendopen = 0; ends->numrecvopen = 0; ends->send = NULL; ends->recv = NULL; return ends; } static void _channelends_clear(_channelends *ends) { _channelend_free_all(ends->send); ends->send = NULL; ends->numsendopen = 0; _channelend_free_all(ends->recv); ends->recv = NULL; ends->numrecvopen = 0; } static void _channelends_free(_channelends *ends) { _channelends_clear(ends); GLOBAL_FREE(ends); } static _channelend * _channelends_add(_channelends *ends, _channelend *prev, int64_t interpid, int send) { _channelend *end = _channelend_new(interpid); if (end == NULL) { return NULL; } if (prev == NULL) { if (send) { ends->send = end; } else { ends->recv = end; } } else { prev->next = end; } if (send) { ends->numsendopen += 1; } else { ends->numrecvopen += 1; } return end; } static int _channelends_associate(_channelends *ends, int64_t interpid, int send) { _channelend *prev; _channelend *end = _channelend_find(send ? ends->send : ends->recv, interpid, &prev); if (end != NULL) { if (!end->open) { return ERR_CHANNEL_CLOSED; } // already associated return 0; } if (_channelends_add(ends, prev, interpid, send) == NULL) { return -1; } return 0; } static int _channelends_is_open(_channelends *ends) { if (ends->numsendopen != 0 || ends->numrecvopen != 0) { // At least one interpreter is still associated with the channel // (and hasn't been released). return 1; } // XXX This is wrong if an end can ever be removed. if (ends->send == NULL && ends->recv == NULL) { // The channel has never had any interpreters associated with it. 
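// A channel with no ends recorded at all is still reported as open,
// presumably so a freshly created (never used) channel does not read
// as closed.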
return 1; } return 0; } static void _channelends_release_end(_channelends *ends, _channelend *end, int send) { end->open = 0; if (send) { ends->numsendopen -= 1; } else { ends->numrecvopen -= 1; } } static int _channelends_release_interpreter(_channelends *ends, int64_t interpid, int which) { _channelend *prev; _channelend *end; if (which >= 0) { // send/both end = _channelend_find(ends->send, interpid, &prev); if (end == NULL) { // never associated so add it end = _channelends_add(ends, prev, interpid, 1); if (end == NULL) { return -1; } } _channelends_release_end(ends, end, 1); } if (which <= 0) { // recv/both end = _channelend_find(ends->recv, interpid, &prev); if (end == NULL) { // never associated so add it end = _channelends_add(ends, prev, interpid, 0); if (end == NULL) { return -1; } } _channelends_release_end(ends, end, 0); } return 0; } static void _channelends_release_all(_channelends *ends, int which, int force) { // XXX Handle the ends. // XXX Handle force is True. // Ensure all the "send"-associated interpreters are closed. _channelend *end; for (end = ends->send; end != NULL; end = end->next) { _channelends_release_end(ends, end, 1); } // Ensure all the "recv"-associated interpreters are closed. for (end = ends->recv; end != NULL; end = end->next) { _channelends_release_end(ends, end, 0); } } static void _channelends_clear_interpreter(_channelends *ends, int64_t interpid) { // XXX Actually remove the entries? _channelend *end; end = _channelend_find(ends->send, interpid, NULL); if (end != NULL) { _channelends_release_end(ends, end, 1); } end = _channelend_find(ends->recv, interpid, NULL); if (end != NULL) { _channelends_release_end(ends, end, 0); } } /* each channel's state */ struct _channel; struct _channel_closing; static void _channel_clear_closing(struct _channel *); static void _channel_finish_closing(struct _channel *); typedef struct _channel { PyThread_type_lock mutex; _channelqueue *queue; _channelends *ends; struct _channeldefaults { unboundop_t unboundop; xidata_fallback_t fallback; } defaults; int open; struct _channel_closing *closing; } _channel_state; static _channel_state * _channel_new(PyThread_type_lock mutex, struct _channeldefaults defaults) { assert(check_unbound(defaults.unboundop)); _channel_state *chan = GLOBAL_MALLOC(_channel_state); if (chan == NULL) { return NULL; } chan->mutex = mutex; chan->queue = _channelqueue_new(); if (chan->queue == NULL) { GLOBAL_FREE(chan); return NULL; } chan->ends = _channelends_new(); if (chan->ends == NULL) { _channelqueue_free(chan->queue); GLOBAL_FREE(chan); return NULL; } chan->defaults = defaults; chan->open = 1; chan->closing = NULL; return chan; } static void _channel_free(_channel_state *chan) { _channel_clear_closing(chan); PyThread_acquire_lock(chan->mutex, WAIT_LOCK); _channelqueue_free(chan->queue); _channelends_free(chan->ends); PyThread_release_lock(chan->mutex); PyThread_free_lock(chan->mutex); GLOBAL_FREE(chan); } static int _channel_add(_channel_state *chan, int64_t interpid, _PyXIData_t *data, _waiting_t *waiting, unboundop_t unboundop) { int res = -1; PyThread_acquire_lock(chan->mutex, WAIT_LOCK); if (!chan->open) { res = ERR_CHANNEL_CLOSED; goto done; } if (_channelends_associate(chan->ends, interpid, 1) != 0) { res = ERR_CHANNEL_INTERP_CLOSED; goto done; } if (_channelqueue_put(chan->queue, interpid, data, waiting, unboundop) != 0) { goto done; } // Any errors past this point must cause a _waiting_release() call. 
res = 0; done: PyThread_release_lock(chan->mutex); return res; } static int _channel_next(_channel_state *chan, int64_t interpid, _PyXIData_t **p_data, _waiting_t **p_waiting, int *p_unboundop) { int err = 0; PyThread_acquire_lock(chan->mutex, WAIT_LOCK); if (!chan->open) { err = ERR_CHANNEL_CLOSED; goto done; } if (_channelends_associate(chan->ends, interpid, 0) != 0) { err = ERR_CHANNEL_INTERP_CLOSED; goto done; } int empty = _channelqueue_get(chan->queue, p_data, p_waiting, p_unboundop); assert(!PyErr_Occurred()); if (empty) { assert(empty == ERR_CHANNEL_EMPTY); if (chan->closing != NULL) { chan->open = 0; } err = ERR_CHANNEL_EMPTY; goto done; } done: PyThread_release_lock(chan->mutex); if (chan->queue->count == 0) { _channel_finish_closing(chan); } return err; } static void _channel_remove(_channel_state *chan, _channelitem_id_t itemid) { _PyXIData_t *data = NULL; _waiting_t *waiting = NULL; PyThread_acquire_lock(chan->mutex, WAIT_LOCK); _channelqueue_remove(chan->queue, itemid, &data, &waiting); PyThread_release_lock(chan->mutex); (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); if (waiting != NULL) { _waiting_release(waiting, 0); } if (chan->queue->count == 0) { _channel_finish_closing(chan); } } static int _channel_release_interpreter(_channel_state *chan, int64_t interpid, int end) { PyThread_acquire_lock(chan->mutex, WAIT_LOCK); int res = -1; if (!chan->open) { res = ERR_CHANNEL_CLOSED; goto done; } if (_channelends_release_interpreter(chan->ends, interpid, end) != 0) { goto done; } chan->open = _channelends_is_open(chan->ends); // XXX Clear the queue if not empty? // XXX Activate the "closing" mechanism? res = 0; done: PyThread_release_lock(chan->mutex); return res; } static int _channel_release_all(_channel_state *chan, int end, int force) { int res = -1; PyThread_acquire_lock(chan->mutex, WAIT_LOCK); if (!chan->open) { res = ERR_CHANNEL_CLOSED; goto done; } if (!force && chan->queue->count > 0) { res = ERR_CHANNEL_NOT_EMPTY; goto done; } // XXX Clear the queue? chan->open = 0; // We *could* also just leave these in place, since we've marked // the channel as closed already. _channelends_release_all(chan->ends, end, force); res = 0; done: PyThread_release_lock(chan->mutex); return res; } static void _channel_clear_interpreter(_channel_state *chan, int64_t interpid) { PyThread_acquire_lock(chan->mutex, WAIT_LOCK); _channelqueue_clear_interpreter(chan->queue, interpid); _channelends_clear_interpreter(chan->ends, interpid); chan->open = _channelends_is_open(chan->ends); PyThread_release_lock(chan->mutex); } /* the set of channels */ struct _channelref; typedef struct _channelref { int64_t cid; _channel_state *chan; struct _channelref *next; // The number of ChannelID objects referring to this channel. 
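// Incremented in _channels_add_id_object() and decremented in
// _channels_release_cid_object(); when it drops back to zero the
// channel itself is destroyed.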
Py_ssize_t objcount; } _channelref; static _channelref * _channelref_new(int64_t cid, _channel_state *chan) { _channelref *ref = GLOBAL_MALLOC(_channelref); if (ref == NULL) { return NULL; } ref->cid = cid; ref->chan = chan; ref->next = NULL; ref->objcount = 0; return ref; } //static void //_channelref_clear(_channelref *ref) //{ // ref->cid = -1; // ref->chan = NULL; // ref->next = NULL; // ref->objcount = 0; //} static void _channelref_free(_channelref *ref) { if (ref->chan != NULL) { _channel_clear_closing(ref->chan); } //_channelref_clear(ref); GLOBAL_FREE(ref); } static _channelref * _channelref_find(_channelref *first, int64_t cid, _channelref **pprev) { _channelref *prev = NULL; _channelref *ref = first; while (ref != NULL) { if (ref->cid == cid) { break; } prev = ref; ref = ref->next; } if (pprev != NULL) { *pprev = prev; } return ref; } typedef struct _channels { PyThread_type_lock mutex; _channelref *head; int64_t numopen; int64_t next_id; } _channels; static void _channels_init(_channels *channels, PyThread_type_lock mutex) { assert(mutex != NULL); assert(channels->mutex == NULL); *channels = (_channels){ .mutex = mutex, .head = NULL, .numopen = 0, .next_id = 0, }; } static void _channels_fini(_channels *channels, PyThread_type_lock *p_mutex) { PyThread_type_lock mutex = channels->mutex; assert(mutex != NULL); PyThread_acquire_lock(mutex, WAIT_LOCK); assert(channels->numopen == 0); assert(channels->head == NULL); *channels = (_channels){0}; PyThread_release_lock(mutex); *p_mutex = mutex; } static int64_t _channels_next_id(_channels *channels) // needs lock { int64_t cid = channels->next_id; if (cid < 0) { /* overflow */ return -1; } channels->next_id += 1; return cid; } static int _channels_lookup(_channels *channels, int64_t cid, PyThread_type_lock *pmutex, _channel_state **res) { int err = -1; _channel_state *chan = NULL; PyThread_acquire_lock(channels->mutex, WAIT_LOCK); if (pmutex != NULL) { *pmutex = NULL; } _channelref *ref = _channelref_find(channels->head, cid, NULL); if (ref == NULL) { err = ERR_CHANNEL_NOT_FOUND; goto done; } if (ref->chan == NULL || !ref->chan->open) { err = ERR_CHANNEL_CLOSED; goto done; } if (pmutex != NULL) { // The mutex will be closed by the caller. *pmutex = channels->mutex; } chan = ref->chan; err = 0; done: if (pmutex == NULL || *pmutex == NULL) { PyThread_release_lock(channels->mutex); } *res = chan; return err; } static int64_t _channels_add(_channels *channels, _channel_state *chan) { int64_t cid = -1; PyThread_acquire_lock(channels->mutex, WAIT_LOCK); // Create a new ref. int64_t _cid = _channels_next_id(channels); if (_cid < 0) { cid = ERR_NO_NEXT_CHANNEL_ID; goto done; } _channelref *ref = _channelref_new(_cid, chan); if (ref == NULL) { goto done; } // Add it to the list. // We assume that the channel is a new one (not already in the list). 
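// New refs are simply pushed onto the front of the singly linked list.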
ref->next = channels->head; channels->head = ref; channels->numopen += 1; cid = _cid; done: PyThread_release_lock(channels->mutex); return cid; } /* forward */ static int _channel_set_closing(_channelref *, PyThread_type_lock); static int _channels_close(_channels *channels, int64_t cid, _channel_state **pchan, int end, int force) { int res = -1; PyThread_acquire_lock(channels->mutex, WAIT_LOCK); if (pchan != NULL) { *pchan = NULL; } _channelref *ref = _channelref_find(channels->head, cid, NULL); if (ref == NULL) { res = ERR_CHANNEL_NOT_FOUND; goto done; } if (ref->chan == NULL) { res = ERR_CHANNEL_CLOSED; goto done; } else if (!force && end == CHANNEL_SEND && ref->chan->closing != NULL) { res = ERR_CHANNEL_CLOSED; goto done; } else { int err = _channel_release_all(ref->chan, end, force); if (err != 0) { if (end == CHANNEL_SEND && err == ERR_CHANNEL_NOT_EMPTY) { if (ref->chan->closing != NULL) { res = ERR_CHANNEL_CLOSED; goto done; } // Mark the channel as closing and return. The channel // will be cleaned up in _channel_next(). PyErr_Clear(); int err = _channel_set_closing(ref, channels->mutex); if (err != 0) { res = err; goto done; } if (pchan != NULL) { *pchan = ref->chan; } res = 0; } else { res = err; } goto done; } if (pchan != NULL) { *pchan = ref->chan; } else { _channel_free(ref->chan); } ref->chan = NULL; } res = 0; done: PyThread_release_lock(channels->mutex); return res; } static void _channels_remove_ref(_channels *channels, _channelref *ref, _channelref *prev, _channel_state **pchan) { if (ref == channels->head) { channels->head = ref->next; } else { prev->next = ref->next; } channels->numopen -= 1; if (pchan != NULL) { *pchan = ref->chan; } _channelref_free(ref); } static int _channels_remove(_channels *channels, int64_t cid, _channel_state **pchan) { int res = -1; PyThread_acquire_lock(channels->mutex, WAIT_LOCK); if (pchan != NULL) { *pchan = NULL; } _channelref *prev = NULL; _channelref *ref = _channelref_find(channels->head, cid, &prev); if (ref == NULL) { res = ERR_CHANNEL_NOT_FOUND; goto done; } _channels_remove_ref(channels, ref, prev, pchan); res = 0; done: PyThread_release_lock(channels->mutex); return res; } static int _channels_add_id_object(_channels *channels, int64_t cid) { int res = -1; PyThread_acquire_lock(channels->mutex, WAIT_LOCK); _channelref *ref = _channelref_find(channels->head, cid, NULL); if (ref == NULL) { res = ERR_CHANNEL_NOT_FOUND; goto done; } ref->objcount += 1; res = 0; done: PyThread_release_lock(channels->mutex); return res; } static void _channels_release_cid_object(_channels *channels, int64_t cid) { PyThread_acquire_lock(channels->mutex, WAIT_LOCK); _channelref *prev = NULL; _channelref *ref = _channelref_find(channels->head, cid, &prev); if (ref == NULL) { // Already destroyed. goto done; } ref->objcount -= 1; // Destroy if no longer used. 
if (ref->objcount == 0) { _channel_state *chan = NULL; _channels_remove_ref(channels, ref, prev, &chan); if (chan != NULL) { _channel_free(chan); } } done: PyThread_release_lock(channels->mutex); } struct channel_id_and_info { int64_t id; struct _channeldefaults defaults; }; static struct channel_id_and_info * _channels_list_all(_channels *channels, int64_t *count) { struct channel_id_and_info *cids = NULL; PyThread_acquire_lock(channels->mutex, WAIT_LOCK); struct channel_id_and_info *ids = PyMem_NEW(struct channel_id_and_info, (Py_ssize_t)(channels->numopen)); if (ids == NULL) { goto done; } _channelref *ref = channels->head; for (int64_t i=0; ref != NULL; ref = ref->next, i++) { ids[i] = (struct channel_id_and_info){ .id = ref->cid, .defaults = ref->chan->defaults, }; } *count = channels->numopen; cids = ids; done: PyThread_release_lock(channels->mutex); return cids; } static void _channels_clear_interpreter(_channels *channels, int64_t interpid) { PyThread_acquire_lock(channels->mutex, WAIT_LOCK); _channelref *ref = channels->head; for (; ref != NULL; ref = ref->next) { if (ref->chan != NULL) { _channel_clear_interpreter(ref->chan, interpid); } } PyThread_release_lock(channels->mutex); } /* support for closing non-empty channels */ struct _channel_closing { _channelref *ref; }; static int _channel_set_closing(_channelref *ref, PyThread_type_lock mutex) { _channel_state *chan = ref->chan; if (chan == NULL) { // already closed return 0; } int res = -1; PyThread_acquire_lock(chan->mutex, WAIT_LOCK); if (chan->closing != NULL) { res = ERR_CHANNEL_CLOSED; goto done; } chan->closing = GLOBAL_MALLOC(struct _channel_closing); if (chan->closing == NULL) { goto done; } chan->closing->ref = ref; res = 0; done: PyThread_release_lock(chan->mutex); return res; } static void _channel_clear_closing(_channel_state *chan) { PyThread_acquire_lock(chan->mutex, WAIT_LOCK); if (chan->closing != NULL) { GLOBAL_FREE(chan->closing); chan->closing = NULL; } PyThread_release_lock(chan->mutex); } static void _channel_finish_closing(_channel_state *chan) { struct _channel_closing *closing = chan->closing; if (closing == NULL) { return; } _channelref *ref = closing->ref; _channel_clear_closing(chan); // Do the things that would have been done in _channels_close(). ref->chan = NULL; _channel_free(chan); } /* "high"-level channel-related functions */ // Create a new channel. static int64_t channel_create(_channels *channels, struct _channeldefaults defaults) { PyThread_type_lock mutex = PyThread_allocate_lock(); if (mutex == NULL) { return ERR_CHANNEL_MUTEX_INIT; } _channel_state *chan = _channel_new(mutex, defaults); if (chan == NULL) { PyThread_free_lock(mutex); return -1; } int64_t cid = _channels_add(channels, chan); if (cid < 0) { _channel_free(chan); } return cid; } // Completely destroy the channel. static int channel_destroy(_channels *channels, int64_t cid) { _channel_state *chan = NULL; int err = _channels_remove(channels, cid, &chan); if (err != 0) { return err; } if (chan != NULL) { _channel_free(chan); } return 0; } // Push an object onto the channel. // The current interpreter gets associated with the send end of the channel. // Optionally request to be notified when it is received. static int channel_send(_channels *channels, int64_t cid, PyObject *obj, _waiting_t *waiting, unboundop_t unboundop, xidata_fallback_t fallback) { PyThreadState *tstate = _PyThreadState_GET(); PyInterpreterState *interp = tstate->interp; int64_t interpid = PyInterpreterState_GetID(interp); // Look up the channel. 
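/* On success, _channels_lookup() hands back the global channels mutex
   still held (via 'mutex'), so the channel cannot be removed from the
   registry while the object is converted and queued; every path below
   must release it. */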
PyThread_type_lock mutex = NULL; _channel_state *chan = NULL; int err = _channels_lookup(channels, cid, &mutex, &chan); if (err != 0) { return err; } assert(chan != NULL); // Past this point we are responsible for releasing the mutex. if (chan->closing != NULL) { PyThread_release_lock(mutex); return ERR_CHANNEL_CLOSED; } // Convert the object to cross-interpreter data. _PyXIData_t *data = _PyXIData_New(); if (data == NULL) { PyThread_release_lock(mutex); return -1; } if (_PyObject_GetXIData(tstate, obj, fallback, data) != 0) { PyThread_release_lock(mutex); GLOBAL_FREE(data); return -1; } // Add the data to the channel. int res = _channel_add(chan, interpid, data, waiting, unboundop); PyThread_release_lock(mutex); if (res != 0) { // We may chain an exception here: (void)_release_xid_data(data, 0); GLOBAL_FREE(data); return res; } return 0; } // Basically, un-send an object. static void channel_clear_sent(_channels *channels, int64_t cid, _waiting_t *waiting) { // Look up the channel. PyThread_type_lock mutex = NULL; _channel_state *chan = NULL; int err = _channels_lookup(channels, cid, &mutex, &chan); if (err != 0) { // The channel was already closed, etc. assert(waiting->status == WAITING_RELEASED); return; // Ignore the error. } assert(chan != NULL); // Past this point we are responsible for releasing the mutex. _channelitem_id_t itemid = _waiting_get_itemid(waiting); _channel_remove(chan, itemid); PyThread_release_lock(mutex); } // Like channel_send(), but strictly wait for the object to be received. static int channel_send_wait(_channels *channels, int64_t cid, PyObject *obj, unboundop_t unboundop, PY_TIMEOUT_T timeout, xidata_fallback_t fallback) { // We use a stack variable here, so we must ensure that &waiting // is not held by any channel item at the point this function exits. _waiting_t waiting; if (_waiting_init(&waiting) < 0) { assert(PyErr_Occurred()); return -1; } /* Queue up the object. */ int res = channel_send(channels, cid, obj, &waiting, unboundop, fallback); if (res < 0) { assert(waiting.status == WAITING_NO_STATUS); goto finally; } /* Wait until the object is received. */ if (wait_for_lock(waiting.mutex, timeout) < 0) { assert(PyErr_Occurred()); _waiting_finish_releasing(&waiting); /* The send() call is failing now, so make sure the item won't be received. */ channel_clear_sent(channels, cid, &waiting); assert(waiting.status == WAITING_RELEASED); if (!waiting.received) { res = -1; goto finally; } // XXX Emit a warning if not a TimeoutError? PyErr_Clear(); } else { _waiting_finish_releasing(&waiting); assert(waiting.status == WAITING_RELEASED); if (!waiting.received) { res = ERR_CHANNEL_CLOSED_WAITING; goto finally; } } /* success! */ res = 0; finally: _waiting_clear(&waiting); return res; } // Pop the next object off the channel. Fail if empty. // The current interpreter gets associated with the recv end of the channel. // XXX Support a "wait" mutex? static int channel_recv(_channels *channels, int64_t cid, PyObject **res, int *p_unboundop) { int err; *res = NULL; PyInterpreterState *interp = _get_current_interp(); if (interp == NULL) { // XXX Is this always an error? if (PyErr_Occurred()) { return -1; } return 0; } int64_t interpid = PyInterpreterState_GetID(interp); // Look up the channel. PyThread_type_lock mutex = NULL; _channel_state *chan = NULL; err = _channels_lookup(channels, cid, &mutex, &chan); if (err != 0) { return err; } assert(chan != NULL); // Past this point we are responsible for releasing the mutex. // Pop off the next item from the channel. 
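/* An item that comes back with no data was left "unbound": the sending
   interpreter has been destroyed, so only the unbound op is reported. */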
_PyXIData_t *data = NULL; _waiting_t *waiting = NULL; err = _channel_next(chan, interpid, &data, &waiting, p_unboundop); PyThread_release_lock(mutex); if (err != 0) { return err; } else if (data == NULL) { // The item was unbound. assert(!PyErr_Occurred()); *res = NULL; return 0; } // Convert the data back to an object. PyObject *obj = _PyXIData_NewObject(data); if (obj == NULL) { assert(PyErr_Occurred()); // It was allocated in channel_send(), so we free it. (void)_release_xid_data(data, XID_IGNORE_EXC | XID_FREE); if (waiting != NULL) { _waiting_release(waiting, 0); } return -1; } // It was allocated in channel_send(), so we free it. int release_res = _release_xid_data(data, XID_FREE); if (release_res < 0) { // The source interpreter has been destroyed already. assert(PyErr_Occurred()); Py_DECREF(obj); if (waiting != NULL) { _waiting_release(waiting, 0); } return -1; } // Notify the sender. if (waiting != NULL) { _waiting_release(waiting, 1); } *res = obj; return 0; } // Disallow send/recv for the current interpreter. // The channel is marked as closed if no other interpreters // are currently associated. static int channel_release(_channels *channels, int64_t cid, int send, int recv) { PyInterpreterState *interp = _get_current_interp(); if (interp == NULL) { return -1; } int64_t interpid = PyInterpreterState_GetID(interp); // Look up the channel. PyThread_type_lock mutex = NULL; _channel_state *chan = NULL; int err = _channels_lookup(channels, cid, &mutex, &chan); if (err != 0) { return err; } // Past this point we are responsible for releasing the mutex. // Close one or both of the two ends. int res = _channel_release_interpreter(chan, interpid, send-recv); PyThread_release_lock(mutex); return res; } // Close the channel (for all interpreters). Fail if it's already closed. // Close immediately if it's empty. Otherwise, disallow sending and // finally close once empty. Optionally, immediately clear and close it. static int channel_close(_channels *channels, int64_t cid, int end, int force) { return _channels_close(channels, cid, NULL, end, force); } // Return true if the identified interpreter is associated // with the given end of the channel. static int channel_is_associated(_channels *channels, int64_t cid, int64_t interpid, int send) { _channel_state *chan = NULL; int err = _channels_lookup(channels, cid, NULL, &chan); if (err != 0) { return err; } else if (send && chan->closing != NULL) { return ERR_CHANNEL_CLOSED; } _channelend *end = _channelend_find(send ? 
chan->ends->send : chan->ends->recv, interpid, NULL); return (end != NULL && end->open); } static int channel_get_defaults(_channels *channels, int64_t cid, struct _channeldefaults *defaults) { PyThread_type_lock mutex = NULL; _channel_state *channel = NULL; int err = _channels_lookup(channels, cid, &mutex, &channel); if (err != 0) { return err; } *defaults = channel->defaults; PyThread_release_lock(mutex); return 0; } static int _channel_get_count(_channels *channels, int64_t cid, Py_ssize_t *p_count) { PyThread_type_lock mutex = NULL; _channel_state *chan = NULL; int err = _channels_lookup(channels, cid, &mutex, &chan); if (err != 0) { return err; } assert(chan != NULL); int64_t count = chan->queue->count; PyThread_release_lock(mutex); *p_count = (Py_ssize_t)count; return 0; } /* channel info */ struct channel_info { struct { // 1: closed; -1: closing int closed; struct { Py_ssize_t nsend_only; // not released Py_ssize_t nsend_only_released; Py_ssize_t nrecv_only; // not released Py_ssize_t nrecv_only_released; Py_ssize_t nboth; // not released Py_ssize_t nboth_released; Py_ssize_t nboth_send_released; Py_ssize_t nboth_recv_released; } all; struct { // 1: associated; -1: released int send; int recv; } cur; } status; int64_t count; }; static int _channel_get_info(_channels *channels, int64_t cid, struct channel_info *info) { int err = 0; *info = (struct channel_info){0}; // Get the current interpreter. PyInterpreterState *interp = _get_current_interp(); if (interp == NULL) { return -1; } int64_t interpid = PyInterpreterState_GetID(interp); // Hold the global lock until we're done. PyThread_acquire_lock(channels->mutex, WAIT_LOCK); // Find the channel. _channelref *ref = _channelref_find(channels->head, cid, NULL); if (ref == NULL) { err = ERR_CHANNEL_NOT_FOUND; goto finally; } _channel_state *chan = ref->chan; // Check if open. if (chan == NULL) { info->status.closed = 1; goto finally; } if (!chan->open) { assert(chan->queue->count == 0); info->status.closed = 1; goto finally; } if (chan->closing != NULL) { assert(chan->queue->count > 0); info->status.closed = -1; } else { info->status.closed = 0; } // Get the number of queued objects. info->count = chan->queue->count; // Get the ends statuses. assert(info->status.cur.send == 0); assert(info->status.cur.recv == 0); _channelend *send = chan->ends->send; while (send != NULL) { if (send->interpid == interpid) { info->status.cur.send = send->open ? 1 : -1; } if (send->open) { info->status.all.nsend_only += 1; } else { info->status.all.nsend_only_released += 1; } send = send->next; } _channelend *recv = chan->ends->recv; while (recv != NULL) { if (recv->interpid == interpid) { info->status.cur.recv = recv->open ? 1 : -1; } // XXX This is O(n*n). Why do we have 2 linked lists? 
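// Classify each interpreter as recv-only, send-only, or bound to both
// ends (and released or not), adjusting the send-only counters
// accumulated above.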
_channelend *send = chan->ends->send; while (send != NULL) { if (send->interpid == recv->interpid) { break; } send = send->next; } if (send == NULL) { if (recv->open) { info->status.all.nrecv_only += 1; } else { info->status.all.nrecv_only_released += 1; } } else { if (recv->open) { if (send->open) { info->status.all.nboth += 1; info->status.all.nsend_only -= 1; } else { info->status.all.nboth_recv_released += 1; info->status.all.nsend_only_released -= 1; } } else { if (send->open) { info->status.all.nboth_send_released += 1; info->status.all.nsend_only -= 1; } else { info->status.all.nboth_released += 1; info->status.all.nsend_only_released -= 1; } } } recv = recv->next; } finally: PyThread_release_lock(channels->mutex); return err; } PyDoc_STRVAR(channel_info_doc, "ChannelInfo\n\ \n\ A named tuple of a channel's state."); static PyStructSequence_Field channel_info_fields[] = { {"open", "both ends are open"}, {"closing", "send is closed, recv is non-empty"}, {"closed", "both ends are closed"}, {"count", "queued objects"}, {"num_interp_send", "interpreters bound to the send end"}, {"num_interp_send_released", "interpreters bound to the send end and released"}, {"num_interp_recv", "interpreters bound to the recv end"}, {"num_interp_recv_released", "interpreters bound to the recv end and released"}, {"num_interp_both", "interpreters bound to both ends"}, {"num_interp_both_released", "interpreters bound to both ends and released from both"}, {"num_interp_both_send_released", "interpreters bound to both ends and released from the send end"}, {"num_interp_both_recv_released", "interpreters bound to both ends and released from the recv end"}, {"send_associated", "current interpreter is bound to the send end"}, {"send_released", "current interpreter *was* bound to the send end"}, {"recv_associated", "current interpreter is bound to the recv end"}, {"recv_released", "current interpreter *was* bound to the recv end"}, {0} }; static PyStructSequence_Desc channel_info_desc = { .name = MODULE_NAME_STR ".ChannelInfo", .doc = channel_info_doc, .fields = channel_info_fields, .n_in_sequence = 8, }; static PyObject * new_channel_info(PyObject *mod, struct channel_info *info) { module_state *state = get_module_state(mod); if (state == NULL) { return NULL; } assert(state->ChannelInfoType != NULL); PyObject *self = PyStructSequence_New(state->ChannelInfoType); if (self == NULL) { return NULL; } int pos = 0; #define SET_BOOL(val) \ PyStructSequence_SET_ITEM(self, pos++, \ Py_NewRef(val ?
Py_True : Py_False)) #define SET_COUNT(val) \ do { \ PyObject *obj = PyLong_FromLongLong(val); \ if (obj == NULL) { \ Py_CLEAR(self); \ return NULL; \ } \ PyStructSequence_SET_ITEM(self, pos++, obj); \ } while(0) SET_BOOL(info->status.closed == 0); SET_BOOL(info->status.closed == -1); SET_BOOL(info->status.closed == 1); SET_COUNT(info->count); SET_COUNT(info->status.all.nsend_only); SET_COUNT(info->status.all.nsend_only_released); SET_COUNT(info->status.all.nrecv_only); SET_COUNT(info->status.all.nrecv_only_released); SET_COUNT(info->status.all.nboth); SET_COUNT(info->status.all.nboth_released); SET_COUNT(info->status.all.nboth_send_released); SET_COUNT(info->status.all.nboth_recv_released); SET_BOOL(info->status.cur.send == 1); SET_BOOL(info->status.cur.send == -1); SET_BOOL(info->status.cur.recv == 1); SET_BOOL(info->status.cur.recv == -1); #undef SET_COUNT #undef SET_BOOL assert(!PyErr_Occurred()); return self; } /* ChannelID class */ typedef struct channelid { PyObject_HEAD int64_t cid; int end; int resolve; _channels *channels; } channelid; #define channelid_CAST(op) ((channelid *)(op)) struct channel_id_converter_data { PyObject *module; int64_t cid; int end; }; static int channel_id_converter(PyObject *arg, void *ptr) { int64_t cid; int end = 0; struct channel_id_converter_data *data = ptr; module_state *state = get_module_state(data->module); assert(state != NULL); if (PyObject_TypeCheck(arg, state->ChannelIDType)) { cid = ((channelid *)arg)->cid; end = ((channelid *)arg)->end; } else if (PyIndex_Check(arg)) { cid = PyLong_AsLongLong(arg); if (cid == -1 && PyErr_Occurred()) { return 0; } if (cid < 0) { PyErr_Format(PyExc_ValueError, "channel ID must be a non-negative int, got %R", arg); return 0; } } else { PyErr_Format(PyExc_TypeError, "channel ID must be an int, got %.100s", Py_TYPE(arg)->tp_name); return 0; } data->cid = cid; data->end = end; return 1; } static int newchannelid(PyTypeObject *cls, int64_t cid, int end, _channels *channels, int force, int resolve, channelid **res) { *res = NULL; channelid *self = PyObject_New(channelid, cls); if (self == NULL) { return -1; } self->cid = cid; self->end = end; self->resolve = resolve; self->channels = channels; int err = _channels_add_id_object(channels, cid); if (err != 0) { if (force && err == ERR_CHANNEL_NOT_FOUND) { assert(!PyErr_Occurred()); } else { Py_DECREF((PyObject *)self); return err; } } *res = self; return 0; } static _channels * _global_channels(void); static PyObject * _channelid_new(PyObject *mod, PyTypeObject *cls, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"id", "send", "recv", "force", "_resolve", NULL}; int64_t cid; int end; struct channel_id_converter_data cid_data = { .module = mod, }; int send = -1; int recv = -1; int force = 0; int resolve = 0; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|$pppp:ChannelID.__new__", kwlist, channel_id_converter, &cid_data, &send, &recv, &force, &resolve)) { return NULL; } cid = cid_data.cid; end = cid_data.end; // Handle "send" and "recv". 
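// send/recv are tri-state here: -1 (not given), 0 (False), 1 (True):
//   send=True, recv False/unset  -> CHANNEL_SEND
//   recv=True, send False/unset  -> CHANNEL_RECV
//   both True                    -> 0 (both ends)
//   both unset                   -> keep the end taken from the 'id' arg
//   both False                   -> ValueError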
if (send == 0 && recv == 0) { PyErr_SetString(PyExc_ValueError, "'send' and 'recv' cannot both be False"); return NULL; } else if (send == 1) { if (recv == 0 || recv == -1) { end = CHANNEL_SEND; } else { assert(recv == 1); end = 0; } } else if (recv == 1) { assert(send == 0 || send == -1); end = CHANNEL_RECV; } PyObject *cidobj = NULL; int err = newchannelid(cls, cid, end, _global_channels(), force, resolve, (channelid **)&cidobj); if (handle_channel_error(err, mod, cid)) { assert(cidobj == NULL); return NULL; } assert(cidobj != NULL); return cidobj; } static void channelid_dealloc(PyObject *op) { channelid *self = channelid_CAST(op); int64_t cid = self->cid; _channels *channels = self->channels; PyTypeObject *tp = Py_TYPE(self); tp->tp_free(self); /* "Instances of heap-allocated types hold a reference to their type." * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse */ // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, // like we do for _abc._abc_data? Py_DECREF(tp); _channels_release_cid_object(channels, cid); } static PyObject * channelid_repr(PyObject *self) { PyTypeObject *type = Py_TYPE(self); const char *name = _PyType_Name(type); channelid *cidobj = channelid_CAST(self); const char *fmt; if (cidobj->end == CHANNEL_SEND) { fmt = "%s(%" PRId64 ", send=True)"; } else if (cidobj->end == CHANNEL_RECV) { fmt = "%s(%" PRId64 ", recv=True)"; } else { fmt = "%s(%" PRId64 ")"; } return PyUnicode_FromFormat(fmt, name, cidobj->cid); } static PyObject * channelid_str(PyObject *self) { channelid *cidobj = channelid_CAST(self); return PyUnicode_FromFormat("%" PRId64 "", cidobj->cid); } static PyObject * channelid_int(PyObject *self) { channelid *cidobj = channelid_CAST(self); return PyLong_FromLongLong(cidobj->cid); } static Py_hash_t channelid_hash(PyObject *self) { channelid *cidobj = channelid_CAST(self); PyObject *pyid = PyLong_FromLongLong(cidobj->cid); if (pyid == NULL) { return -1; } Py_hash_t hash = PyObject_Hash(pyid); Py_DECREF(pyid); return hash; } static PyObject * channelid_richcompare(PyObject *self, PyObject *other, int op) { PyObject *res = NULL; if (op != Py_EQ && op != Py_NE) { Py_RETURN_NOTIMPLEMENTED; } PyObject *mod = get_module_from_type(Py_TYPE(self)); if (mod == NULL) { return NULL; } module_state *state = get_module_state(mod); if (state == NULL) { goto done; } if (!PyObject_TypeCheck(self, state->ChannelIDType)) { res = Py_NewRef(Py_NotImplemented); goto done; } channelid *cidobj = channelid_CAST(self); int equal; if (PyObject_TypeCheck(other, state->ChannelIDType)) { channelid *othercidobj = (channelid *)other; // fast safe cast equal = (cidobj->end == othercidobj->end) && (cidobj->cid == othercidobj->cid); } else if (PyLong_Check(other)) { /* Fast path */ int overflow; long long othercid = PyLong_AsLongLongAndOverflow(other, &overflow); if (othercid == -1 && PyErr_Occurred()) { goto done; } equal = !overflow && (othercid >= 0) && (cidobj->cid == othercid); } else if (PyNumber_Check(other)) { PyObject *pyid = PyLong_FromLongLong(cidobj->cid); if (pyid == NULL) { goto done; } res = PyObject_RichCompare(pyid, other, op); Py_DECREF(pyid); goto done; } else { res = Py_NewRef(Py_NotImplemented); goto done; } if ((op == Py_EQ && equal) || (op == Py_NE && !equal)) { res = Py_NewRef(Py_True); } else { res = Py_NewRef(Py_False); } done: Py_DECREF(mod); return res; } static PyTypeObject * _get_current_channelend_type(int end); static 
PyObject * _channelobj_from_cidobj(PyObject *cidobj, int end) { PyObject *cls = (PyObject *)_get_current_channelend_type(end); if (cls == NULL) { return NULL; } PyObject *chan = PyObject_CallFunctionObjArgs(cls, cidobj, NULL); Py_DECREF(cls); if (chan == NULL) { return NULL; } return chan; } struct _channelid_xid { int64_t cid; int end; int resolve; }; static PyObject * _channelid_from_xid(_PyXIData_t *data) { struct _channelid_xid *xid = (struct _channelid_xid *)_PyXIData_DATA(data); // It might not be imported yet, so we can't use _get_current_module(). PyObject *mod = PyImport_ImportModule(MODULE_NAME_STR); if (mod == NULL) { return NULL; } assert(mod != Py_None); module_state *state = get_module_state(mod); if (state == NULL) { return NULL; } // Note that we do not preserve the "resolve" flag. PyObject *cidobj = NULL; int err = newchannelid(state->ChannelIDType, xid->cid, xid->end, _global_channels(), 0, 0, (channelid **)&cidobj); if (err != 0) { assert(cidobj == NULL); (void)handle_channel_error(err, mod, xid->cid); goto done; } assert(cidobj != NULL); if (xid->end == 0) { goto done; } if (!xid->resolve) { goto done; } /* Try returning a high-level channel end but fall back to the ID. */ PyObject *chan = _channelobj_from_cidobj(cidobj, xid->end); if (chan == NULL) { PyErr_Clear(); goto done; } Py_DECREF(cidobj); cidobj = chan; done: Py_DECREF(mod); return cidobj; } static int _channelid_shared(PyThreadState *tstate, PyObject *obj, _PyXIData_t *data) { if (_PyXIData_InitWithSize( data, tstate->interp, sizeof(struct _channelid_xid), obj, _channelid_from_xid ) < 0) { return -1; } struct _channelid_xid *xid = (struct _channelid_xid *)_PyXIData_DATA(data); channelid *cidobj = channelid_CAST(obj); xid->cid = cidobj->cid; xid->end = cidobj->end; xid->resolve = cidobj->resolve; return 0; } static PyObject * channelid_end(PyObject *self, void *end) { int force = 1; channelid *cidobj = channelid_CAST(self); if (end != NULL) { PyObject *obj = NULL; int err = newchannelid(Py_TYPE(self), cidobj->cid, *(int *)end, cidobj->channels, force, cidobj->resolve, (channelid **)&obj); if (err != 0) { assert(obj == NULL); PyObject *mod = get_module_from_type(Py_TYPE(self)); if (mod == NULL) { return NULL; } (void)handle_channel_error(err, mod, cidobj->cid); Py_DECREF(mod); return NULL; } assert(obj != NULL); return obj; } if (cidobj->end == CHANNEL_SEND) { return PyUnicode_InternFromString("send"); } if (cidobj->end == CHANNEL_RECV) { return PyUnicode_InternFromString("recv"); } return PyUnicode_InternFromString("both"); } static int _channelid_end_send = CHANNEL_SEND; static int _channelid_end_recv = CHANNEL_RECV; static PyGetSetDef channelid_getsets[] = { {"end", channelid_end, NULL, PyDoc_STR("'send', 'recv', or 'both'")}, {"send", channelid_end, NULL, PyDoc_STR("the 'send' end of the channel"), &_channelid_end_send}, {"recv", channelid_end, NULL, PyDoc_STR("the 'recv' end of the channel"), &_channelid_end_recv}, {NULL} }; PyDoc_STRVAR(channelid_doc, "A channel ID identifies a channel and may be used as an int."); static PyType_Slot channelid_typeslots[] = { {Py_tp_dealloc, channelid_dealloc}, {Py_tp_doc, (void *)channelid_doc}, {Py_tp_repr, channelid_repr}, {Py_tp_str, channelid_str}, {Py_tp_hash, channelid_hash}, {Py_tp_richcompare, channelid_richcompare}, {Py_tp_getset, channelid_getsets}, // number slots {Py_nb_int, channelid_int}, {Py_nb_index, channelid_int}, {0, NULL}, }; static PyType_Spec channelid_typespec = { .name = MODULE_NAME_STR ".ChannelID", .basicsize = sizeof(channelid), .flags = 
(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), .slots = channelid_typeslots, }; static PyTypeObject * add_channelid_type(PyObject *mod) { PyTypeObject *cls = (PyTypeObject *)PyType_FromModuleAndSpec( mod, &channelid_typespec, NULL); if (cls == NULL) { return NULL; } if (PyModule_AddType(mod, cls) < 0) { Py_DECREF(cls); return NULL; } if (ensure_xid_class(cls, GETDATA(_channelid_shared)) < 0) { Py_DECREF(cls); return NULL; } return cls; } /* SendChannel and RecvChannel classes */ // XXX Use a new __xid__ protocol instead? static PyTypeObject * _get_current_channelend_type(int end) { module_state *state = _get_current_module_state(); if (state == NULL) { return NULL; } PyTypeObject *cls; if (end == CHANNEL_SEND) { cls = state->send_channel_type; } else { assert(end == CHANNEL_RECV); cls = state->recv_channel_type; } if (cls == NULL) { // Force the module to be loaded, to register the type. if (ensure_highlevel_module_loaded() < 0) { return NULL; } if (end == CHANNEL_SEND) { cls = state->send_channel_type; } else { cls = state->recv_channel_type; } assert(cls != NULL); } return cls; } static PyObject * _channelend_from_xid(_PyXIData_t *data) { channelid *cidobj = (channelid *)_channelid_from_xid(data); if (cidobj == NULL) { return NULL; } PyTypeObject *cls = _get_current_channelend_type(cidobj->end); if (cls == NULL) { Py_DECREF(cidobj); return NULL; } PyObject *obj = PyObject_CallOneArg((PyObject *)cls, (PyObject *)cidobj); Py_DECREF(cidobj); return obj; } static int _channelend_shared(PyThreadState *tstate, PyObject *obj, _PyXIData_t *data) { PyObject *cidobj = PyObject_GetAttrString(obj, "_id"); if (cidobj == NULL) { return -1; } int res = _channelid_shared(tstate, cidobj, data); Py_DECREF(cidobj); if (res < 0) { return -1; } _PyXIData_SET_NEW_OBJECT(data, _channelend_from_xid); return 0; } static int set_channelend_types(PyObject *mod, PyTypeObject *send, PyTypeObject *recv) { module_state *state = get_module_state(mod); if (state == NULL) { return -1; } // Clear the old values if the .py module was reloaded. if (state->send_channel_type != NULL) { (void)clear_xid_class(state->send_channel_type); Py_CLEAR(state->send_channel_type); } if (state->recv_channel_type != NULL) { (void)clear_xid_class(state->recv_channel_type); Py_CLEAR(state->recv_channel_type); } // Add and register the types. state->send_channel_type = (PyTypeObject *)Py_NewRef(send); state->recv_channel_type = (PyTypeObject *)Py_NewRef(recv); if (ensure_xid_class(send, GETDATA(_channelend_shared)) < 0) { Py_CLEAR(state->send_channel_type); Py_CLEAR(state->recv_channel_type); return -1; } if (ensure_xid_class(recv, GETDATA(_channelend_shared)) < 0) { (void)clear_xid_class(state->send_channel_type); Py_CLEAR(state->send_channel_type); Py_CLEAR(state->recv_channel_type); return -1; } return 0; } /* module level code ********************************************************/ /* globals is the process-global state for the module. It holds all the data that we need to share between interpreters, so it cannot hold PyObject values. */ static struct globals { PyMutex mutex; int module_count; _channels channels; } _globals = {0}; static int _globals_init(void) { PyMutex_Lock(&_globals.mutex); assert(_globals.module_count >= 0); _globals.module_count++; if (_globals.module_count == 1) { // Called for the first time. 
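// The process-wide channels registry and its lock are created only when
// the first module instance (in any interpreter) is initialized; they are
// torn down again in _globals_fini() once the last one goes away.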
PyThread_type_lock mutex = PyThread_allocate_lock(); if (mutex == NULL) { _globals.module_count--; PyMutex_Unlock(&_globals.mutex); return ERR_CHANNELS_MUTEX_INIT; } _channels_init(&_globals.channels, mutex); } PyMutex_Unlock(&_globals.mutex); return 0; } static void _globals_fini(void) { PyMutex_Lock(&_globals.mutex); assert(_globals.module_count > 0); _globals.module_count--; if (_globals.module_count == 0) { PyThread_type_lock mutex; _channels_fini(&_globals.channels, &mutex); assert(mutex != NULL); PyThread_free_lock(mutex); } PyMutex_Unlock(&_globals.mutex); } static _channels * _global_channels(void) { return &_globals.channels; } static void clear_interpreter(void *data) { if (_globals.module_count == 0) { return; } PyInterpreterState *interp = (PyInterpreterState *)data; assert(interp == _get_current_interp()); int64_t interpid = PyInterpreterState_GetID(interp); _channels_clear_interpreter(&_globals.channels, interpid); } static PyObject * channelsmod_create(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"unboundop", "fallback", NULL}; int unboundarg = -1; int fallbackarg = -1; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|ii:create", kwlist, &unboundarg, &fallbackarg)) { return NULL; } struct _channeldefaults defaults = {0}; if (resolve_unboundop(unboundarg, UNBOUND_REPLACE, &defaults.unboundop) < 0) { return NULL; } if (resolve_fallback(fallbackarg, _PyXIDATA_FULL_FALLBACK, &defaults.fallback) < 0) { return NULL; } int64_t cid = channel_create(&_globals.channels, defaults); if (cid < 0) { (void)handle_channel_error(-1, self, cid); return NULL; } module_state *state = get_module_state(self); if (state == NULL) { return NULL; } channelid *cidobj = NULL; int err = newchannelid(state->ChannelIDType, cid, 0, &_globals.channels, 0, 0, &cidobj); if (handle_channel_error(err, self, cid)) { assert(cidobj == NULL); err = channel_destroy(&_globals.channels, cid); if (handle_channel_error(err, self, cid)) { // XXX issue a warning? } return NULL; } assert(cidobj != NULL); assert(cidobj->channels != NULL); return (PyObject *)cidobj; } PyDoc_STRVAR(channelsmod_create_doc, "channel_create(unboundop) -> cid\n\ \n\ Create a new cross-interpreter channel and return a unique generated ID."); static PyObject * channelsmod_destroy(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", NULL}; int64_t cid; struct channel_id_converter_data cid_data = { .module = self, }; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:channel_destroy", kwlist, channel_id_converter, &cid_data)) { return NULL; } cid = cid_data.cid; int err = channel_destroy(&_globals.channels, cid); if (handle_channel_error(err, self, cid)) { return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(channelsmod_destroy_doc, "channel_destroy(cid)\n\ \n\ Close and finalize the channel. 
Afterward attempts to use the channel\n\ will behave as though it never existed."); static PyObject * channelsmod_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) { int64_t count = 0; struct channel_id_and_info *cids = _channels_list_all(&_globals.channels, &count); if (cids == NULL) { if (count == 0) { return PyList_New(0); } return NULL; } PyObject *ids = PyList_New((Py_ssize_t)count); if (ids == NULL) { goto finally; } module_state *state = get_module_state(self); if (state == NULL) { Py_DECREF(ids); ids = NULL; goto finally; } struct channel_id_and_info *cur = cids; for (int64_t i=0; i < count; cur++, i++) { PyObject *cidobj = NULL; int err = newchannelid(state->ChannelIDType, cur->id, 0, &_globals.channels, 0, 0, (channelid **)&cidobj); if (handle_channel_error(err, self, cur->id)) { assert(cidobj == NULL); Py_SETREF(ids, NULL); break; } assert(cidobj != NULL); PyObject *item = Py_BuildValue("Oii", cidobj, cur->defaults.unboundop, cur->defaults.fallback); Py_DECREF(cidobj); if (item == NULL) { Py_SETREF(ids, NULL); break; } PyList_SET_ITEM(ids, (Py_ssize_t)i, item); } finally: PyMem_Free(cids); return ids; } PyDoc_STRVAR(channelsmod_list_all_doc, "channel_list_all() -> [cid]\n\ \n\ Return the list of all IDs for active channels."); static PyObject * channelsmod_list_interpreters(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", "send", NULL}; int64_t cid; /* Channel ID */ struct channel_id_converter_data cid_data = { .module = self, }; int send = 0; /* Send or receive end? */ int64_t interpid; PyObject *ids, *interpid_obj; PyInterpreterState *interp; if (!PyArg_ParseTupleAndKeywords( args, kwds, "O&$p:channel_list_interpreters", kwlist, channel_id_converter, &cid_data, &send)) { return NULL; } cid = cid_data.cid; ids = PyList_New(0); if (ids == NULL) { goto except; } interp = PyInterpreterState_Head(); while (interp != NULL) { interpid = PyInterpreterState_GetID(interp); assert(interpid >= 0); int res = channel_is_associated(&_globals.channels, cid, interpid, send); if (res < 0) { (void)handle_channel_error(res, self, cid); goto except; } if (res) { interpid_obj = _PyInterpreterState_GetIDObject(interp); if (interpid_obj == NULL) { goto except; } res = PyList_Insert(ids, 0, interpid_obj); Py_DECREF(interpid_obj); if (res < 0) { goto except; } } interp = PyInterpreterState_Next(interp); } goto finally; except: Py_CLEAR(ids); finally: return ids; } PyDoc_STRVAR(channelsmod_list_interpreters_doc, "channel_list_interpreters(cid, *, send) -> [id]\n\ \n\ Return the list of all interpreter IDs associated with an end of the channel.\n\ \n\ The 'send' argument should be a boolean indicating whether to use the send or\n\ receive end."); static PyObject * channelsmod_send(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", "obj", "unboundop", "fallback", "blocking", "timeout", NULL}; struct channel_id_converter_data cid_data = { .module = self, }; PyObject *obj; int unboundarg = -1; int fallbackarg = -1; int blocking = 1; PyObject *timeout_obj = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O|ii$pO:channel_send", kwlist, channel_id_converter, &cid_data, &obj, &unboundarg, &fallbackarg, &blocking, &timeout_obj)) { return NULL; } int64_t cid = cid_data.cid; PY_TIMEOUT_T timeout; if (PyThread_ParseTimeoutArg(timeout_obj, blocking, &timeout) < 0) { return NULL; } struct _channeldefaults defaults = {-1, -1}; if (unboundarg < 0 || fallbackarg < 0) { int err = channel_get_defaults(&_globals.channels, cid, &defaults); if 
(handle_channel_error(err, self, cid)) { return NULL; } } unboundop_t unboundop; if (resolve_unboundop(unboundarg, defaults.unboundop, &unboundop) < 0) { return NULL; } xidata_fallback_t fallback; if (resolve_fallback(fallbackarg, defaults.fallback, &fallback) < 0) { return NULL; } /* Queue up the object. */ int err = 0; if (blocking) { err = channel_send_wait( &_globals.channels, cid, obj, unboundop, timeout, fallback); } else { err = channel_send( &_globals.channels, cid, obj, NULL, unboundop, fallback); } if (handle_channel_error(err, self, cid)) { return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(channelsmod_send_doc, "channel_send(cid, obj, *, blocking=True, timeout=None)\n\ \n\ Add the object's data to the channel's queue.\n\ By default this waits for the object to be received."); static PyObject * channelsmod_send_buffer(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", "obj", "unboundop", "fallback", "blocking", "timeout", NULL}; struct channel_id_converter_data cid_data = { .module = self, }; PyObject *obj; int unboundarg = -1; int fallbackarg = -1; int blocking = -1; PyObject *timeout_obj = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O|ii$pO:channel_send_buffer", kwlist, channel_id_converter, &cid_data, &obj, &unboundarg, &fallbackarg, &blocking, &timeout_obj)) { return NULL; } int64_t cid = cid_data.cid; PY_TIMEOUT_T timeout; if (PyThread_ParseTimeoutArg(timeout_obj, blocking, &timeout) < 0) { return NULL; } struct _channeldefaults defaults = {-1, -1}; if (unboundarg < 0 || fallbackarg < 0) { int err = channel_get_defaults(&_globals.channels, cid, &defaults); if (handle_channel_error(err, self, cid)) { return NULL; } } unboundop_t unboundop; if (resolve_unboundop(unboundarg, defaults.unboundop, &unboundop) < 0) { return NULL; } xidata_fallback_t fallback; if (resolve_fallback(fallbackarg, defaults.fallback, &fallback) < 0) { return NULL; } PyObject *tempobj = PyMemoryView_FromObject(obj); if (tempobj == NULL) { return NULL; } /* Queue up the object. */ int err = 0; if (blocking) { err = channel_send_wait( &_globals.channels, cid, tempobj, unboundop, timeout, fallback); } else { err = channel_send( &_globals.channels, cid, tempobj, NULL, unboundop, fallback); } Py_DECREF(tempobj); if (handle_channel_error(err, self, cid)) { return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(channelsmod_send_buffer_doc, "channel_send_buffer(cid, obj, *, blocking=True, timeout=None)\n\ \n\ Add the object's buffer to the channel's queue.\n\ By default this waits for the object to be received."); static PyObject * channelsmod_recv(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", "default", NULL}; int64_t cid; struct channel_id_converter_data cid_data = { .module = self, }; PyObject *dflt = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O:channel_recv", kwlist, channel_id_converter, &cid_data, &dflt)) { return NULL; } cid = cid_data.cid; PyObject *obj = NULL; unboundop_t unboundop = 0; int err = channel_recv(&_globals.channels, cid, &obj, &unboundop); if (err == ERR_CHANNEL_EMPTY && dflt != NULL) { // Use the default. obj = Py_NewRef(dflt); err = 0; } else if (handle_channel_error(err, self, cid)) { return NULL; } else if (obj == NULL) { // The item was unbound. 
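// An unbound item is reported as (None, unboundop); a normally received
// object is returned as (obj, None) below.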
return Py_BuildValue("Oi", Py_None, unboundop); } PyObject *res = Py_BuildValue("OO", obj, Py_None); Py_DECREF(obj); return res; } PyDoc_STRVAR(channelsmod_recv_doc, "channel_recv(cid, [default]) -> (obj, unboundop)\n\ \n\ Return a new object from the data at the front of the channel's queue.\n\ \n\ If there is nothing to receive then raise ChannelEmptyError, unless\n\ a default value is provided. In that case return it."); static PyObject * channelsmod_close(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; int64_t cid; struct channel_id_converter_data cid_data = { .module = self, }; int send = 0; int recv = 0; int force = 0; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|$ppp:channel_close", kwlist, channel_id_converter, &cid_data, &send, &recv, &force)) { return NULL; } cid = cid_data.cid; int err = channel_close(&_globals.channels, cid, send-recv, force); if (handle_channel_error(err, self, cid)) { return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(channelsmod_close_doc, "channel_close(cid, *, send=None, recv=None, force=False)\n\ \n\ Close the channel for all interpreters.\n\ \n\ If the channel is empty then the keyword args are ignored and both\n\ ends are immediately closed. Otherwise, if 'force' is True then\n\ all queued items are released and both ends are immediately\n\ closed.\n\ \n\ If the channel is not empty *and* 'force' is False then following\n\ happens:\n\ \n\ * recv is True (regardless of send):\n\ - raise ChannelNotEmptyError\n\ * recv is None and send is None:\n\ - raise ChannelNotEmptyError\n\ * send is True and recv is not True:\n\ - fully close the 'send' end\n\ - close the 'recv' end to interpreters not already receiving\n\ - fully close it once empty\n\ \n\ Closing an already closed channel results in a ChannelClosedError.\n\ \n\ Once the channel's ID has no more ref counts in any interpreter\n\ the channel will be destroyed."); static PyObject * channelsmod_release(PyObject *self, PyObject *args, PyObject *kwds) { // Note that only the current interpreter is affected. static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; int64_t cid; struct channel_id_converter_data cid_data = { .module = self, }; int send = 0; int recv = 0; int force = 0; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|$ppp:channel_release", kwlist, channel_id_converter, &cid_data, &send, &recv, &force)) { return NULL; } cid = cid_data.cid; if (send == 0 && recv == 0) { send = 1; recv = 1; } // XXX Handle force is True. // XXX Fix implicit release. int err = channel_release(&_globals.channels, cid, send, recv); if (handle_channel_error(err, self, cid)) { return NULL; } Py_RETURN_NONE; } PyDoc_STRVAR(channelsmod_release_doc, "channel_release(cid, *, send=None, recv=None, force=True)\n\ \n\ Close the channel for the current interpreter. 'send' and 'recv'\n\ (bool) may be used to indicate the ends to close. By default both\n\ ends are closed. 
Closing an already closed end is a noop."); static PyObject * channelsmod_get_count(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", NULL}; struct channel_id_converter_data cid_data = { .module = self, }; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:get_count", kwlist, channel_id_converter, &cid_data)) { return NULL; } int64_t cid = cid_data.cid; Py_ssize_t count = -1; int err = _channel_get_count(&_globals.channels, cid, &count); if (handle_channel_error(err, self, cid)) { return NULL; } assert(count >= 0); return PyLong_FromSsize_t(count); } PyDoc_STRVAR(channelsmod_get_count_doc, "get_count(cid)\n\ \n\ Return the number of items in the channel."); static PyObject * channelsmod_get_info(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", NULL}; struct channel_id_converter_data cid_data = { .module = self, }; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:_get_info", kwlist, channel_id_converter, &cid_data)) { return NULL; } int64_t cid = cid_data.cid; struct channel_info info; int err = _channel_get_info(&_globals.channels, cid, &info); if (handle_channel_error(err, self, cid)) { return NULL; } return new_channel_info(self, &info); } PyDoc_STRVAR(channelsmod_get_info_doc, "get_info(cid)\n\ \n\ Return details about the channel."); static PyObject * channelsmod_get_channel_defaults(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"cid", NULL}; struct channel_id_converter_data cid_data = { .module = self, }; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:get_channel_defaults", kwlist, channel_id_converter, &cid_data)) { return NULL; } int64_t cid = cid_data.cid; struct _channeldefaults defaults = {0}; int err = channel_get_defaults(&_globals.channels, cid, &defaults); if (handle_channel_error(err, self, cid)) { return NULL; } PyObject *res = Py_BuildValue("ii", defaults.unboundop, defaults.fallback); return res; } PyDoc_STRVAR(channelsmod_get_channel_defaults_doc, "get_channel_defaults(cid)\n\ \n\ Return the channel's default values, set when it was created."); static PyObject * channelsmod__channel_id(PyObject *self, PyObject *args, PyObject *kwds) { module_state *state = get_module_state(self); if (state == NULL) { return NULL; } PyTypeObject *cls = state->ChannelIDType; PyObject *mod = get_module_from_owned_type(cls); assert(mod == self); Py_DECREF(mod); return _channelid_new(self, cls, args, kwds); } static PyObject * channelsmod__register_end_types(PyObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"send", "recv", NULL}; PyObject *send; PyObject *recv; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO:_register_end_types", kwlist, &send, &recv)) { return NULL; } if (!PyType_Check(send)) { PyErr_SetString(PyExc_TypeError, "expected a type for 'send'"); return NULL; } if (!PyType_Check(recv)) { PyErr_SetString(PyExc_TypeError, "expected a type for 'recv'"); return NULL; } PyTypeObject *cls_send = (PyTypeObject *)send; PyTypeObject *cls_recv = (PyTypeObject *)recv; if (set_channelend_types(self, cls_send, cls_recv) < 0) { return NULL; } Py_RETURN_NONE; } static PyMethodDef module_functions[] = { {"create", _PyCFunction_CAST(channelsmod_create), METH_VARARGS | METH_KEYWORDS, channelsmod_create_doc}, {"destroy", _PyCFunction_CAST(channelsmod_destroy), METH_VARARGS | METH_KEYWORDS, channelsmod_destroy_doc}, {"list_all", channelsmod_list_all, METH_NOARGS, channelsmod_list_all_doc}, {"list_interpreters", _PyCFunction_CAST(channelsmod_list_interpreters), METH_VARARGS | 
METH_KEYWORDS, channelsmod_list_interpreters_doc}, {"send", _PyCFunction_CAST(channelsmod_send), METH_VARARGS | METH_KEYWORDS, channelsmod_send_doc}, {"send_buffer", _PyCFunction_CAST(channelsmod_send_buffer), METH_VARARGS | METH_KEYWORDS, channelsmod_send_buffer_doc}, {"recv", _PyCFunction_CAST(channelsmod_recv), METH_VARARGS | METH_KEYWORDS, channelsmod_recv_doc}, {"close", _PyCFunction_CAST(channelsmod_close), METH_VARARGS | METH_KEYWORDS, channelsmod_close_doc}, {"release", _PyCFunction_CAST(channelsmod_release), METH_VARARGS | METH_KEYWORDS, channelsmod_release_doc}, {"get_count", _PyCFunction_CAST(channelsmod_get_count), METH_VARARGS | METH_KEYWORDS, channelsmod_get_count_doc}, {"get_info", _PyCFunction_CAST(channelsmod_get_info), METH_VARARGS | METH_KEYWORDS, channelsmod_get_info_doc}, {"get_channel_defaults", _PyCFunction_CAST(channelsmod_get_channel_defaults), METH_VARARGS | METH_KEYWORDS, channelsmod_get_channel_defaults_doc}, {"_channel_id", _PyCFunction_CAST(channelsmod__channel_id), METH_VARARGS | METH_KEYWORDS, NULL}, {"_register_end_types", _PyCFunction_CAST(channelsmod__register_end_types), METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL} /* sentinel */ }; /* initialization function */ PyDoc_STRVAR(module_doc, "This module provides primitive operations to manage Python interpreters.\n\ The 'interpreters' module provides a more convenient interface."); static int module_exec(PyObject *mod) { int err = _globals_init(); if (handle_channel_error(err, mod, -1)) { return -1; } module_state *state = get_module_state(mod); if (state == NULL) { goto error; } /* Add exception types */ if (exceptions_init(mod) != 0) { goto error; } /* Add other types */ // ChannelInfo state->ChannelInfoType = PyStructSequence_NewType(&channel_info_desc); if (state->ChannelInfoType == NULL) { goto error; } if (PyModule_AddType(mod, state->ChannelInfoType) < 0) { goto error; } // ChannelID state->ChannelIDType = add_channelid_type(mod); if (state->ChannelIDType == NULL) { goto error; } /* Make sure chnnels drop objects owned by this interpreter. */ PyInterpreterState *interp = _get_current_interp(); PyUnstable_AtExit(interp, clear_interpreter, (void *)interp); return 0; error: if (state != NULL) { clear_xid_types(state); } _globals_fini(); return -1; } static struct PyModuleDef_Slot module_slots[] = { {Py_mod_exec, module_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL}, }; static int module_traverse(PyObject *mod, visitproc visit, void *arg) { module_state *state = get_module_state(mod); assert(state != NULL); return traverse_module_state(state, visit, arg); } static int module_clear(PyObject *mod) { module_state *state = get_module_state(mod); assert(state != NULL); // Now we clear the module state. return clear_module_state(state); } static void module_free(void *mod) { module_state *state = get_module_state((PyObject *)mod); assert(state != NULL); // Now we clear the module state. 
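Taken together, the entries in the method table above are the whole Python-facing surface of the channels module. A minimal usage sketch, assuming a build that ships this private module under the name _interpchannels and a channel ID cid obtained earlier from its create() function:

    import _interpchannels as channels  # private module; the exact name depends on the CPython version

    # `cid` is assumed to have been returned by channels.create(...) earlier.
    print(channels.get_count(cid))       # number of items still queued
    channels.release(cid, recv=True)     # detach only this interpreter's receiving end
    channels.close(cid, force=True)      # close both ends everywhere, discarding queued items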
(void)clear_module_state(state); _globals_fini(); } static struct PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = MODULE_NAME_STR, .m_doc = module_doc, .m_size = sizeof(module_state), .m_methods = module_functions, .m_slots = module_slots, .m_traverse = module_traverse, .m_clear = module_clear, .m_free = module_free, }; PyMODINIT_FUNC MODINIT_FUNC_NAME(void) { return PyModuleDef_Init(&moduledef); } /* interpreters module */ /* low-level access to interpreter primitives */ #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "pycore_code.h" // _PyCode_HAS_EXECUTORS() #include "pycore_crossinterp.h" // _PyXIData_t #include "pycore_pyerrors.h" // _PyErr_GetRaisedException() #include "pycore_interp.h" // _PyInterpreterState_IDIncref() #include "pycore_modsupport.h" // _PyArg_BadArgument() #include "pycore_namespace.h" // _PyNamespace_New() #include "pycore_pybuffer.h" // _PyBuffer_ReleaseInInterpreterAndRawFree() #include "pycore_pylifecycle.h" // _PyInterpreterConfig_AsDict() #include "pycore_pystate.h" // _PyInterpreterState_IsRunningMain() #include "marshal.h" // PyMarshal_ReadObjectFromString() #include "_interpreters_common.h" #include "clinic/_interpretersmodule.c.h" #define MODULE_NAME _interpreters #define MODULE_NAME_STR Py_STRINGIFY(MODULE_NAME) #define MODINIT_FUNC_NAME RESOLVE_MODINIT_FUNC_NAME(MODULE_NAME) /*[clinic input] module _interpreters [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=bfd967980a0de892]*/ static PyInterpreterState * _get_current_interp(void) { // PyInterpreterState_Get() aborts if lookup fails, so don't need // to check the result for NULL. return PyInterpreterState_Get(); } #define look_up_interp _PyInterpreterState_LookUpIDObject static PyObject * _get_current_module(void) { PyObject *name = PyUnicode_FromString(MODULE_NAME_STR); if (name == NULL) { return NULL; } PyObject *mod = PyImport_GetModule(name); Py_DECREF(name); if (mod == NULL) { return NULL; } assert(mod != Py_None); return mod; } static int is_running_main(PyInterpreterState *interp) { if (_PyInterpreterState_IsRunningMain(interp)) { return 1; } // Unlike with the general C-API, we can be confident that someone // using this module for the main interpreter is doing so through // the main program. Thus we can make this extra check. This benefits // applications that embed Python but haven't been updated yet // to call _PyInterpreterState_SetRunningMain(). if (_Py_IsMainInterpreter(interp)) { return 1; } return 0; } static inline int is_notshareable_raised(PyThreadState *tstate) { PyObject *exctype = _PyXIData_GetNotShareableErrorType(tstate); return _PyErr_ExceptionMatches(tstate, exctype); } static void unwrap_not_shareable(PyThreadState *tstate, _PyXI_failure *failure) { if (_PyXI_UnwrapNotShareableError(tstate, failure) < 0) { _PyErr_Clear(tstate); } } /* Cross-interpreter Buffer Views *******************************************/ /* When a memoryview object is "shared" between interpreters, * its underlying "buffer" memory is actually shared, rather than just * copied. This facilitates efficient use of that data where otherwise * interpreters are strictly isolated. However, this also means that * the underlying data is subject to the complexities of thread-safety, * which the user must manage carefully. * * When the memoryview is "shared", it is essentially copied in the same * way as PyMemory_FromObject() does, but in another interpreter. 
* The Py_buffer value is copied like normal, including the "buf" pointer, * with one key exception. * * When a Py_buffer is released and it holds a reference to an object, * that object gets a chance to call its bf_releasebuffer() (if any) * before the object is decref'ed. The same is true with the memoryview * tp_dealloc, which essentially calls PyBuffer_Release(). * * The problem for a Py_buffer shared between two interpreters is that * the naive approach breaks interpreter isolation. Operations on an * object must only happen while that object's interpreter is active. * If the copied mv->view.obj pointed to the original memoryview then * the PyBuffer_Release() would happen under the wrong interpreter. * * To work around this, we set mv->view.obj on the copied memoryview * to a wrapper object with the only job of releasing the original * buffer in a cross-interpreter-safe way. */ // XXX Note that there is still an issue to sort out, where the original // interpreter is destroyed but code in another interpreter is still // using dependent buffers. Using such buffers segfaults. This will // require a careful fix. In the meantime, users will have to be // diligent about avoiding the problematic situation. typedef struct { PyObject base; Py_buffer *view; int64_t interpid; } xibufferview; static PyObject * xibufferview_from_buffer(PyTypeObject *cls, Py_buffer *view, int64_t interpid) { assert(interpid >= 0); Py_buffer *copied = PyMem_RawMalloc(sizeof(Py_buffer)); if (copied == NULL) { return NULL; } /* This steals the view->obj reference */ *copied = *view; xibufferview *self = PyObject_Malloc(sizeof(xibufferview)); if (self == NULL) { PyMem_RawFree(copied); return NULL; } PyObject_Init(&self->base, cls); *self = (xibufferview){ .base = self->base, .view = copied, .interpid = interpid, }; return (PyObject *)self; } static void xibufferview_dealloc(PyObject *op) { xibufferview *self = (xibufferview *)op; if (self->view != NULL) { PyInterpreterState *interp = _PyInterpreterState_LookUpID(self->interpid); if (interp == NULL) { /* The interpreter is no longer alive. */ PyErr_Clear(); PyMem_RawFree(self->view); } else { if (_PyBuffer_ReleaseInInterpreterAndRawFree(interp, self->view) < 0) { // XXX Emit a warning? PyErr_Clear(); } } } PyTypeObject *tp = Py_TYPE(self); tp->tp_free(self); /* "Instances of heap-allocated types hold a reference to their type." * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse */ // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, // like we do for _abc._abc_data? Py_DECREF(tp); } static int xibufferview_getbuf(PyObject *op, Py_buffer *view, int flags) { /* Only PyMemoryView_FromObject() should ever call this, via _memoryview_from_xid() below. */ xibufferview *self = (xibufferview *)op; *view = *self->view; /* This is the workaround mentioned earlier. */ view->obj = op; // XXX Should we leave it alone? view->internal = NULL; return 0; } static PyType_Slot XIBufferViewType_slots[] = { {Py_tp_dealloc, xibufferview_dealloc}, {Py_bf_getbuffer, xibufferview_getbuf}, // We don't bother with Py_bf_releasebuffer since we don't need it. 
{0, NULL}, }; static PyType_Spec XIBufferViewType_spec = { .name = MODULE_NAME_STR ".CrossInterpreterBufferView", .basicsize = sizeof(xibufferview), .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), .slots = XIBufferViewType_slots, }; static PyTypeObject * _get_current_xibufferview_type(void); struct xibuffer { Py_buffer view; int used; }; static PyObject * _memoryview_from_xid(_PyXIData_t *data) { assert(_PyXIData_DATA(data) != NULL); assert(_PyXIData_OBJ(data) == NULL); assert(_PyXIData_INTERPID(data) >= 0); struct xibuffer *view = (struct xibuffer *)_PyXIData_DATA(data); assert(!view->used); PyTypeObject *cls = _get_current_xibufferview_type(); if (cls == NULL) { return NULL; } PyObject *obj = xibufferview_from_buffer( cls, &view->view, _PyXIData_INTERPID(data)); if (obj == NULL) { return NULL; } PyObject *res = PyMemoryView_FromObject(obj); if (res == NULL) { Py_DECREF(obj); return NULL; } view->used = 1; return res; } static void _pybuffer_shared_free(void* data) { struct xibuffer *view = (struct xibuffer *)data; if (!view->used) { PyBuffer_Release(&view->view); } PyMem_RawFree(data); } static int _pybuffer_shared(PyThreadState *tstate, PyObject *obj, _PyXIData_t *data) { struct xibuffer *view = PyMem_RawMalloc(sizeof(struct xibuffer)); if (view == NULL) { return -1; } view->used = 0; /* This will increment the memoryview's export count, which won't get * decremented until the view sent to other interpreters is released. */ if (PyObject_GetBuffer(obj, &view->view, PyBUF_FULL_RO) < 0) { PyMem_RawFree(view); return -1; } /* The view holds a reference to the object, so we don't worry * about also tracking it on the cross-interpreter data. */ _PyXIData_Init(data, tstate->interp, view, NULL, _memoryview_from_xid); data->free = _pybuffer_shared_free; return 0; } static int register_memoryview_xid(PyObject *mod, PyTypeObject **p_state) { // XIBufferView assert(*p_state == NULL); PyTypeObject *cls = (PyTypeObject *)PyType_FromModuleAndSpec( mod, &XIBufferViewType_spec, NULL); if (cls == NULL) { return -1; } if (PyModule_AddType(mod, cls) < 0) { Py_DECREF(cls); return -1; } *p_state = cls; // Register XID for the builtin memoryview type. if (ensure_xid_class(&PyMemoryView_Type, GETDATA(_pybuffer_shared)) < 0) { return -1; } // We don't ever bother un-registering memoryview. 
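register_memoryview_xid() above is what makes memoryview objects shareable between interpreters: the receiving side ends up with a view over the same underlying buffer rather than a copy. A small sketch of checking that from Python, assuming a build that exposes the private _interpreters module (its is_shareable() is defined later in this file):

    import _interpreters  # private module; the 'interpreters' module is the convenient interface

    view = memoryview(bytearray(16))
    # With the memoryview hook registered, the buffer itself can be handed to
    # another interpreter; both sides then see the same memory.
    print(_interpreters.is_shareable(view))   # expected to print True on such builds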
return 0; } /* module state *************************************************************/ typedef struct { int _notused; /* heap types */ PyTypeObject *XIBufferViewType; } module_state; static inline module_state * get_module_state(PyObject *mod) { assert(mod != NULL); module_state *state = PyModule_GetState(mod); assert(state != NULL); return state; } static module_state * _get_current_module_state(void) { PyObject *mod = _get_current_module(); if (mod == NULL) { mod = PyImport_ImportModule(MODULE_NAME_STR); if (mod == NULL) { return NULL; } } module_state *state = get_module_state(mod); Py_DECREF(mod); return state; } static int traverse_module_state(module_state *state, visitproc visit, void *arg) { /* heap types */ Py_VISIT(state->XIBufferViewType); return 0; } static int clear_module_state(module_state *state) { /* heap types */ Py_CLEAR(state->XIBufferViewType); return 0; } static PyTypeObject * _get_current_xibufferview_type(void) { module_state *state = _get_current_module_state(); if (state == NULL) { return NULL; } return state->XIBufferViewType; } /* interpreter-specific code ************************************************/ static int init_named_config(PyInterpreterConfig *config, const char *name) { if (name == NULL || strcmp(name, "") == 0 || strcmp(name, "default") == 0) { name = "isolated"; } if (strcmp(name, "isolated") == 0) { *config = (PyInterpreterConfig)_PyInterpreterConfig_INIT; } else if (strcmp(name, "legacy") == 0) { *config = (PyInterpreterConfig)_PyInterpreterConfig_LEGACY_INIT; } else if (strcmp(name, "empty") == 0) { *config = (PyInterpreterConfig){0}; } else { PyErr_Format(PyExc_ValueError, "unsupported config name '%s'", name); return -1; } return 0; } static int config_from_object(PyObject *configobj, PyInterpreterConfig *config) { if (configobj == NULL || configobj == Py_None) { if (init_named_config(config, NULL) < 0) { return -1; } } else if (PyUnicode_Check(configobj)) { const char *utf8name = PyUnicode_AsUTF8(configobj); if (utf8name == NULL) { return -1; } if (init_named_config(config, utf8name) < 0) { return -1; } } else { PyObject *dict = PyObject_GetAttrString(configobj, "__dict__"); if (dict == NULL) { PyErr_Format(PyExc_TypeError, "bad config %R", configobj); return -1; } int res = _PyInterpreterConfig_InitFromDict(config, dict); Py_DECREF(dict); if (res < 0) { return -1; } } return 0; } struct interp_call { _PyXIData_t *func; _PyXIData_t *args; _PyXIData_t *kwargs; struct { _PyXIData_t func; _PyXIData_t args; _PyXIData_t kwargs; } _preallocated; }; static void _interp_call_clear(struct interp_call *call) { if (call->func != NULL) { _PyXIData_Clear(NULL, call->func); } if (call->args != NULL) { _PyXIData_Clear(NULL, call->args); } if (call->kwargs != NULL) { _PyXIData_Clear(NULL, call->kwargs); } *call = (struct interp_call){0}; } static int _interp_call_pack(PyThreadState *tstate, struct interp_call *call, PyObject *func, PyObject *args, PyObject *kwargs) { xidata_fallback_t fallback = _PyXIDATA_FULL_FALLBACK; assert(call->func == NULL); assert(call->args == NULL); assert(call->kwargs == NULL); // Handle the func. 
if (!PyCallable_Check(func)) { _PyErr_Format(tstate, PyExc_TypeError, "expected a callable, got %R", func); return -1; } if (_PyFunction_GetXIData(tstate, func, &call->_preallocated.func) < 0) { PyObject *exc = _PyErr_GetRaisedException(tstate); if (_PyPickle_GetXIData(tstate, func, &call->_preallocated.func) < 0) { _PyErr_SetRaisedException(tstate, exc); return -1; } Py_DECREF(exc); } call->func = &call->_preallocated.func; // Handle the args. if (args == NULL || args == Py_None) { // Leave it empty. } else { assert(PyTuple_Check(args)); if (PyTuple_GET_SIZE(args) > 0) { if (_PyObject_GetXIData( tstate, args, fallback, &call->_preallocated.args) < 0) { _interp_call_clear(call); return -1; } call->args = &call->_preallocated.args; } } // Handle the kwargs. if (kwargs == NULL || kwargs == Py_None) { // Leave it empty. } else { assert(PyDict_Check(kwargs)); if (PyDict_GET_SIZE(kwargs) > 0) { if (_PyObject_GetXIData( tstate, kwargs, fallback, &call->_preallocated.kwargs) < 0) { _interp_call_clear(call); return -1; } call->kwargs = &call->_preallocated.kwargs; } } return 0; } static void wrap_notshareable(PyThreadState *tstate, const char *label) { if (!is_notshareable_raised(tstate)) { return; } assert(label != NULL && strlen(label) > 0); PyObject *cause = _PyErr_GetRaisedException(tstate); _PyXIData_FormatNotShareableError(tstate, "%s not shareable", label); PyObject *exc = _PyErr_GetRaisedException(tstate); PyException_SetCause(exc, cause); _PyErr_SetRaisedException(tstate, exc); } static int _interp_call_unpack(struct interp_call *call, PyObject **p_func, PyObject **p_args, PyObject **p_kwargs) { PyThreadState *tstate = PyThreadState_Get(); // Unpack the func. PyObject *func = _PyXIData_NewObject(call->func); if (func == NULL) { wrap_notshareable(tstate, "func"); return -1; } // Unpack the args. PyObject *args; if (call->args == NULL) { args = PyTuple_New(0); if (args == NULL) { Py_DECREF(func); return -1; } } else { args = _PyXIData_NewObject(call->args); if (args == NULL) { wrap_notshareable(tstate, "args"); Py_DECREF(func); return -1; } assert(PyTuple_Check(args)); } // Unpack the kwargs. PyObject *kwargs = NULL; if (call->kwargs != NULL) { kwargs = _PyXIData_NewObject(call->kwargs); if (kwargs == NULL) { wrap_notshareable(tstate, "kwargs"); Py_DECREF(func); Py_DECREF(args); return -1; } assert(PyDict_Check(kwargs)); } *p_func = func; *p_args = args; *p_kwargs = kwargs; return 0; } static int _make_call(struct interp_call *call, PyObject **p_result, _PyXI_failure *failure) { assert(call != NULL && call->func != NULL); PyThreadState *tstate = _PyThreadState_GET(); // Get the func and args. PyObject *func = NULL, *args = NULL, *kwargs = NULL; if (_interp_call_unpack(call, &func, &args, &kwargs) < 0) { assert(func == NULL); assert(args == NULL); assert(kwargs == NULL); _PyXI_InitFailure(failure, _PyXI_ERR_OTHER, NULL); unwrap_not_shareable(tstate, failure); return -1; } assert(!_PyErr_Occurred(tstate)); // Make the call. 
PyObject *resobj = PyObject_Call(func, args, kwargs); Py_DECREF(func); Py_XDECREF(args); Py_XDECREF(kwargs); if (resobj == NULL) { return -1; } *p_result = resobj; return 0; } static int _run_script(_PyXIData_t *script, PyObject *ns, _PyXI_failure *failure) { PyObject *code = _PyXIData_NewObject(script); if (code == NULL) { _PyXI_InitFailure(failure, _PyXI_ERR_NOT_SHAREABLE, NULL); return -1; } PyObject *result = PyEval_EvalCode(code, ns, ns); Py_DECREF(code); if (result == NULL) { _PyXI_InitFailure(failure, _PyXI_ERR_UNCAUGHT_EXCEPTION, NULL); return -1; } assert(result == Py_None); Py_DECREF(result); // We throw away the result. return 0; } struct run_result { PyObject *result; PyObject *excinfo; }; static void _run_result_clear(struct run_result *runres) { Py_CLEAR(runres->result); Py_CLEAR(runres->excinfo); } static int _run_in_interpreter(PyThreadState *tstate, PyInterpreterState *interp, _PyXIData_t *script, struct interp_call *call, PyObject *shareables, struct run_result *runres) { assert(!_PyErr_Occurred(tstate)); int res = -1; _PyXI_failure *failure = _PyXI_NewFailure(); if (failure == NULL) { return -1; } _PyXI_session *session = _PyXI_NewSession(); if (session == NULL) { _PyXI_FreeFailure(failure); return -1; } _PyXI_session_result result = {0}; // Prep and switch interpreters. if (_PyXI_Enter(session, interp, shareables, &result) < 0) { // If an error occurred at this step, it means that interp // was not prepared and switched. _PyXI_FreeSession(session); _PyXI_FreeFailure(failure); assert(result.excinfo == NULL); return -1; } // Run in the interpreter. if (script != NULL) { assert(call == NULL); PyObject *mainns = _PyXI_GetMainNamespace(session, failure); if (mainns == NULL) { goto finally; } res = _run_script(script, mainns, failure); } else { assert(call != NULL); PyObject *resobj; res = _make_call(call, &resobj, failure); if (res == 0) { res = _PyXI_Preserve(session, "resobj", resobj, failure); Py_DECREF(resobj); if (res < 0) { goto finally; } } } finally: // Clean up and switch back. (void)res; int exitres = _PyXI_Exit(session, failure, &result); assert(res == 0 || exitres != 0); _PyXI_FreeSession(session); _PyXI_FreeFailure(failure); res = exitres; if (_PyErr_Occurred(tstate)) { // It's a directly propagated exception. 
assert(res < 0); } else if (res < 0) { assert(result.excinfo != NULL); runres->excinfo = Py_NewRef(result.excinfo); res = -1; } else { assert(result.excinfo == NULL); runres->result = _PyXI_GetPreserved(&result, "resobj"); if (_PyErr_Occurred(tstate)) { res = -1; } } _PyXI_ClearResult(&result); return res; } /* module level code ********************************************************/ static long get_whence(PyInterpreterState *interp) { return _PyInterpreterState_GetWhence(interp); } static PyInterpreterState * resolve_interp(PyObject *idobj, int restricted, int reqready, const char *op) { PyInterpreterState *interp; if (idobj == NULL) { interp = PyInterpreterState_Get(); } else { interp = look_up_interp(idobj); if (interp == NULL) { return NULL; } } if (reqready && !_PyInterpreterState_IsReady(interp)) { if (idobj == NULL) { PyErr_Format(PyExc_InterpreterError, "cannot %s current interpreter (not ready)", op); } else { PyErr_Format(PyExc_InterpreterError, "cannot %s interpreter %R (not ready)", op, idobj); } return NULL; } if (restricted && get_whence(interp) != _PyInterpreterState_WHENCE_STDLIB) { if (idobj == NULL) { PyErr_Format(PyExc_InterpreterError, "cannot %s unrecognized current interpreter", op); } else { PyErr_Format(PyExc_InterpreterError, "cannot %s unrecognized interpreter %R", op, idobj); } return NULL; } return interp; } static PyObject * get_summary(PyInterpreterState *interp) { PyObject *idobj = _PyInterpreterState_GetIDObject(interp); if (idobj == NULL) { return NULL; } PyObject *whenceobj = PyLong_FromLong( get_whence(interp)); if (whenceobj == NULL) { Py_DECREF(idobj); return NULL; } PyObject *res = PyTuple_Pack(2, idobj, whenceobj); Py_DECREF(idobj); Py_DECREF(whenceobj); return res; } // Not converted to Argument Clinic because the function uses ``**kwargs``. static PyObject * interp_new_config(PyObject *self, PyObject *args, PyObject *kwds) { const char *name = NULL; if (!PyArg_ParseTuple(args, "|s:" MODULE_NAME_STR ".new_config", &name)) { return NULL; } PyObject *overrides = kwds; PyInterpreterConfig config; if (init_named_config(&config, name) < 0) { return NULL; } if (overrides != NULL && PyDict_GET_SIZE(overrides) > 0) { if (_PyInterpreterConfig_UpdateFromDict(&config, overrides) < 0) { return NULL; } } PyObject *dict = _PyInterpreterConfig_AsDict(&config); if (dict == NULL) { return NULL; } PyObject *configobj = _PyNamespace_New(dict); Py_DECREF(dict); return configobj; } PyDoc_STRVAR(new_config_doc, "new_config($module, name='isolated', /, **overrides)\n\ --\n\ \n\ Return a representation of a new PyInterpreterConfig.\n\ \n\ The name determines the initial values of the config. Supported named\n\ configs are: default, isolated, legacy, and empty.\n\ \n\ Any keyword arguments are set on the corresponding config fields,\n\ overriding the initial values."); /*[clinic input] _interpreters.create config as configobj: object(py_default="'isolated'") = NULL * reqrefs: bool = False Create a new interpreter and return a unique generated ID. The caller is responsible for destroying the interpreter before exiting, typically by using _interpreters.destroy(). This can be managed automatically by passing "reqrefs=True" and then using _incref() and _decref() appropriately. "config" must be a valid interpreter config or the name of a predefined config ('isolated' or 'legacy'). The default is 'isolated'. 
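A brief sketch of how new_config() and create(), documented above, fit together from Python, assuming the private _interpreters module is importable (the higher-level 'interpreters' module is the convenient interface):

    import _interpreters  # private module; availability depends on the CPython version

    config = _interpreters.new_config("legacy")   # named presets: default, isolated, legacy, empty
    interp_id = _interpreters.create(config)      # a preset name string such as 'isolated' also works
    try:
        ...                                       # run code in the new interpreter (see exec() below)
    finally:
        _interpreters.destroy(interp_id)          # the caller is responsible for cleanup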
[clinic start generated code]*/ static PyObject * _interpreters_create_impl(PyObject *module, PyObject *configobj, int reqrefs) /*[clinic end generated code: output=c1cc6835b1277c16 input=235ce396a23624d5]*/ { PyInterpreterConfig config; if (config_from_object(configobj, &config) < 0) { return NULL; } long whence = _PyInterpreterState_WHENCE_STDLIB; PyInterpreterState *interp = \ _PyXI_NewInterpreter(&config, &whence, NULL, NULL); if (interp == NULL) { // XXX Move the chained exception to interpreters.create()? PyObject *exc = PyErr_GetRaisedException(); assert(exc != NULL); PyErr_SetString(PyExc_InterpreterError, "interpreter creation failed"); _PyErr_ChainExceptions1(exc); return NULL; } assert(_PyInterpreterState_IsReady(interp)); PyObject *idobj = _PyInterpreterState_GetIDObject(interp); if (idobj == NULL) { _PyXI_EndInterpreter(interp, NULL, NULL); return NULL; } if (reqrefs) { // Decref to 0 will destroy the interpreter. _PyInterpreterState_RequireIDRef(interp, 1); } return idobj; } /*[clinic input] _interpreters.destroy id: object * restrict as restricted: bool = False Destroy the identified interpreter. Attempting to destroy the current interpreter raises InterpreterError. So does an unrecognized ID. [clinic start generated code]*/ static PyObject * _interpreters_destroy_impl(PyObject *module, PyObject *id, int restricted) /*[clinic end generated code: output=0bc20da8700ab4dd input=561bdd6537639d40]*/ { // Look up the interpreter. int reqready = 0; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "destroy"); if (interp == NULL) { return NULL; } // Ensure we don't try to destroy the current interpreter. PyInterpreterState *current = _get_current_interp(); if (current == NULL) { return NULL; } if (interp == current) { PyErr_SetString(PyExc_InterpreterError, "cannot destroy the current interpreter"); return NULL; } // Ensure the interpreter isn't running. /* XXX We *could* support destroying a running interpreter but aren't going to worry about it for now. */ if (is_running_main(interp)) { PyErr_Format(PyExc_InterpreterError, "interpreter running"); return NULL; } // Destroy the interpreter. _PyXI_EndInterpreter(interp, NULL, NULL); Py_RETURN_NONE; } /*[clinic input] _interpreters.list_all * require_ready as reqready: bool = False Return a list containing the ID of every existing interpreter. [clinic start generated code]*/ static PyObject * _interpreters_list_all_impl(PyObject *module, int reqready) /*[clinic end generated code: output=3f21c1a7c78043c0 input=35bae91c381a2cf9]*/ { PyObject *ids = PyList_New(0); if (ids == NULL) { return NULL; } PyInterpreterState *interp = PyInterpreterState_Head(); while (interp != NULL) { if (!reqready || _PyInterpreterState_IsReady(interp)) { PyObject *item = get_summary(interp); if (item == NULL) { Py_DECREF(ids); return NULL; } // insert at front of list int res = PyList_Insert(ids, 0, item); Py_DECREF(item); if (res < 0) { Py_DECREF(ids); return NULL; } } interp = PyInterpreterState_Next(interp); } return ids; } /*[clinic input] _interpreters.get_current Return (ID, whence) of the current interpreter. [clinic start generated code]*/ static PyObject * _interpreters_get_current_impl(PyObject *module) /*[clinic end generated code: output=03161c8fcc0136eb input=37fb2c067c14d543]*/ { PyInterpreterState *interp =_get_current_interp(); if (interp == NULL) { return NULL; } assert(_PyInterpreterState_IsReady(interp)); return get_summary(interp); } /*[clinic input] _interpreters.get_main Return (ID, whence) of the main interpreter. 
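Each of the introspection functions above returns the (ID, whence) pair built by get_summary(). A usage sketch under the same assumption that the private module is importable:

    import _interpreters

    main_id, main_whence = _interpreters.get_main()
    cur_id, _ = _interpreters.get_current()
    for interp_id, whence in _interpreters.list_all():
        print(interp_id, whence)   # every existing interpreter, including the main one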
[clinic start generated code]*/ static PyObject * _interpreters_get_main_impl(PyObject *module) /*[clinic end generated code: output=9647288aff735557 input=b4ace23ca562146f]*/ { PyInterpreterState *interp = _PyInterpreterState_Main(); assert(_PyInterpreterState_IsReady(interp)); return get_summary(interp); } /*[clinic input] _interpreters.set___main___attrs id: object updates: object(subclass_of='&PyDict_Type') * restrict as restricted: bool = False Bind the given attributes in the interpreter's __main__ module. [clinic start generated code]*/ static PyObject * _interpreters_set___main___attrs_impl(PyObject *module, PyObject *id, PyObject *updates, int restricted) /*[clinic end generated code: output=f3803010cb452bf0 input=d16ab8d81371f86a]*/ { // Look up the interpreter. int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "update __main__ for"); if (interp == NULL) { return NULL; } // Check the updates. Py_ssize_t size = PyDict_Size(updates); if (size < 0) { return NULL; } if (size == 0) { PyErr_SetString(PyExc_ValueError, "arg 2 must be a non-empty dict"); return NULL; } _PyXI_session *session = _PyXI_NewSession(); if (session == NULL) { return NULL; } // Prep and switch interpreters, including apply the updates. if (_PyXI_Enter(session, interp, updates, NULL) < 0) { _PyXI_FreeSession(session); return NULL; } // Clean up and switch back. assert(!PyErr_Occurred()); int res = _PyXI_Exit(session, NULL, NULL); _PyXI_FreeSession(session); assert(res == 0); if (res < 0) { // unreachable if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "unresolved error"); } return NULL; } Py_RETURN_NONE; } static PyObject * _handle_script_error(struct run_result *runres) { assert(runres->result == NULL); if (runres->excinfo == NULL) { assert(PyErr_Occurred()); return NULL; } assert(!PyErr_Occurred()); return runres->excinfo; } /*[clinic input] _interpreters.exec id: object code: object shared: object(subclass_of='&PyDict_Type', c_default='NULL') = {} * restrict as restricted: bool = False Execute the provided code in the identified interpreter. This is equivalent to running the builtin exec() under the target interpreter, using the __dict__ of its __main__ module as both globals and locals. "code" may be a string containing the text of a Python script. Functions (and code objects) are also supported, with some restrictions. The code/function must not take any arguments or be a closure (i.e. have cell vars). Methods and other callables are not supported. If a function is provided, its code object is used and all its state is ignored, including its __globals__ dict. [clinic start generated code]*/ static PyObject * _interpreters_exec_impl(PyObject *module, PyObject *id, PyObject *code, PyObject *shared, int restricted) /*[clinic end generated code: output=492057c4f10dc304 input=5a22c1ed0c5dbcf3]*/ { PyThreadState *tstate = _PyThreadState_GET(); int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "exec code for"); if (interp == NULL) { return NULL; } // We don't need the script to be "pure", which means it can use // global variables. They will be resolved against __main__. 
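Putting set___main___attrs() and exec() together: names bound into the target interpreter's __main__ become the globals the executed script sees. A hedged sketch under the same assumptions as above:

    import _interpreters

    interp_id = _interpreters.create()
    _interpreters.set___main___attrs(interp_id, {"greeting": "hello"})
    excinfo = _interpreters.exec(interp_id, "print(greeting)")
    if excinfo is not None:
        # exec() returns None on success, or a snapshot of the uncaught exception.
        print(excinfo)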
_PyXIData_t xidata = {0}; if (_PyCode_GetScriptXIData(tstate, code, &xidata) < 0) { unwrap_not_shareable(tstate, NULL); return NULL; } struct run_result runres = {0}; int res = _run_in_interpreter( tstate, interp, &xidata, NULL, shared, &runres); _PyXIData_Release(&xidata); if (res < 0) { return _handle_script_error(&runres); } assert(runres.result == NULL); Py_RETURN_NONE; } /*[clinic input] _interpreters.run_string id: object script: unicode shared: object(subclass_of='&PyDict_Type', c_default='NULL') = {} * restrict as restricted: bool = False Execute the provided string in the identified interpreter. (See _interpreters.exec().) [clinic start generated code]*/ static PyObject * _interpreters_run_string_impl(PyObject *module, PyObject *id, PyObject *script, PyObject *shared, int restricted) /*[clinic end generated code: output=a30a64fb9ad396a2 input=51ce549b9a8dbe21]*/ { #define FUNCNAME MODULE_NAME_STR ".run_string" PyThreadState *tstate = _PyThreadState_GET(); int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "run a string in"); if (interp == NULL) { return NULL; } if (PyFunction_Check(script) || PyCode_Check(script)) { _PyArg_BadArgument(FUNCNAME, "argument 2", "a string", script); return NULL; } _PyXIData_t xidata = {0}; if (_PyCode_GetScriptXIData(tstate, script, &xidata) < 0) { unwrap_not_shareable(tstate, NULL); return NULL; } struct run_result runres = {0}; int res = _run_in_interpreter( tstate, interp, &xidata, NULL, shared, &runres); _PyXIData_Release(&xidata); if (res < 0) { return _handle_script_error(&runres); } assert(runres.result == NULL); Py_RETURN_NONE; #undef FUNCNAME } /*[clinic input] _interpreters.run_func id: object func: object shared: object(subclass_of='&PyDict_Type', c_default='NULL') = {} * restrict as restricted: bool = False Execute the body of the provided function in the identified interpreter. Code objects are also supported. In both cases, closures and args are not supported. Methods and other callables are not supported either. (See _interpreters.exec().) [clinic start generated code]*/ static PyObject * _interpreters_run_func_impl(PyObject *module, PyObject *id, PyObject *func, PyObject *shared, int restricted) /*[clinic end generated code: output=131f7202ca4a0c5e input=2d62bb9b9eaf4948]*/ { #define FUNCNAME MODULE_NAME_STR ".run_func" PyThreadState *tstate = _PyThreadState_GET(); int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "run a function in"); if (interp == NULL) { return NULL; } // We don't worry about checking globals. They will be resolved // against __main__. 
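run_string() and run_func(), documented above, reuse the same machinery as exec(); run_func() ships only the function's code object, so the function must not rely on arguments, closure cells, or its own globals. For example (same assumptions as above):

    import _interpreters

    def task():
        import sys                       # resolve everything inside the body;
        print(sys.version_info[:2])      # the function's own globals are ignored

    interp_id = _interpreters.create()
    excinfo = _interpreters.run_func(interp_id, task)   # None on success, else an exception snapshot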
PyObject *code; if (PyFunction_Check(func)) { code = PyFunction_GET_CODE(func); } else if (PyCode_Check(func)) { code = func; } else { _PyArg_BadArgument(FUNCNAME, "argument 2", "a function", func); return NULL; } _PyXIData_t xidata = {0}; if (_PyCode_GetScriptXIData(tstate, code, &xidata) < 0) { unwrap_not_shareable(tstate, NULL); return NULL; } struct run_result runres = {0}; int res = _run_in_interpreter( tstate, interp, &xidata, NULL, shared, &runres); _PyXIData_Release(&xidata); if (res < 0) { return _handle_script_error(&runres); } assert(runres.result == NULL); Py_RETURN_NONE; #undef FUNCNAME } /*[clinic input] _interpreters.call id: object callable: object args: object(subclass_of='&PyTuple_Type', c_default='NULL') = () kwargs: object(subclass_of='&PyDict_Type', c_default='NULL') = {} * preserve_exc: bool = False restrict as restricted: bool = False Call the provided object in the identified interpreter. Pass the given args and kwargs, if possible. [clinic start generated code]*/ static PyObject * _interpreters_call_impl(PyObject *module, PyObject *id, PyObject *callable, PyObject *args, PyObject *kwargs, int preserve_exc, int restricted) /*[clinic end generated code: output=b7a4a27d72df3ebc input=b026d0b212a575e6]*/ { PyThreadState *tstate = _PyThreadState_GET(); int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "make a call in"); if (interp == NULL) { return NULL; } struct interp_call call = {0}; if (_interp_call_pack(tstate, &call, callable, args, kwargs) < 0) { return NULL; } PyObject *res_and_exc = NULL; struct run_result runres = {0}; if (_run_in_interpreter(tstate, interp, NULL, &call, NULL, &runres) < 0) { if (runres.excinfo == NULL) { assert(_PyErr_Occurred(tstate)); goto finally; } assert(!_PyErr_Occurred(tstate)); } assert(runres.result == NULL || runres.excinfo == NULL); res_and_exc = Py_BuildValue("OO", (runres.result ? runres.result : Py_None), (runres.excinfo ? runres.excinfo : Py_None)); finally: _interp_call_clear(&call); _run_result_clear(&runres); return res_and_exc; } /*[clinic input] @permit_long_summary _interpreters.is_shareable obj: object Return True if the object's data may be shared between interpreters and False otherwise. [clinic start generated code]*/ static PyObject * _interpreters_is_shareable_impl(PyObject *module, PyObject *obj) /*[clinic end generated code: output=227856926a22940b input=95f888d35a6d4bb3]*/ { PyThreadState *tstate = _PyThreadState_GET(); if (_PyObject_CheckXIData(tstate, obj) == 0) { Py_RETURN_TRUE; } PyErr_Clear(); Py_RETURN_FALSE; } /*[clinic input] _interpreters.is_running id: object * restrict as restricted: bool = False Return whether or not the identified interpreter is running. [clinic start generated code]*/ static PyObject * _interpreters_is_running_impl(PyObject *module, PyObject *id, int restricted) /*[clinic end generated code: output=32a6225d5ded9bdb input=3291578d04231125]*/ { int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "check if running for"); if (interp == NULL) { return NULL; } if (is_running_main(interp)) { Py_RETURN_TRUE; } Py_RETURN_FALSE; } /*[clinic input] _interpreters.get_config id: object * restrict as restricted: bool = False Return a representation of the config used to initialize the interpreter. 
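call(), shown above, differs from exec() and run_func(): the callable plus its args and kwargs are packed (shared, or pickled as a fallback) into the target interpreter, and a (result, excinfo) pair is returned, unless the failure happens on the calling side, in which case the call raises directly. A sketch, assuming the callable and its arguments can be pickled or shared:

    import _interpreters

    interp_id = _interpreters.create()
    result, excinfo = _interpreters.call(interp_id, divmod, (7, 3))
    # On success: result == (2, 1) and excinfo is None.  If the call raises in
    # the other interpreter: result is None and excinfo is an exception snapshot.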
[clinic start generated code]*/ static PyObject * _interpreters_get_config_impl(PyObject *module, PyObject *id, int restricted) /*[clinic end generated code: output=56773353b9b7224a input=59519a01c22d96d1]*/ { if (id == Py_None) { id = NULL; } int reqready = 0; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "get the config of"); if (interp == NULL) { return NULL; } PyInterpreterConfig config; if (_PyInterpreterConfig_InitFromState(&config, interp) < 0) { return NULL; } PyObject *dict = _PyInterpreterConfig_AsDict(&config); if (dict == NULL) { return NULL; } PyObject *configobj = _PyNamespace_New(dict); Py_DECREF(dict); return configobj; } /*[clinic input] _interpreters.whence id: object Return an identifier for where the interpreter was created. [clinic start generated code]*/ static PyObject * _interpreters_whence_impl(PyObject *module, PyObject *id) /*[clinic end generated code: output=ef2c21ab106c2c20 input=eeede0a2fbfa2968]*/ { PyInterpreterState *interp = look_up_interp(id); if (interp == NULL) { return NULL; } long whence = get_whence(interp); return PyLong_FromLong(whence); } /*[clinic input] _interpreters.incref id: object * implieslink: bool = False restrict as restricted: bool = False [clinic start generated code]*/ static PyObject * _interpreters_incref_impl(PyObject *module, PyObject *id, int implieslink, int restricted) /*[clinic end generated code: output=eccaa4e03fbe8ee2 input=a0a614748f2e348c]*/ { int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "incref"); if (interp == NULL) { return NULL; } if (implieslink) { // Decref to 0 will destroy the interpreter. _PyInterpreterState_RequireIDRef(interp, 1); } _PyInterpreterState_IDIncref(interp); Py_RETURN_NONE; } /*[clinic input] _interpreters.decref id: object * restrict as restricted: bool = False [clinic start generated code]*/ static PyObject * _interpreters_decref_impl(PyObject *module, PyObject *id, int restricted) /*[clinic end generated code: output=5c54db4b22086171 input=c4aa34f09c44e62a]*/ { int reqready = 1; PyInterpreterState *interp = \ resolve_interp(id, restricted, reqready, "decref"); if (interp == NULL) { return NULL; } _PyInterpreterState_IDDecref(interp); Py_RETURN_NONE; } /*[clinic input] @permit_long_docstring_body _interpreters.capture_exception exc as exc_arg: object = None Return a snapshot of an exception. If "exc" is None then the current exception, if any, is used (but not cleared). The returned snapshot is the same as what _interpreters.exec() returns. 
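capture_exception() produces the same kind of snapshot that exec() returns for an uncaught remote exception, and, per the implementation below, also attaches a pre-rendered traceback as the "formatted" attribute. A small sketch:

    import _interpreters

    try:
        1 / 0
    except ZeroDivisionError as exc:
        snapshot = _interpreters.capture_exception(exc)

    print(snapshot.formatted)   # the formatted traceback attached by capture_exception()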
[clinic start generated code]*/ static PyObject * _interpreters_capture_exception_impl(PyObject *module, PyObject *exc_arg) /*[clinic end generated code: output=ef3f5393ef9c88a6 input=6c4dcb78fb722217]*/ { PyObject *exc = exc_arg; if (exc == NULL || exc == Py_None) { exc = PyErr_GetRaisedException(); if (exc == NULL) { Py_RETURN_NONE; } } else if (!PyExceptionInstance_Check(exc)) { PyErr_Format(PyExc_TypeError, "expected exception, got %R", exc); return NULL; } PyObject *captured = NULL; _PyXI_excinfo *info = _PyXI_NewExcInfo(exc); if (info == NULL) { goto finally; } captured = _PyXI_ExcInfoAsObject(info); if (captured == NULL) { goto finally; } PyObject *formatted = _PyXI_FormatExcInfo(info); if (formatted == NULL) { Py_CLEAR(captured); goto finally; } int res = PyObject_SetAttrString(captured, "formatted", formatted); Py_DECREF(formatted); if (res < 0) { Py_CLEAR(captured); goto finally; } finally: _PyXI_FreeExcInfo(info); if (exc != exc_arg) { if (PyErr_Occurred()) { PyErr_SetRaisedException(exc); } else { _PyErr_ChainExceptions1(exc); } } return captured; } static PyMethodDef module_functions[] = { {"new_config", _PyCFunction_CAST(interp_new_config), METH_VARARGS | METH_KEYWORDS, new_config_doc}, _INTERPRETERS_CREATE_METHODDEF _INTERPRETERS_DESTROY_METHODDEF _INTERPRETERS_LIST_ALL_METHODDEF _INTERPRETERS_GET_CURRENT_METHODDEF _INTERPRETERS_GET_MAIN_METHODDEF _INTERPRETERS_IS_RUNNING_METHODDEF _INTERPRETERS_GET_CONFIG_METHODDEF _INTERPRETERS_WHENCE_METHODDEF _INTERPRETERS_EXEC_METHODDEF _INTERPRETERS_CALL_METHODDEF _INTERPRETERS_RUN_STRING_METHODDEF _INTERPRETERS_RUN_FUNC_METHODDEF _INTERPRETERS_SET___MAIN___ATTRS_METHODDEF _INTERPRETERS_INCREF_METHODDEF _INTERPRETERS_DECREF_METHODDEF _INTERPRETERS_IS_SHAREABLE_METHODDEF _INTERPRETERS_CAPTURE_EXCEPTION_METHODDEF {NULL, NULL} /* sentinel */ }; /* initialization function */ PyDoc_STRVAR(module_doc, "This module provides primitive operations to manage Python interpreters.\n\ The 'interpreters' module provides a more convenient interface."); static int module_exec(PyObject *mod) { PyThreadState *tstate = _PyThreadState_GET(); module_state *state = get_module_state(mod); #define ADD_WHENCE(NAME) \ if (PyModule_AddIntConstant(mod, "WHENCE_" #NAME, \ _PyInterpreterState_WHENCE_##NAME) < 0) \ { \ goto error; \ } ADD_WHENCE(UNKNOWN) ADD_WHENCE(RUNTIME) ADD_WHENCE(LEGACY_CAPI) ADD_WHENCE(CAPI) ADD_WHENCE(XI) ADD_WHENCE(STDLIB) #undef ADD_WHENCE // exceptions if (PyModule_AddType(mod, (PyTypeObject *)PyExc_InterpreterError) < 0) { goto error; } if (PyModule_AddType(mod, (PyTypeObject *)PyExc_InterpreterNotFoundError) < 0) { goto error; } PyObject *exctype = _PyXIData_GetNotShareableErrorType(tstate); if (PyModule_AddType(mod, (PyTypeObject *)exctype) < 0) { goto error; } if (register_memoryview_xid(mod, &state->XIBufferViewType) < 0) { goto error; } return 0; error: return -1; } static struct PyModuleDef_Slot module_slots[] = { {Py_mod_exec, module_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL}, }; static int module_traverse(PyObject *mod, visitproc visit, void *arg) { module_state *state = get_module_state(mod); assert(state != NULL); return traverse_module_state(state, visit, arg); } static int module_clear(PyObject *mod) { module_state *state = get_module_state(mod); assert(state != NULL); return clear_module_state(state); } static void module_free(void *mod) { module_state *state = get_module_state((PyObject *)mod); assert(state != NULL); 
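module_exec() above also publishes the WHENCE_* origin markers as module-level integer constants; whence(), defined earlier, reports the same value for a given interpreter. For instance:

    import _interpreters

    interp_id = _interpreters.create()
    # Interpreters created through this module are tagged with WHENCE_STDLIB.
    assert _interpreters.whence(interp_id) == _interpreters.WHENCE_STDLIB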
(void)clear_module_state(state); } static struct PyModuleDef moduledef = { .m_base = PyModuleDef_HEAD_INIT, .m_name = MODULE_NAME_STR, .m_doc = module_doc, .m_size = sizeof(module_state), .m_methods = module_functions, .m_slots = module_slots, .m_traverse = module_traverse, .m_clear = module_clear, .m_free = module_free, }; PyMODINIT_FUNC MODINIT_FUNC_NAME(void) { return PyModuleDef_Init(&moduledef); } /* Return the initial module search path. */ #include "Python.h" #include "pycore_fileutils.h" // _Py_abspath() #include "pycore_initconfig.h" // _PyStatus_EXCEPTION() #include "pycore_pathconfig.h" // _PyPathConfig_ReadGlobal() #include "pycore_pymem.h" // _PyMem_RawWcsdup() #include "pycore_pystate.h" // _PyThreadState_GET() #include "marshal.h" // PyMarshal_ReadObjectFromString #include "osdefs.h" // DELIM #include #ifdef MS_WINDOWS # include // GetFullPathNameW(), MAX_PATH # include #endif #ifdef __APPLE__ # include #endif #ifdef HAVE_DLFCN_H # include #endif /* Reference the precompiled getpath.py */ #include "Python/frozen_modules/getpath.h" #if (!defined(PREFIX) || !defined(EXEC_PREFIX) \ || !defined(VERSION) || !defined(VPATH) \ || !defined(PLATLIBDIR)) #error "PREFIX, EXEC_PREFIX, VERSION, VPATH and PLATLIBDIR macros must be defined" #endif #if !defined(PYTHONPATH) #define PYTHONPATH NULL #endif #if !defined(PYDEBUGEXT) #define PYDEBUGEXT NULL #endif #if !defined(PYWINVER) #ifdef MS_DLL_ID #define PYWINVER MS_DLL_ID #else #define PYWINVER NULL #endif #endif #if !defined(EXE_SUFFIX) #if defined(MS_WINDOWS) || defined(__CYGWIN__) || defined(__MINGW32__) #define EXE_SUFFIX L".exe" #else #define EXE_SUFFIX NULL #endif #endif /* HELPER FUNCTIONS for getpath.py */ static PyObject * getpath_abspath(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; wchar_t *path; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } Py_ssize_t len; path = PyUnicode_AsWideCharString(pathobj, &len); if (path) { wchar_t *abs; if (_Py_abspath((const wchar_t *)_Py_normpath(path, -1), &abs) == 0 && abs) { r = PyUnicode_FromWideChar(abs, -1); PyMem_RawFree((void *)abs); } else { PyErr_SetString(PyExc_OSError, "failed to make path absolute"); } PyMem_Free((void *)path); } return r; } static PyObject * getpath_basename(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *path; if (!PyArg_ParseTuple(args, "U", &path)) { return NULL; } Py_ssize_t end = PyUnicode_GET_LENGTH(path); Py_ssize_t pos = PyUnicode_FindChar(path, SEP, 0, end, -1); if (pos < 0) { return Py_NewRef(path); } return PyUnicode_Substring(path, pos + 1, end); } static PyObject * getpath_dirname(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *path; if (!PyArg_ParseTuple(args, "U", &path)) { return NULL; } Py_ssize_t end = PyUnicode_GET_LENGTH(path); Py_ssize_t pos = PyUnicode_FindChar(path, SEP, 0, end, -1); if (pos < 0) { return Py_GetConstant(Py_CONSTANT_EMPTY_STR); } return PyUnicode_Substring(path, 0, pos); } static PyObject * getpath_isabs(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; const wchar_t *path; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } path = PyUnicode_AsWideCharString(pathobj, NULL); if (path) { r = _Py_isabs(path) ? 
Py_True : Py_False; PyMem_Free((void *)path); } return Py_XNewRef(r); } static PyObject * getpath_hassuffix(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; PyObject *suffixobj; const wchar_t *path; const wchar_t *suffix; if (!PyArg_ParseTuple(args, "UU", &pathobj, &suffixobj)) { return NULL; } Py_ssize_t len, suffixLen; path = PyUnicode_AsWideCharString(pathobj, &len); if (path) { suffix = PyUnicode_AsWideCharString(suffixobj, &suffixLen); if (suffix) { if (suffixLen > len || #ifdef MS_WINDOWS wcsicmp(&path[len - suffixLen], suffix) != 0 #else wcscmp(&path[len - suffixLen], suffix) != 0 #endif ) { r = Py_NewRef(Py_False); } else { r = Py_NewRef(Py_True); } PyMem_Free((void *)suffix); } PyMem_Free((void *)path); } return r; } static PyObject * getpath_isdir(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; const wchar_t *path; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } path = PyUnicode_AsWideCharString(pathobj, NULL); if (path) { #ifdef MS_WINDOWS DWORD attr = GetFileAttributesW(path); r = (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY) ? Py_True : Py_False; #else struct stat st; r = (_Py_wstat(path, &st) == 0) && S_ISDIR(st.st_mode) ? Py_True : Py_False; #endif PyMem_Free((void *)path); } return Py_XNewRef(r); } static PyObject * getpath_isfile(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; const wchar_t *path; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } path = PyUnicode_AsWideCharString(pathobj, NULL); if (path) { #ifdef MS_WINDOWS DWORD attr = GetFileAttributesW(path); r = (attr != INVALID_FILE_ATTRIBUTES) && !(attr & FILE_ATTRIBUTE_DIRECTORY) ? Py_True : Py_False; #else struct stat st; r = (_Py_wstat(path, &st) == 0) && S_ISREG(st.st_mode) ? Py_True : Py_False; #endif PyMem_Free((void *)path); } return Py_XNewRef(r); } static PyObject * getpath_isxfile(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; const wchar_t *path; Py_ssize_t cchPath; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } path = PyUnicode_AsWideCharString(pathobj, &cchPath); if (path) { #ifdef MS_WINDOWS DWORD attr = GetFileAttributesW(path); r = (attr != INVALID_FILE_ATTRIBUTES) && !(attr & FILE_ATTRIBUTE_DIRECTORY) && (cchPath >= 4) && (CompareStringOrdinal(path + cchPath - 4, -1, L".exe", -1, 1 /* ignore case */) == CSTR_EQUAL) ? Py_True : Py_False; #else struct stat st; r = (_Py_wstat(path, &st) == 0) && S_ISREG(st.st_mode) && (st.st_mode & 0111) ? 
Py_True : Py_False; #endif PyMem_Free((void *)path); } return Py_XNewRef(r); } static PyObject * getpath_joinpath(PyObject *Py_UNUSED(self), PyObject *args) { if (!PyTuple_Check(args)) { PyErr_SetString(PyExc_TypeError, "requires tuple of arguments"); return NULL; } Py_ssize_t n = PyTuple_GET_SIZE(args); if (n == 0) { return Py_GetConstant(Py_CONSTANT_EMPTY_STR); } /* Convert all parts to wchar and accumulate max final length */ wchar_t **parts = (wchar_t **)PyMem_Malloc(n * sizeof(wchar_t *)); if (parts == NULL) { PyErr_NoMemory(); return NULL; } memset(parts, 0, n * sizeof(wchar_t *)); Py_ssize_t cchFinal = 0; Py_ssize_t first = 0; for (Py_ssize_t i = 0; i < n; ++i) { PyObject *s = PyTuple_GET_ITEM(args, i); Py_ssize_t cch; if (s == Py_None) { cch = 0; } else if (PyUnicode_Check(s)) { parts[i] = PyUnicode_AsWideCharString(s, &cch); if (!parts[i]) { cchFinal = -1; break; } if (_Py_isabs(parts[i])) { first = i; } } else { PyErr_SetString(PyExc_TypeError, "all arguments to joinpath() must be str or None"); cchFinal = -1; break; } cchFinal += cch + 1; } wchar_t *final = cchFinal > 0 ? (wchar_t *)PyMem_Malloc(cchFinal * sizeof(wchar_t)) : NULL; if (!final) { for (Py_ssize_t i = 0; i < n; ++i) { PyMem_Free(parts[i]); } PyMem_Free(parts); if (cchFinal) { PyErr_NoMemory(); return NULL; } return Py_GetConstant(Py_CONSTANT_EMPTY_STR); } final[0] = '\0'; /* Now join all the paths. The final result should be shorter than the buffer */ for (Py_ssize_t i = 0; i < n; ++i) { if (!parts[i]) { continue; } if (i >= first && final) { if (!final[0]) { /* final is definitely long enough to fit any individual part */ wcscpy(final, parts[i]); } else if (_Py_add_relfile(final, parts[i], cchFinal) < 0) { /* if we fail, keep iterating to free memory, but stop adding parts */ PyMem_Free(final); final = NULL; } } PyMem_Free(parts[i]); } PyMem_Free(parts); if (!final) { PyErr_SetString(PyExc_SystemError, "failed to join paths"); return NULL; } PyObject *r = PyUnicode_FromWideChar(_Py_normpath(final, -1), -1); PyMem_Free(final); return r; } static PyObject * getpath_readlines(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *r = NULL; PyObject *pathobj; const wchar_t *path; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } path = PyUnicode_AsWideCharString(pathobj, NULL); if (!path) { return NULL; } FILE *fp = _Py_wfopen(path, L"rb"); if (!fp) { PyErr_SetFromErrno(PyExc_OSError); PyMem_Free((void *)path); return NULL; } PyMem_Free((void *)path); r = PyList_New(0); if (!r) { fclose(fp); return NULL; } const size_t MAX_FILE = 32 * 1024; char *buffer = (char *)PyMem_Malloc(MAX_FILE); if (!buffer) { Py_DECREF(r); fclose(fp); return NULL; } size_t cb = fread(buffer, 1, MAX_FILE, fp); fclose(fp); if (!cb) { return r; } if (cb >= MAX_FILE) { Py_DECREF(r); PyErr_SetString(PyExc_MemoryError, "cannot read file larger than 32KB during initialization"); return NULL; } buffer[cb] = '\0'; size_t len; wchar_t *wbuffer = _Py_DecodeUTF8_surrogateescape(buffer, cb, &len); PyMem_Free((void *)buffer); if (!wbuffer) { Py_DECREF(r); PyErr_NoMemory(); return NULL; } wchar_t *p1 = wbuffer; wchar_t *p2 = p1; while ((p2 = wcschr(p1, L'\n')) != NULL) { Py_ssize_t cb = p2 - p1; while (cb >= 0 && (p1[cb] == L'\n' || p1[cb] == L'\r')) { --cb; } PyObject *u = PyUnicode_FromWideChar(p1, cb >= 0 ? 
cb + 1 : 0); if (!u || PyList_Append(r, u) < 0) { Py_XDECREF(u); Py_CLEAR(r); break; } Py_DECREF(u); p1 = p2 + 1; } if (r && p1 && *p1) { PyObject *u = PyUnicode_FromWideChar(p1, -1); if (!u || PyList_Append(r, u) < 0) { Py_CLEAR(r); } Py_XDECREF(u); } PyMem_RawFree(wbuffer); return r; } static PyObject * getpath_realpath(PyObject *Py_UNUSED(self) , PyObject *args) { PyObject *pathobj; if (!PyArg_ParseTuple(args, "U", &pathobj)) { return NULL; } #if defined(HAVE_READLINK) /* This readlink calculation only resolves a symlinked file, and does not resolve any path segments. This is consistent with prior releases, however, the realpath implementation below is potentially correct in more cases. */ PyObject *r = NULL; int nlink = 0; wchar_t *path = PyUnicode_AsWideCharString(pathobj, NULL); if (!path) { goto done; } wchar_t *path2 = _PyMem_RawWcsdup(path); PyMem_Free((void *)path); path = path2; while (path) { wchar_t resolved[MAXPATHLEN + 1]; int linklen = _Py_wreadlink(path, resolved, Py_ARRAY_LENGTH(resolved)); if (linklen == -1) { r = PyUnicode_FromWideChar(path, -1); break; } if (_Py_isabs(resolved)) { PyMem_RawFree((void *)path); path = _PyMem_RawWcsdup(resolved); } else { wchar_t *s = wcsrchr(path, SEP); if (s) { *s = L'\0'; } path2 = _Py_join_relfile(path, resolved); if (path2) { path2 = _Py_normpath(path2, -1); } PyMem_RawFree((void *)path); path = path2; } nlink++; /* 40 is the Linux kernel 4.2 limit */ if (nlink >= 40) { PyErr_SetString(PyExc_OSError, "maximum number of symbolic links reached"); break; } } if (!path) { PyErr_NoMemory(); } done: PyMem_RawFree((void *)path); return r; #elif defined(HAVE_REALPATH) PyObject *r = NULL; struct stat st; const char *narrow = NULL; wchar_t *path = PyUnicode_AsWideCharString(pathobj, NULL); if (!path) { goto done; } narrow = Py_EncodeLocale(path, NULL); if (!narrow) { PyErr_NoMemory(); goto done; } if (lstat(narrow, &st)) { PyErr_SetFromErrno(PyExc_OSError); goto done; } if (!S_ISLNK(st.st_mode)) { r = Py_NewRef(pathobj); goto done; } wchar_t resolved[MAXPATHLEN+1]; if (_Py_wrealpath(path, resolved, MAXPATHLEN) == NULL) { PyErr_SetFromErrno(PyExc_OSError); } else { r = PyUnicode_FromWideChar(resolved, -1); } done: PyMem_Free((void *)path); PyMem_Free((void *)narrow); return r; #elif defined(MS_WINDOWS) HANDLE hFile; wchar_t resolved[MAXPATHLEN+1]; int len = 0, err; Py_ssize_t pathlen; PyObject *result; wchar_t *path = PyUnicode_AsWideCharString(pathobj, &pathlen); if (!path) { return NULL; } if (wcslen(path) != pathlen) { PyErr_SetString(PyExc_ValueError, "path contains embedded nulls"); return NULL; } Py_BEGIN_ALLOW_THREADS hFile = CreateFileW(path, 0, 0, NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); if (hFile != INVALID_HANDLE_VALUE) { len = GetFinalPathNameByHandleW(hFile, resolved, MAXPATHLEN, VOLUME_NAME_DOS); err = len ? 
0 : GetLastError(); CloseHandle(hFile); } else { err = GetLastError(); } Py_END_ALLOW_THREADS if (err) { PyErr_SetFromWindowsErr(err); result = NULL; } else if (len <= MAXPATHLEN) { const wchar_t *p = resolved; if (0 == wcsncmp(p, L"\\\\?\\", 4)) { if (GetFileAttributesW(&p[4]) != INVALID_FILE_ATTRIBUTES) { p += 4; len -= 4; } } if (CompareStringOrdinal(path, (int)pathlen, p, len, TRUE) == CSTR_EQUAL) { result = Py_NewRef(pathobj); } else { result = PyUnicode_FromWideChar(p, len); } } else { result = Py_NewRef(pathobj); } PyMem_Free(path); return result; #endif return Py_NewRef(pathobj); } static PyMethodDef getpath_methods[] = { {"abspath", getpath_abspath, METH_VARARGS, NULL}, {"basename", getpath_basename, METH_VARARGS, NULL}, {"dirname", getpath_dirname, METH_VARARGS, NULL}, {"hassuffix", getpath_hassuffix, METH_VARARGS, NULL}, {"isabs", getpath_isabs, METH_VARARGS, NULL}, {"isdir", getpath_isdir, METH_VARARGS, NULL}, {"isfile", getpath_isfile, METH_VARARGS, NULL}, {"isxfile", getpath_isxfile, METH_VARARGS, NULL}, {"joinpath", getpath_joinpath, METH_VARARGS, NULL}, {"readlines", getpath_readlines, METH_VARARGS, NULL}, {"realpath", getpath_realpath, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} }; /* Two implementations of warn() to use depending on whether warnings are enabled or not. */ static PyObject * getpath_warn(PyObject *Py_UNUSED(self), PyObject *args) { PyObject *msgobj; if (!PyArg_ParseTuple(args, "U", &msgobj)) { return NULL; } fprintf(stderr, "%s\n", PyUnicode_AsUTF8(msgobj)); Py_RETURN_NONE; } static PyObject * getpath_nowarn(PyObject *Py_UNUSED(self), PyObject *args) { Py_RETURN_NONE; } static PyMethodDef getpath_warn_method = {"warn", getpath_warn, METH_VARARGS, NULL}; static PyMethodDef getpath_nowarn_method = {"warn", getpath_nowarn, METH_VARARGS, NULL}; /* Add the helper functions to the dict */ static int funcs_to_dict(PyObject *dict, int warnings) { for (PyMethodDef *m = getpath_methods; m->ml_name; ++m) { PyObject *f = PyCFunction_NewEx(m, NULL, NULL); if (!f) { return 0; } if (PyDict_SetItemString(dict, m->ml_name, f) < 0) { Py_DECREF(f); return 0; } Py_DECREF(f); } PyMethodDef *m2 = warnings ? 
&getpath_warn_method : &getpath_nowarn_method; PyObject *f = PyCFunction_NewEx(m2, NULL, NULL); if (!f) { return 0; } if (PyDict_SetItemString(dict, m2->ml_name, f) < 0) { Py_DECREF(f); return 0; } Py_DECREF(f); return 1; } /* Add a wide-character string constant to the dict */ static int wchar_to_dict(PyObject *dict, const char *key, const wchar_t *s) { PyObject *u; int r; if (s && s[0]) { u = PyUnicode_FromWideChar(s, -1); if (!u) { return 0; } } else { u = Py_NewRef(Py_None); } r = PyDict_SetItemString(dict, key, u) == 0; Py_DECREF(u); return r; } /* Add a narrow string constant to the dict, using default locale decoding */ static int decode_to_dict(PyObject *dict, const char *key, const char *s) { PyObject *u = NULL; int r; if (s && s[0]) { size_t len; const wchar_t *w = Py_DecodeLocale(s, &len); if (w) { u = PyUnicode_FromWideChar(w, len); PyMem_RawFree((void *)w); } if (!u) { return 0; } } else { u = Py_NewRef(Py_None); } r = PyDict_SetItemString(dict, key, u) == 0; Py_DECREF(u); return r; } /* Add an environment variable to the dict, optionally clearing it afterwards */ static int env_to_dict(PyObject *dict, const char *key, int and_clear) { PyObject *u = NULL; int r = 0; assert(strncmp(key, "ENV_", 4) == 0); assert(strlen(key) < 64); #ifdef MS_WINDOWS wchar_t wkey[64]; // Quick convert to wchar_t, since we know key is ASCII wchar_t *wp = wkey; for (const char *p = &key[4]; *p; ++p) { assert(!(*p & 0x80)); *wp++ = *p; } *wp = L'\0'; const wchar_t *v = _wgetenv(wkey); if (v) { u = PyUnicode_FromWideChar(v, -1); if (!u) { PyErr_Clear(); } } #else const char *v = getenv(&key[4]); if (v) { size_t len; const wchar_t *w = Py_DecodeLocale(v, &len); if (w) { u = PyUnicode_FromWideChar(w, len); if (!u) { PyErr_Clear(); } PyMem_RawFree((void *)w); } } #endif if (u) { r = PyDict_SetItemString(dict, key, u) == 0; Py_DECREF(u); } else { r = PyDict_SetItemString(dict, key, Py_None) == 0; } if (r && and_clear) { #ifdef MS_WINDOWS _wputenv_s(wkey, L""); #else unsetenv(&key[4]); #endif } return r; } /* Add an integer constant to the dict */ static int int_to_dict(PyObject *dict, const char *key, int v) { PyObject *o; int r; o = PyLong_FromLong(v); if (!o) { return 0; } r = PyDict_SetItemString(dict, key, o) == 0; Py_DECREF(o); return r; } #ifdef MS_WINDOWS static int winmodule_to_dict(PyObject *dict, const char *key, HMODULE mod) { wchar_t *buffer = NULL; for (DWORD cch = 256; buffer == NULL && cch < (1024 * 1024); cch *= 2) { buffer = (wchar_t*)PyMem_RawMalloc(cch * sizeof(wchar_t)); if (buffer) { if (GetModuleFileNameW(mod, buffer, cch) == cch) { PyMem_RawFree(buffer); buffer = NULL; } } } int r = wchar_to_dict(dict, key, buffer); PyMem_RawFree(buffer); return r; } #endif /* Add the current executable's path to the dict */ static int progname_to_dict(PyObject *dict, const char *key) { #ifdef MS_WINDOWS return winmodule_to_dict(dict, key, NULL); #elif defined(__APPLE__) char *path; uint32_t pathLen = 256; while (pathLen) { path = PyMem_RawMalloc((pathLen + 1) * sizeof(char)); if (!path) { return 0; } if (_NSGetExecutablePath(path, &pathLen) != 0) { PyMem_RawFree(path); continue; } // Only keep if the path is absolute if (path[0] == SEP) { int r = decode_to_dict(dict, key, path); PyMem_RawFree(path); return r; } // Fall back and store None PyMem_RawFree(path); break; } #endif return PyDict_SetItemString(dict, key, Py_None) == 0; } /* Add the runtime library's path to the dict */ static int library_to_dict(PyObject *dict, const char *key) { /* macOS framework builds do not link against a libpython 
dynamic library, but instead link against a macOS Framework. */ #if defined(Py_ENABLE_SHARED) || defined(WITH_NEXT_FRAMEWORK) #ifdef MS_WINDOWS extern HMODULE PyWin_DLLhModule; if (PyWin_DLLhModule) { return winmodule_to_dict(dict, key, PyWin_DLLhModule); } #endif #if HAVE_DLADDR Dl_info libpython_info; if (dladdr(&Py_Initialize, &libpython_info) && libpython_info.dli_fname) { return decode_to_dict(dict, key, libpython_info.dli_fname); } #endif #endif return PyDict_SetItemString(dict, key, Py_None) == 0; } PyObject * _Py_Get_Getpath_CodeObject(void) { return PyMarshal_ReadObjectFromString( (const char*)_Py_M__getpath, sizeof(_Py_M__getpath)); } /* Perform the actual path calculation. When compute_path_config is 0, this only reads any initialised path config values into the PyConfig struct. For example, Py_SetHome() or Py_SetPath(). The only error should be due to failed memory allocation. When compute_path_config is 1, full path calculation is performed. The GIL must be held, and there may be filesystem access, side effects, and potential unraisable errors that are reported directly to stderr. Calling this function multiple times on the same PyConfig is only safe because already-configured values are not recalculated. To actually recalculate paths, you need a clean PyConfig. */ PyStatus _PyConfig_InitPathConfig(PyConfig *config, int compute_path_config) { PyStatus status = _PyPathConfig_ReadGlobal(config); if (_PyStatus_EXCEPTION(status) || !compute_path_config) { return status; } if (!_PyThreadState_GET()) { return PyStatus_Error("cannot calculate path configuration without GIL"); } PyObject *configDict = _PyConfig_AsDict(config); if (!configDict) { PyErr_Clear(); return PyStatus_NoMemory(); } PyObject *dict = PyDict_New(); if (!dict) { PyErr_Clear(); Py_DECREF(configDict); return PyStatus_NoMemory(); } if (PyDict_SetItemString(dict, "config", configDict) < 0) { PyErr_Clear(); Py_DECREF(configDict); Py_DECREF(dict); return PyStatus_NoMemory(); } /* reference now held by dict */ Py_DECREF(configDict); PyObject *co = _Py_Get_Getpath_CodeObject(); if (!co || !PyCode_Check(co)) { PyErr_Clear(); Py_XDECREF(co); Py_DECREF(dict); return PyStatus_Error("error reading frozen getpath.py"); } #ifdef MS_WINDOWS PyObject *winreg = PyImport_ImportModule("winreg"); if (!winreg || PyDict_SetItemString(dict, "winreg", winreg) < 0) { PyErr_Clear(); Py_XDECREF(winreg); if (PyDict_SetItemString(dict, "winreg", Py_None) < 0) { PyErr_Clear(); Py_DECREF(co); Py_DECREF(dict); return PyStatus_Error("error importing winreg module"); } } else { Py_DECREF(winreg); } #endif if ( #ifdef MS_WINDOWS !decode_to_dict(dict, "os_name", "nt") || #elif defined(__APPLE__) !decode_to_dict(dict, "os_name", "darwin") || #else !decode_to_dict(dict, "os_name", "posix") || #endif #ifdef WITH_NEXT_FRAMEWORK !int_to_dict(dict, "WITH_NEXT_FRAMEWORK", 1) || #else !int_to_dict(dict, "WITH_NEXT_FRAMEWORK", 0) || #endif !decode_to_dict(dict, "PREFIX", PREFIX) || !decode_to_dict(dict, "EXEC_PREFIX", EXEC_PREFIX) || !decode_to_dict(dict, "PYTHONPATH", PYTHONPATH) || !decode_to_dict(dict, "VPATH", VPATH) || !decode_to_dict(dict, "PLATLIBDIR", PLATLIBDIR) || !decode_to_dict(dict, "PYDEBUGEXT", PYDEBUGEXT) || !int_to_dict(dict, "VERSION_MAJOR", PY_MAJOR_VERSION) || !int_to_dict(dict, "VERSION_MINOR", PY_MINOR_VERSION) || !decode_to_dict(dict, "PYWINVER", PYWINVER) || !wchar_to_dict(dict, "EXE_SUFFIX", EXE_SUFFIX) || !env_to_dict(dict, "ENV_PATH", 0) || !env_to_dict(dict, "ENV_PYTHONHOME", 0) || !env_to_dict(dict, "ENV_PYTHONEXECUTABLE", 0) || 
!env_to_dict(dict, "ENV___PYVENV_LAUNCHER__", 1) || !progname_to_dict(dict, "real_executable") || !library_to_dict(dict, "library") || !wchar_to_dict(dict, "executable_dir", NULL) || !wchar_to_dict(dict, "py_setpath", _PyPathConfig_GetGlobalModuleSearchPath()) || !funcs_to_dict(dict, config->pathconfig_warnings) || #ifdef Py_GIL_DISABLED !decode_to_dict(dict, "ABI_THREAD", "t") || #else !decode_to_dict(dict, "ABI_THREAD", "") || #endif #ifndef MS_WINDOWS PyDict_SetItemString(dict, "winreg", Py_None) < 0 || #endif PyDict_SetItemString(dict, "__builtins__", PyEval_GetBuiltins()) < 0 ) { Py_DECREF(co); Py_DECREF(dict); PyErr_FormatUnraisable("Exception ignored while preparing getpath"); return PyStatus_Error("error evaluating initial values"); } PyObject *r = PyEval_EvalCode(co, dict, dict); Py_DECREF(co); if (!r) { Py_DECREF(dict); PyErr_FormatUnraisable("Exception ignored while running getpath"); return PyStatus_Error("error evaluating path"); } Py_DECREF(r); if (_PyConfig_FromDict(config, configDict) < 0) { PyErr_FormatUnraisable("Exception ignored while reading getpath results"); Py_DECREF(dict); return PyStatus_Error("error getting getpath results"); } Py_DECREF(dict); return _PyStatus_OK(); } /* GDBM module using dictionary interface */ /* Author: Anthony Baxter, after dbmmodule.c */ /* Doc strings: Mitch Chapman */ // clinic/_gdbmmodule.c.h uses internal pycore_modsupport.h API #ifndef Py_BUILD_CORE_BUILTIN # define Py_BUILD_CORE_MODULE 1 #endif #include "Python.h" #include "pycore_object.h" // _PyObject_VisitType() #include "pycore_pyerrors.h" // _PyErr_SetLocaleString() #include "gdbm.h" #include #include // free() #include #include #if defined(WIN32) && !defined(__CYGWIN__) #include "gdbmerrno.h" extern const char * gdbm_strerror(gdbm_error); #endif typedef struct { PyTypeObject *gdbm_type; PyObject *gdbm_error; } _gdbm_state; static inline _gdbm_state* get_gdbm_state(PyObject *module) { void *state = PyModule_GetState(module); assert(state != NULL); return (_gdbm_state *)state; } /* * Set the gdbm error obtained by gdbm_strerror(gdbm_errno). * * If no error message exists, a generic (UTF-8) error message * is used instead. */ static void set_gdbm_error(_gdbm_state *state, const char *generic_error) { const char *gdbm_errmsg = gdbm_strerror(gdbm_errno); if (gdbm_errmsg) { _PyErr_SetLocaleString(state->gdbm_error, gdbm_errmsg); } else { PyErr_SetString(state->gdbm_error, generic_error); } } /*[clinic input] module _gdbm class _gdbm.gdbm "gdbmobject *" "&Gdbmtype" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=38ae71cedfc7172b]*/ PyDoc_STRVAR(gdbmmodule__doc__, "This module provides an interface to the GNU DBM (GDBM) library.\n\ \n\ This module is quite similar to the dbm module, but uses GDBM instead to\n\ provide some additional functionality. Please note that the file formats\n\ created by GDBM and dbm are incompatible.\n\ \n\ GDBM objects behave like mappings (dictionaries), except that keys and\n\ values are always immutable bytes-like objects or strings. 
Printing\n\ a GDBM object doesn't print the keys and values, and the items() and\n\ values() methods are not supported."); typedef struct { PyObject_HEAD Py_ssize_t di_size; /* -1 means recompute */ GDBM_FILE di_dbm; } gdbmobject; #define _gdbmobject_CAST(op) ((gdbmobject *)(op)) #include "clinic/_gdbmmodule.c.h" #define check_gdbmobject_open(v, err) \ _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED((v)) \ if ((v)->di_dbm == NULL) { \ PyErr_SetString(err, "GDBM object has already been closed"); \ return NULL; \ } PyDoc_STRVAR(gdbm_object__doc__, "This object represents a GDBM database.\n\ GDBM objects behave like mappings (dictionaries), except that keys and\n\ values are always immutable bytes-like objects or strings. Printing\n\ a GDBM object doesn't print the keys and values, and the items() and\n\ values() methods are not supported.\n\ \n\ GDBM objects also support additional operations such as firstkey,\n\ nextkey, reorganize, and sync."); static PyObject * newgdbmobject(_gdbm_state *state, const char *file, int flags, int mode) { gdbmobject *dp = PyObject_GC_New(gdbmobject, state->gdbm_type); if (dp == NULL) { return NULL; } dp->di_size = -1; errno = 0; PyObject_GC_Track(dp); if ((dp->di_dbm = gdbm_open((char *)file, 0, flags, mode, NULL)) == 0) { if (errno != 0) { PyErr_SetFromErrnoWithFilename(state->gdbm_error, file); } else { set_gdbm_error(state, "gdbm_open() error"); } Py_DECREF(dp); return NULL; } return (PyObject *)dp; } /* Methods */ static void gdbm_dealloc(PyObject *op) { gdbmobject *dp = _gdbmobject_CAST(op); PyTypeObject *tp = Py_TYPE(dp); PyObject_GC_UnTrack(dp); if (dp->di_dbm) { gdbm_close(dp->di_dbm); } tp->tp_free(dp); Py_DECREF(tp); } static Py_ssize_t gdbm_length_lock_held(PyObject *op) { gdbmobject *dp = _gdbmobject_CAST(op); _gdbm_state *state = PyType_GetModuleState(Py_TYPE(dp)); if (dp->di_dbm == NULL) { PyErr_SetString(state->gdbm_error, "GDBM object has already been closed"); return -1; } if (dp->di_size < 0) { #if GDBM_VERSION_MAJOR >= 1 && GDBM_VERSION_MINOR >= 11 errno = 0; gdbm_count_t count; if (gdbm_count(dp->di_dbm, &count) == -1) { if (errno != 0) { PyErr_SetFromErrno(state->gdbm_error); } else { set_gdbm_error(state, "gdbm_count() error"); } return -1; } if (count > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_OverflowError, "count exceeds PY_SSIZE_T_MAX"); return -1; } dp->di_size = count; #else datum key,okey; okey.dsize=0; okey.dptr=NULL; Py_ssize_t size = 0; for (key = gdbm_firstkey(dp->di_dbm); key.dptr; key = gdbm_nextkey(dp->di_dbm,okey)) { size++; if (okey.dsize) { free(okey.dptr); } okey=key; } dp->di_size = size; #endif } return dp->di_size; } static Py_ssize_t gdbm_length(PyObject *op) { Py_ssize_t result; Py_BEGIN_CRITICAL_SECTION(op); result = gdbm_length_lock_held(op); Py_END_CRITICAL_SECTION(); return result; } static int gdbm_bool_lock_held(PyObject *op) { gdbmobject *dp = _gdbmobject_CAST(op); _gdbm_state *state = PyType_GetModuleState(Py_TYPE(dp)); if (dp->di_dbm == NULL) { PyErr_SetString(state->gdbm_error, "GDBM object has already been closed"); return -1; } if (dp->di_size > 0) { /* Known non-zero size. */ return 1; } if (dp->di_size == 0) { /* Known zero size. */ return 0; } /* Unknown size. Ensure DBM object has an entry. */ datum key = gdbm_firstkey(dp->di_dbm); if (key.dptr == NULL) { /* Empty. Cache this fact. */ dp->di_size = 0; return 0; } /* Non-empty. Don't cache the length since we don't know. 
*/ free(key.dptr); return 1; } static int gdbm_bool(PyObject *op) { int result; Py_BEGIN_CRITICAL_SECTION(op); result = gdbm_bool_lock_held(op); Py_END_CRITICAL_SECTION(); return result; } // Wrapper function for PyArg_Parse(o, "s#", &d.dptr, &d.size). // This function is needed to support PY_SSIZE_T_CLEAN. // Return 1 on success, same to PyArg_Parse(). static int parse_datum(PyObject *o, datum *d, const char *failmsg) { Py_ssize_t size; if (!PyArg_Parse(o, "s#", &d->dptr, &size)) { if (failmsg != NULL) { PyErr_SetString(PyExc_TypeError, failmsg); } return 0; } if (INT_MAX < size) { PyErr_SetString(PyExc_OverflowError, "size does not fit in an int"); return 0; } d->dsize = size; return 1; } static PyObject * gdbm_subscript_lock_held(PyObject *op, PyObject *key) { PyObject *v; datum drec, krec; gdbmobject *dp = _gdbmobject_CAST(op); _gdbm_state *state = PyType_GetModuleState(Py_TYPE(dp)); if (!parse_datum(key, &krec, NULL)) { return NULL; } if (dp->di_dbm == NULL) { PyErr_SetString(state->gdbm_error, "GDBM object has already been closed"); return NULL; } drec = gdbm_fetch(dp->di_dbm, krec); if (drec.dptr == 0) { PyErr_SetObject(PyExc_KeyError, key); return NULL; } v = PyBytes_FromStringAndSize(drec.dptr, drec.dsize); free(drec.dptr); return v; } static PyObject * gdbm_subscript(PyObject *op, PyObject *key) { PyObject *result; Py_BEGIN_CRITICAL_SECTION(op); result = gdbm_subscript_lock_held(op, key); Py_END_CRITICAL_SECTION(); return result; } /*[clinic input] _gdbm.gdbm.get key: object default: object = None / Get the value for key, or default if not present. [clinic start generated code]*/ static PyObject * _gdbm_gdbm_get_impl(gdbmobject *self, PyObject *key, PyObject *default_value) /*[clinic end generated code: output=92421838f3a852f4 input=a9c20423f34c17b6]*/ { PyObject *res; res = gdbm_subscript((PyObject *)self, key); if (res == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) { PyErr_Clear(); return Py_NewRef(default_value); } return res; } static int gdbm_ass_sub_lock_held(PyObject *op, PyObject *v, PyObject *w) { datum krec, drec; const char *failmsg = "gdbm mappings have bytes or string indices only"; gdbmobject *dp = _gdbmobject_CAST(op); _gdbm_state *state = PyType_GetModuleState(Py_TYPE(dp)); if (!parse_datum(v, &krec, failmsg)) { return -1; } if (dp->di_dbm == NULL) { PyErr_SetString(state->gdbm_error, "GDBM object has already been closed"); return -1; } dp->di_size = -1; if (w == NULL) { if (gdbm_delete(dp->di_dbm, krec) < 0) { if (gdbm_errno == GDBM_ITEM_NOT_FOUND) { PyErr_SetObject(PyExc_KeyError, v); } else { set_gdbm_error(state, "gdbm_delete() error"); } return -1; } } else { if (!parse_datum(w, &drec, failmsg)) { return -1; } errno = 0; if (gdbm_store(dp->di_dbm, krec, drec, GDBM_REPLACE) < 0) { if (errno != 0) { PyErr_SetFromErrno(state->gdbm_error); } else { set_gdbm_error(state, "gdbm_store() error"); } return -1; } } return 0; } static int gdbm_ass_sub(PyObject *op, PyObject *v, PyObject *w) { int result; Py_BEGIN_CRITICAL_SECTION(op); result = gdbm_ass_sub_lock_held(op, v, w); Py_END_CRITICAL_SECTION(); return result; } /*[clinic input] @permit_long_summary @critical_section _gdbm.gdbm.setdefault key: object default: object = None / Get value for key, or set it to default and return default if not present. 
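For illustration (a sketch only; db is assumed to be an already opened, writable GDBM object, and the key and value are arbitrary):

    value = db.setdefault(b'counter', b'0')   # stores b'0' only if b'counter' is absent
    # the result is returned as bytes, whether it was just stored or already present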
[clinic start generated code]*/ static PyObject * _gdbm_gdbm_setdefault_impl(gdbmobject *self, PyObject *key, PyObject *default_value) /*[clinic end generated code: output=f3246e880509f142 input=f4008b358165bbb8]*/ { PyObject *res; res = gdbm_subscript((PyObject *)self, key); if (res == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) { PyErr_Clear(); if (gdbm_ass_sub((PyObject *)self, key, default_value) < 0) return NULL; return gdbm_subscript((PyObject *)self, key); } return res; } /*[clinic input] @critical_section _gdbm.gdbm.close Close the database. [clinic start generated code]*/ static PyObject * _gdbm_gdbm_close_impl(gdbmobject *self) /*[clinic end generated code: output=f5abb4d6bb9e52d5 input=56b604f4e77f533d]*/ { if (self->di_dbm) { gdbm_close(self->di_dbm); } self->di_dbm = NULL; Py_RETURN_NONE; } /* XXX Should return a set or a set view */ /*[clinic input] @critical_section _gdbm.gdbm.keys cls: defining_class Get a list of all keys in the database. [clinic start generated code]*/ static PyObject * _gdbm_gdbm_keys_impl(gdbmobject *self, PyTypeObject *cls) /*[clinic end generated code: output=c24b824e81404755 input=785988b1ea8f77e0]*/ { PyObject *v, *item; datum key, nextkey; int err; _gdbm_state *state = PyType_GetModuleState(cls); assert(state != NULL); if (self == NULL || !Py_IS_TYPE(self, state->gdbm_type)) { PyErr_BadInternalCall(); return NULL; } check_gdbmobject_open(self, state->gdbm_error); v = PyList_New(0); if (v == NULL) return NULL; key = gdbm_firstkey(self->di_dbm); while (key.dptr) { item = PyBytes_FromStringAndSize(key.dptr, key.dsize); if (item == NULL) { free(key.dptr); Py_DECREF(v); return NULL; } err = PyList_Append(v, item); Py_DECREF(item); if (err != 0) { free(key.dptr); Py_DECREF(v); return NULL; } nextkey = gdbm_nextkey(self->di_dbm, key); free(key.dptr); key = nextkey; } return v; } static int gdbm_contains_lock_held(PyObject *self, PyObject *arg) { gdbmobject *dp = (gdbmobject *)self; datum key; Py_ssize_t size; _gdbm_state *state = PyType_GetModuleState(Py_TYPE(dp)); if ((dp)->di_dbm == NULL) { PyErr_SetString(state->gdbm_error, "GDBM object has already been closed"); return -1; } if (PyUnicode_Check(arg)) { key.dptr = (char *)PyUnicode_AsUTF8AndSize(arg, &size); key.dsize = size; if (key.dptr == NULL) return -1; } else if (!PyBytes_Check(arg)) { PyErr_Format(PyExc_TypeError, "gdbm key must be bytes or string, not %.100s", Py_TYPE(arg)->tp_name); return -1; } else { key.dptr = PyBytes_AS_STRING(arg); key.dsize = PyBytes_GET_SIZE(arg); } return gdbm_exists(dp->di_dbm, key); } static int gdbm_contains(PyObject *self, PyObject *arg) { int result; Py_BEGIN_CRITICAL_SECTION(self); result = gdbm_contains_lock_held(self, arg); Py_END_CRITICAL_SECTION(); return result; } /*[clinic input] @critical_section _gdbm.gdbm.firstkey cls: defining_class Return the starting key for the traversal. It's possible to loop over every key in the database using this method and the nextkey() method. The traversal is ordered by GDBM's internal hash values, and won't be sorted by the key values. 
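For example, firstkey() can also be used on its own to test whether the database has any entries (db is assumed to be an open GDBM object):

    if db.firstkey() is None:
        print('database is empty')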
[clinic start generated code]*/ static PyObject * _gdbm_gdbm_firstkey_impl(gdbmobject *self, PyTypeObject *cls) /*[clinic end generated code: output=139275e9c8b60827 input=aad5a7c886c542f5]*/ { PyObject *v; datum key; _gdbm_state *state = PyType_GetModuleState(cls); assert(state != NULL); check_gdbmobject_open(self, state->gdbm_error); key = gdbm_firstkey(self->di_dbm); if (key.dptr) { v = PyBytes_FromStringAndSize(key.dptr, key.dsize); free(key.dptr); return v; } else { Py_RETURN_NONE; } } /*[clinic input] @critical_section _gdbm.gdbm.nextkey cls: defining_class key: str(accept={str, robuffer}, zeroes=True) / Returns the key that follows key in the traversal. The following code prints every key in the database db, without having to create a list in memory that contains them all: k = db.firstkey() while k is not None: print(k) k = db.nextkey(k) [clinic start generated code]*/ static PyObject * _gdbm_gdbm_nextkey_impl(gdbmobject *self, PyTypeObject *cls, const char *key, Py_ssize_t key_length) /*[clinic end generated code: output=c81a69300ef41766 input=181f1130d5bfeb1e]*/ { PyObject *v; datum dbm_key, nextkey; _gdbm_state *state = PyType_GetModuleState(cls); assert(state != NULL); dbm_key.dptr = (char *)key; dbm_key.dsize = key_length; check_gdbmobject_open(self, state->gdbm_error); nextkey = gdbm_nextkey(self->di_dbm, dbm_key); if (nextkey.dptr) { v = PyBytes_FromStringAndSize(nextkey.dptr, nextkey.dsize); free(nextkey.dptr); return v; } else { Py_RETURN_NONE; } } /*[clinic input] @critical_section _gdbm.gdbm.reorganize cls: defining_class Reorganize the database. If you have carried out a lot of deletions and would like to shrink the space used by the GDBM file, this routine will reorganize the database. GDBM will not shorten the length of a database file except by using this reorganization; otherwise, deleted file space will be kept and reused as new (key,value) pairs are added. [clinic start generated code]*/ static PyObject * _gdbm_gdbm_reorganize_impl(gdbmobject *self, PyTypeObject *cls) /*[clinic end generated code: output=d77c69e8e3dd644a input=3e3ca0d2ea787861]*/ { _gdbm_state *state = PyType_GetModuleState(cls); assert(state != NULL); check_gdbmobject_open(self, state->gdbm_error); errno = 0; if (gdbm_reorganize(self->di_dbm) < 0) { if (errno != 0) { PyErr_SetFromErrno(state->gdbm_error); } else { set_gdbm_error(state, "gdbm_reorganize() error"); } return NULL; } Py_RETURN_NONE; } /*[clinic input] @critical_section _gdbm.gdbm.sync cls: defining_class Flush the database to the disk file. When the database has been opened in fast mode, this method forces any unwritten data to be written to the disk. [clinic start generated code]*/ static PyObject * _gdbm_gdbm_sync_impl(gdbmobject *self, PyTypeObject *cls) /*[clinic end generated code: output=bb680a2035c3f592 input=6054385b071d238a]*/ { _gdbm_state *state = PyType_GetModuleState(cls); assert(state != NULL); check_gdbmobject_open(self, state->gdbm_error); gdbm_sync(self->di_dbm); Py_RETURN_NONE; } /*[clinic input] @critical_section _gdbm.gdbm.clear cls: defining_class / Remove all items from the database. 
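A minimal sketch, assuming the dbm.gnu wrapper and an arbitrary file name; the object can also be used as a context manager:

    import dbm.gnu
    with dbm.gnu.open('cache.gdbm', 'c') as db:
        db.clear()          # drop every key/value pair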
[clinic start generated code]*/ static PyObject * _gdbm_gdbm_clear_impl(gdbmobject *self, PyTypeObject *cls) /*[clinic end generated code: output=673577c573318661 input=b17467adfe62f23d]*/ { _gdbm_state *state = PyType_GetModuleState(cls); assert(state != NULL); check_gdbmobject_open(self, state->gdbm_error); datum key; // Invalidate cache self->di_size = -1; while (1) { key = gdbm_firstkey(self->di_dbm); if (key.dptr == NULL) { break; } if (gdbm_delete(self->di_dbm, key) < 0) { PyErr_SetString(state->gdbm_error, "cannot delete item from database"); free(key.dptr); return NULL; } free(key.dptr); } Py_RETURN_NONE; } static PyObject * gdbm__enter__(PyObject *self, PyObject *args) { return Py_NewRef(self); } static PyObject * gdbm__exit__(PyObject *self, PyObject *args) { PyObject *result; Py_BEGIN_CRITICAL_SECTION(self); result = _gdbm_gdbm_close_impl((gdbmobject *)self); Py_END_CRITICAL_SECTION(); return result; } static PyMethodDef gdbm_methods[] = { _GDBM_GDBM_CLOSE_METHODDEF _GDBM_GDBM_KEYS_METHODDEF _GDBM_GDBM_FIRSTKEY_METHODDEF _GDBM_GDBM_NEXTKEY_METHODDEF _GDBM_GDBM_REORGANIZE_METHODDEF _GDBM_GDBM_SYNC_METHODDEF _GDBM_GDBM_GET_METHODDEF _GDBM_GDBM_SETDEFAULT_METHODDEF _GDBM_GDBM_CLEAR_METHODDEF {"__enter__", gdbm__enter__, METH_NOARGS, NULL}, {"__exit__", gdbm__exit__, METH_VARARGS, NULL}, {NULL, NULL} /* sentinel */ }; static PyType_Slot gdbmtype_spec_slots[] = { {Py_tp_dealloc, gdbm_dealloc}, {Py_tp_traverse, _PyObject_VisitType}, {Py_tp_methods, gdbm_methods}, {Py_sq_contains, gdbm_contains}, {Py_mp_length, gdbm_length}, {Py_mp_subscript, gdbm_subscript}, {Py_mp_ass_subscript, gdbm_ass_sub}, {Py_nb_bool, gdbm_bool}, {Py_tp_doc, (char*)gdbm_object__doc__}, {0, 0} }; static PyType_Spec gdbmtype_spec = { .name = "_gdbm.gdbm", .basicsize = sizeof(gdbmobject), // Calling PyType_GetModuleState() on a subclass is not safe. // dbmtype_spec does not have Py_TPFLAGS_BASETYPE flag // which prevents to create a subclass. // So calling PyType_GetModuleState() in this file is always safe. .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_IMMUTABLETYPE), .slots = gdbmtype_spec_slots, }; /* ----------------------------------------------------------------- */ /*[clinic input] _gdbm.open as dbmopen filename: object flags: str="r" mode: int(py_default="0o666") = 0o666 / Open a dbm database and return a dbm object. The filename argument is the name of the database file. The optional flags argument can be 'r' (to open an existing database for reading only -- default), 'w' (to open an existing database for reading and writing), 'c' (which creates the database if it doesn't exist), or 'n' (which always creates a new empty database). Some versions of gdbm support additional flags which must be appended to one of the flags described above. The module constant 'open_flags' is a string of valid additional flags. The 'f' flag opens the database in fast mode; altered data will not automatically be written to the disk after every change. This results in faster writes to the database, but may result in an inconsistent database if the program crashes while the database is still open. Use the sync() method to force any unwritten data to be written to the disk. The 's' flag causes all database operations to be synchronized to disk. The 'u' flag disables locking of the database file. The optional mode argument is the Unix mode of the file, used only when the database has to be created. It defaults to octal 0o666. 
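A typical call from Python goes through the dbm.gnu wrapper; the file name and mode below are arbitrary examples:

    import dbm.gnu
    db = dbm.gnu.open('example.gdbm', 'cf', 0o600)   # create if missing, fast mode
    db[b'key'] = b'value'
    db.sync()                                        # flush pending writes ('f' mode)
    db.close()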
[clinic start generated code]*/ static PyObject * dbmopen_impl(PyObject *module, PyObject *filename, const char *flags, int mode) /*[clinic end generated code: output=9527750f5df90764 input=bca6ec81dc49292c]*/ { int iflags; _gdbm_state *state = get_gdbm_state(module); assert(state != NULL); switch (flags[0]) { case 'r': iflags = GDBM_READER; break; case 'w': iflags = GDBM_WRITER; break; case 'c': iflags = GDBM_WRCREAT; break; case 'n': iflags = GDBM_NEWDB; break; default: PyErr_SetString(state->gdbm_error, "First flag must be one of 'r', 'w', 'c' or 'n'"); return NULL; } for (flags++; *flags != '\0'; flags++) { switch (*flags) { #ifdef GDBM_FAST case 'f': iflags |= GDBM_FAST; break; #endif #ifdef GDBM_SYNC case 's': iflags |= GDBM_SYNC; break; #endif #ifdef GDBM_NOLOCK case 'u': iflags |= GDBM_NOLOCK; break; #endif default: PyErr_Format(state->gdbm_error, "Flag '%c' is not supported.", (unsigned char)*flags); return NULL; } } PyObject *filenamebytes; if (!PyUnicode_FSConverter(filename, &filenamebytes)) { return NULL; } const char *name = PyBytes_AS_STRING(filenamebytes); if (strlen(name) != (size_t)PyBytes_GET_SIZE(filenamebytes)) { Py_DECREF(filenamebytes); PyErr_SetString(PyExc_ValueError, "embedded null character"); return NULL; } PyObject *self = newgdbmobject(state, name, iflags, mode); Py_DECREF(filenamebytes); return self; } static const char gdbmmodule_open_flags[] = "rwcn" #ifdef GDBM_FAST "f" #endif #ifdef GDBM_SYNC "s" #endif #ifdef GDBM_NOLOCK "u" #endif ; static PyMethodDef _gdbm_module_methods[] = { DBMOPEN_METHODDEF { 0, 0 }, }; static int _gdbm_exec(PyObject *module) { _gdbm_state *state = get_gdbm_state(module); state->gdbm_type = (PyTypeObject *)PyType_FromModuleAndSpec(module, &gdbmtype_spec, NULL); if (state->gdbm_type == NULL) { return -1; } state->gdbm_error = PyErr_NewException("_gdbm.error", PyExc_OSError, NULL); if (state->gdbm_error == NULL) { return -1; } if (PyModule_AddType(module, (PyTypeObject *)state->gdbm_error) < 0) { return -1; } if (PyModule_AddStringConstant(module, "open_flags", gdbmmodule_open_flags) < 0) { return -1; } #if defined(GDBM_VERSION_MAJOR) && defined(GDBM_VERSION_MINOR) && \ defined(GDBM_VERSION_PATCH) PyObject *obj = Py_BuildValue("iii", GDBM_VERSION_MAJOR, GDBM_VERSION_MINOR, GDBM_VERSION_PATCH); if (PyModule_Add(module, "_GDBM_VERSION", obj) < 0) { return -1; } #endif return 0; } static int _gdbm_module_traverse(PyObject *module, visitproc visit, void *arg) { _gdbm_state *state = get_gdbm_state(module); Py_VISIT(state->gdbm_error); Py_VISIT(state->gdbm_type); return 0; } static int _gdbm_module_clear(PyObject *module) { _gdbm_state *state = get_gdbm_state(module); Py_CLEAR(state->gdbm_error); Py_CLEAR(state->gdbm_type); return 0; } static void _gdbm_module_free(void *module) { (void)_gdbm_module_clear((PyObject *)module); } static PyModuleDef_Slot _gdbm_module_slots[] = { {Py_mod_exec, _gdbm_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; static struct PyModuleDef _gdbmmodule = { PyModuleDef_HEAD_INIT, .m_name = "_gdbm", .m_doc = gdbmmodule__doc__, .m_size = sizeof(_gdbm_state), .m_methods = _gdbm_module_methods, .m_slots = _gdbm_module_slots, .m_traverse = _gdbm_module_traverse, .m_clear = _gdbm_module_clear, .m_free = _gdbm_module_free, }; PyMODINIT_FUNC PyInit__gdbm(void) { return PyModuleDef_Init(&_gdbmmodule); } /*********************************************************** Copyright (C) 1997, 2002, 2003, 2007, 2008 Martin von Loewis Permission to use, 
copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies. This software comes with no warranty. Use at your own risk. ******************************************************************/ #include "Python.h" #include "pycore_fileutils.h" // _Py_GetLocaleconvNumeric() #include "pycore_pymem.h" // _PyMem_Strdup() #include // setlocale() #include // strlen() #ifdef HAVE_ERRNO_H # include // errno #endif #ifdef HAVE_LANGINFO_H # include // nl_langinfo() #endif #ifdef HAVE_LIBINTL_H # include #endif #ifdef MS_WINDOWS # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN # endif # include #endif PyDoc_STRVAR(locale__doc__, "Support for POSIX locales."); typedef struct _locale_state { PyObject *Error; } _locale_state; static inline _locale_state* get_locale_state(PyObject *m) { void *state = PyModule_GetState(m); assert(state != NULL); return (_locale_state *)state; } #include "clinic/_localemodule.c.h" /*[clinic input] module _locale [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=ed98569b726feada]*/ /* support functions for formatting floating-point numbers */ /* the grouping is terminated by either 0 or CHAR_MAX */ static PyObject* copy_grouping(const char* s) { int i; PyObject *result, *val = NULL; if (s[0] == '\0') { /* empty string: no grouping at all */ return PyList_New(0); } for (i = 0; s[i] != '\0' && s[i] != CHAR_MAX; i++) ; /* nothing */ result = PyList_New(i+1); if (!result) return NULL; i = -1; do { i++; val = PyLong_FromLong(s[i]); if (val == NULL) { Py_DECREF(result); return NULL; } PyList_SET_ITEM(result, i, val); } while (s[i] != '\0' && s[i] != CHAR_MAX); return result; } #if defined(MS_WINDOWS) // 16 is the number of elements in the szCodePage field // of the __crt_locale_strings structure. #define MAX_CP_LEN 15 static int check_locale_name(const char *locale, const char *end) { size_t len = end ? (size_t)(end - locale) : strlen(locale); const char *dot = memchr(locale, '.', len); if (dot && locale + len - dot - 1 > MAX_CP_LEN) { return -1; } return 0; } static int check_locale_name_all(const char *locale) { const char *start = locale; while (1) { const char *end = strchr(start, ';'); if (check_locale_name(start, end) < 0) { return -1; } if (end == NULL) { break; } start = end + 1; } return 0; } #endif /*[clinic input] _locale.setlocale category: int locale: str(accept={str, NoneType}) = NULL / Activates/queries locale processing. [clinic start generated code]*/ static PyObject * _locale_setlocale_impl(PyObject *module, int category, const char *locale) /*[clinic end generated code: output=a0e777ae5d2ff117 input=dbe18f1d66c57a6a]*/ { char *result; PyObject *result_object; #if defined(MS_WINDOWS) if (category < LC_MIN || category > LC_MAX) { PyErr_SetString(get_locale_state(module)->Error, "invalid locale category"); return NULL; } if (locale) { if ((category == LC_ALL ? check_locale_name_all(locale) : check_locale_name(locale, NULL)) < 0) { /* Debug assertion failure on Windows. * _Py_BEGIN_SUPPRESS_IPH/_Py_END_SUPPRESS_IPH do not help. 
*/ PyErr_SetString(get_locale_state(module)->Error, "unsupported locale setting"); return NULL; } } #endif if (locale) { /* set locale */ result = setlocale(category, locale); if (!result) { /* operation failed, no setting was changed */ PyErr_SetString(get_locale_state(module)->Error, "unsupported locale setting"); return NULL; } result_object = PyUnicode_DecodeLocale(result, NULL); if (!result_object) return NULL; } else { /* get locale */ result = setlocale(category, NULL); if (!result) { PyErr_SetString(get_locale_state(module)->Error, "locale query failed"); return NULL; } result_object = PyUnicode_DecodeLocale(result, NULL); } return result_object; } static int locale_is_ascii(const char *str) { return (strlen(str) == 1 && ((unsigned char)str[0]) <= 127); } static int is_all_ascii(const char *str) { for (; *str; str++) { if ((unsigned char)*str > 127) { return 0; } } return 1; } static int locale_decode_monetary(PyObject *dict, struct lconv *lc) { #ifndef MS_WINDOWS int change_locale; change_locale = (!locale_is_ascii(lc->int_curr_symbol) || !locale_is_ascii(lc->currency_symbol) || !locale_is_ascii(lc->mon_decimal_point) || !locale_is_ascii(lc->mon_thousands_sep)); /* Keep a copy of the LC_CTYPE locale */ char *oldloc = NULL, *loc = NULL; if (change_locale) { oldloc = setlocale(LC_CTYPE, NULL); if (!oldloc) { PyErr_SetString(PyExc_RuntimeWarning, "failed to get LC_CTYPE locale"); return -1; } oldloc = _PyMem_Strdup(oldloc); if (!oldloc) { PyErr_NoMemory(); return -1; } loc = setlocale(LC_MONETARY, NULL); if (loc != NULL && strcmp(loc, oldloc) == 0) { loc = NULL; } if (loc != NULL) { /* Only set the locale temporarily the LC_CTYPE locale to the LC_MONETARY locale if the two locales are different and at least one string is non-ASCII. */ setlocale(LC_CTYPE, loc); } } #define GET_LOCALE_STRING(ATTR) PyUnicode_DecodeLocale(lc->ATTR, NULL) #else /* MS_WINDOWS */ /* Use _W_* fields of Windows struct lconv */ #define GET_LOCALE_STRING(ATTR) PyUnicode_FromWideChar(lc->_W_ ## ATTR, -1) #endif /* MS_WINDOWS */ int res = -1; #define RESULT_STRING(ATTR) \ do { \ PyObject *obj; \ obj = GET_LOCALE_STRING(ATTR); \ if (obj == NULL) { \ goto done; \ } \ if (PyDict_SetItemString(dict, Py_STRINGIFY(ATTR), obj) < 0) { \ Py_DECREF(obj); \ goto done; \ } \ Py_DECREF(obj); \ } while (0) RESULT_STRING(int_curr_symbol); RESULT_STRING(currency_symbol); RESULT_STRING(mon_decimal_point); RESULT_STRING(mon_thousands_sep); #undef RESULT_STRING #undef GET_LOCALE_STRING res = 0; done: #ifndef MS_WINDOWS if (loc != NULL) { setlocale(LC_CTYPE, oldloc); } PyMem_Free(oldloc); #endif return res; } /*[clinic input] _locale.localeconv Returns numeric and monetary locale-specific parameters. 
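For example (the values returned depend entirely on the active locale):

    import locale
    locale.setlocale(locale.LC_ALL, '')        # switch to the user's default locale
    conv = locale.localeconv()
    print(conv['decimal_point'], conv['thousands_sep'], conv['grouping'])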
[clinic start generated code]*/ static PyObject * _locale_localeconv_impl(PyObject *module) /*[clinic end generated code: output=43a54515e0a2aef5 input=f1132d15accf4444]*/ { PyObject* result; struct lconv *lc; PyObject *x; result = PyDict_New(); if (!result) { return NULL; } /* if LC_NUMERIC is different in the C library, use saved value */ lc = localeconv(); /* hopefully, the localeconv result survives the C library calls involved herein */ #define RESULT(key, obj)\ do { \ if (obj == NULL) \ goto failed; \ if (PyDict_SetItemString(result, key, obj) < 0) { \ Py_DECREF(obj); \ goto failed; \ } \ Py_DECREF(obj); \ } while (0) #ifdef MS_WINDOWS /* Use _W_* fields of Windows struct lconv */ #define GET_LOCALE_STRING(ATTR) PyUnicode_FromWideChar(lc->_W_ ## ATTR, -1) #else #define GET_LOCALE_STRING(ATTR) PyUnicode_DecodeLocale(lc->ATTR, NULL) #endif #define RESULT_STRING(s)\ do { \ x = GET_LOCALE_STRING(s); \ RESULT(#s, x); \ } while (0) #define RESULT_INT(i)\ do { \ x = PyLong_FromLong(lc->i); \ RESULT(#i, x); \ } while (0) /* Monetary information: LC_MONETARY encoding */ if (locale_decode_monetary(result, lc) < 0) { goto failed; } x = copy_grouping(lc->mon_grouping); RESULT("mon_grouping", x); RESULT_STRING(positive_sign); RESULT_STRING(negative_sign); RESULT_INT(int_frac_digits); RESULT_INT(frac_digits); RESULT_INT(p_cs_precedes); RESULT_INT(p_sep_by_space); RESULT_INT(n_cs_precedes); RESULT_INT(n_sep_by_space); RESULT_INT(p_sign_posn); RESULT_INT(n_sign_posn); /* Numeric information: LC_NUMERIC encoding */ PyObject *decimal_point = NULL, *thousands_sep = NULL; if (_Py_GetLocaleconvNumeric(lc, &decimal_point, &thousands_sep) < 0) { Py_XDECREF(decimal_point); Py_XDECREF(thousands_sep); goto failed; } if (PyDict_SetItemString(result, "decimal_point", decimal_point) < 0) { Py_DECREF(decimal_point); Py_DECREF(thousands_sep); goto failed; } Py_DECREF(decimal_point); if (PyDict_SetItemString(result, "thousands_sep", thousands_sep) < 0) { Py_DECREF(thousands_sep); goto failed; } Py_DECREF(thousands_sep); x = copy_grouping(lc->grouping); RESULT("grouping", x); return result; failed: Py_DECREF(result); return NULL; #undef RESULT #undef RESULT_STRING #undef RESULT_INT #undef GET_LOCALE_STRING } #if defined(HAVE_WCSCOLL) /*[clinic input] _locale.strcoll os1: unicode os2: unicode / Compares two strings according to the locale. [clinic start generated code]*/ static PyObject * _locale_strcoll_impl(PyObject *module, PyObject *os1, PyObject *os2) /*[clinic end generated code: output=82ddc6d62c76d618 input=693cd02bcbf38dd8]*/ { PyObject *result = NULL; wchar_t *ws1 = NULL, *ws2 = NULL; /* Convert the unicode strings to wchar[]. */ ws1 = PyUnicode_AsWideCharString(os1, NULL); if (ws1 == NULL) goto done; ws2 = PyUnicode_AsWideCharString(os2, NULL); if (ws2 == NULL) goto done; /* Collate the strings. */ result = PyLong_FromLong(wcscoll(ws1, ws2)); done: /* Deallocate everything. */ if (ws1) PyMem_Free(ws1); if (ws2) PyMem_Free(ws2); return result; } #endif #ifdef HAVE_WCSXFRM /*[clinic input] _locale.strxfrm string as str: unicode / Return a string that can be used as a key for locale-aware comparisons. 
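For example, the transformed string is typically used as a sort key; the word list below is arbitrary and the resulting order depends on the active collation locale:

    import locale
    locale.setlocale(locale.LC_COLLATE, '')
    words = ['eta', 'été', 'ete']
    print(sorted(words, key=locale.strxfrm))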
[clinic start generated code]*/ static PyObject * _locale_strxfrm_impl(PyObject *module, PyObject *str) /*[clinic end generated code: output=3081866ebffc01af input=1378bbe6a88b4780]*/ { Py_ssize_t n1; wchar_t *s = NULL, *buf = NULL; size_t n2; PyObject *result = NULL; s = PyUnicode_AsWideCharString(str, &n1); if (s == NULL) goto exit; if (wcslen(s) != (size_t)n1) { PyErr_SetString(PyExc_ValueError, "embedded null character"); goto exit; } errno = 0; n2 = wcsxfrm(NULL, s, 0); if (errno && errno != ERANGE) { PyErr_SetFromErrno(PyExc_OSError); goto exit; } buf = PyMem_New(wchar_t, n2+1); if (!buf) { PyErr_NoMemory(); goto exit; } errno = 0; n2 = wcsxfrm(buf, s, n2+1); if (errno) { PyErr_SetFromErrno(PyExc_OSError); goto exit; } /* The result is just a sequence of integers, they are not necessary Unicode code points, so PyUnicode_FromWideChar() cannot be used here. For example, 0xD83D 0xDC0D should not be larger than 0xFF41. */ #if SIZEOF_WCHAR_T == 4 { /* Some codes can exceed the range of Unicode code points (0 - 0x10FFFF), so they cannot be directly used in PyUnicode_FromKindAndData(). They should be first encoded in a way that preserves the lexicographical order. Codes in the range 0-0xFFFF represent themself. Codes larger than 0xFFFF are encoded as a pair: * 0x1xxxx -- the highest 16 bits * 0x0xxxx -- the lowest 16 bits */ size_t n3 = 0; for (size_t i = 0; i < n2; i++) { if ((Py_UCS4)buf[i] > 0x10000u) { n3++; } } if (n3) { n3 += n2; // no integer overflow Py_UCS4 *buf2 = PyMem_New(Py_UCS4, n3); if (buf2 == NULL) { PyErr_NoMemory(); goto exit; } size_t j = 0; for (size_t i = 0; i < n2; i++) { Py_UCS4 c = (Py_UCS4)buf[i]; if (c > 0x10000u) { buf2[j++] = (c >> 16) | 0x10000u; buf2[j++] = c & 0xFFFFu; } else { buf2[j++] = c; } } assert(j == n3); result = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, buf2, n3); PyMem_Free(buf2); goto exit; } } #endif result = PyUnicode_FromKindAndData(sizeof(wchar_t), buf, n2); exit: PyMem_Free(buf); PyMem_Free(s); return result; } #endif #if defined(MS_WINDOWS) /*[clinic input] _locale._getdefaultlocale [clinic start generated code]*/ static PyObject * _locale__getdefaultlocale_impl(PyObject *module) /*[clinic end generated code: output=e6254088579534c2 input=003ea41acd17f7c7]*/ { char encoding[20]; char locale[100]; PyOS_snprintf(encoding, sizeof(encoding), "cp%u", GetACP()); if (GetLocaleInfoA(LOCALE_USER_DEFAULT, LOCALE_SISO639LANGNAME, locale, sizeof(locale))) { Py_ssize_t i = strlen(locale); locale[i++] = '_'; if (GetLocaleInfoA(LOCALE_USER_DEFAULT, LOCALE_SISO3166CTRYNAME, locale+i, (int)(sizeof(locale)-i))) return Py_BuildValue("ss", locale, encoding); } /* If we end up here, this windows version didn't know about ISO639/ISO3166 names (it's probably Windows 95). 
Return the Windows language identifier instead (a hexadecimal number) */ locale[0] = '0'; locale[1] = 'x'; if (GetLocaleInfoA(LOCALE_USER_DEFAULT, LOCALE_IDEFAULTLANGUAGE, locale+2, sizeof(locale)-2)) { return Py_BuildValue("ss", locale, encoding); } /* cannot determine the language code (very unlikely) */ Py_INCREF(Py_None); return Py_BuildValue("Os", Py_None, encoding); } #endif #ifdef HAVE_LANGINFO_H #define LANGINFO(X, Y) {#X, X, Y} static struct langinfo_constant{ const char *name; int value; int category; } langinfo_constants[] = { /* These constants should exist on any langinfo implementation */ LANGINFO(DAY_1, LC_TIME), LANGINFO(DAY_2, LC_TIME), LANGINFO(DAY_3, LC_TIME), LANGINFO(DAY_4, LC_TIME), LANGINFO(DAY_5, LC_TIME), LANGINFO(DAY_6, LC_TIME), LANGINFO(DAY_7, LC_TIME), LANGINFO(ABDAY_1, LC_TIME), LANGINFO(ABDAY_2, LC_TIME), LANGINFO(ABDAY_3, LC_TIME), LANGINFO(ABDAY_4, LC_TIME), LANGINFO(ABDAY_5, LC_TIME), LANGINFO(ABDAY_6, LC_TIME), LANGINFO(ABDAY_7, LC_TIME), LANGINFO(MON_1, LC_TIME), LANGINFO(MON_2, LC_TIME), LANGINFO(MON_3, LC_TIME), LANGINFO(MON_4, LC_TIME), LANGINFO(MON_5, LC_TIME), LANGINFO(MON_6, LC_TIME), LANGINFO(MON_7, LC_TIME), LANGINFO(MON_8, LC_TIME), LANGINFO(MON_9, LC_TIME), LANGINFO(MON_10, LC_TIME), LANGINFO(MON_11, LC_TIME), LANGINFO(MON_12, LC_TIME), LANGINFO(ABMON_1, LC_TIME), LANGINFO(ABMON_2, LC_TIME), LANGINFO(ABMON_3, LC_TIME), LANGINFO(ABMON_4, LC_TIME), LANGINFO(ABMON_5, LC_TIME), LANGINFO(ABMON_6, LC_TIME), LANGINFO(ABMON_7, LC_TIME), LANGINFO(ABMON_8, LC_TIME), LANGINFO(ABMON_9, LC_TIME), LANGINFO(ABMON_10, LC_TIME), LANGINFO(ABMON_11, LC_TIME), LANGINFO(ABMON_12, LC_TIME), #ifdef RADIXCHAR /* The following are not available with glibc 2.0 */ LANGINFO(RADIXCHAR, LC_NUMERIC), LANGINFO(THOUSEP, LC_NUMERIC), /* YESSTR and NOSTR are deprecated in glibc, since they are a special case of message translation, which should be rather done using gettext. So we don't expose it to Python in the first place. LANGINFO(YESSTR, LC_MESSAGES), LANGINFO(NOSTR, LC_MESSAGES), */ LANGINFO(CRNCYSTR, LC_MONETARY), #endif LANGINFO(D_T_FMT, LC_TIME), LANGINFO(D_FMT, LC_TIME), LANGINFO(T_FMT, LC_TIME), LANGINFO(AM_STR, LC_TIME), LANGINFO(PM_STR, LC_TIME), /* The following constants are available only with XPG4, but... OpenBSD doesn't have CODESET but has T_FMT_AMPM, and doesn't have a few of the others. Solution: ifdef-test them all. */ #ifdef CODESET LANGINFO(CODESET, LC_CTYPE), #endif #ifdef T_FMT_AMPM LANGINFO(T_FMT_AMPM, LC_TIME), #endif #ifdef ERA LANGINFO(ERA, LC_TIME), #endif #ifdef ERA_D_FMT LANGINFO(ERA_D_FMT, LC_TIME), #endif #ifdef ERA_D_T_FMT LANGINFO(ERA_D_T_FMT, LC_TIME), #endif #ifdef ERA_T_FMT LANGINFO(ERA_T_FMT, LC_TIME), #endif #ifdef ALT_DIGITS LANGINFO(ALT_DIGITS, LC_TIME), #endif #ifdef YESEXPR LANGINFO(YESEXPR, LC_MESSAGES), #endif #ifdef NOEXPR LANGINFO(NOEXPR, LC_MESSAGES), #endif #ifdef _DATE_FMT /* This is not available in all glibc versions that have CODESET. */ LANGINFO(_DATE_FMT, LC_TIME), #endif {0, 0, 0} }; /* Temporary make the LC_CTYPE locale to be the same as * the locale of the specified category. */ static int change_locale(int category, char **oldloc) { /* Keep a copy of the LC_CTYPE locale */ *oldloc = setlocale(LC_CTYPE, NULL); if (!*oldloc) { PyErr_SetString(PyExc_RuntimeError, "failed to get LC_CTYPE locale"); return -1; } *oldloc = _PyMem_Strdup(*oldloc); if (!*oldloc) { PyErr_NoMemory(); return -1; } /* Set a new locale if it is different. 
*/ char *loc = setlocale(category, NULL); if (loc == NULL || strcmp(loc, *oldloc) == 0) { PyMem_Free(*oldloc); *oldloc = NULL; return 0; } setlocale(LC_CTYPE, loc); return 1; } /* Restore the old LC_CTYPE locale. */ static void restore_locale(char *oldloc) { if (oldloc != NULL) { setlocale(LC_CTYPE, oldloc); PyMem_Free(oldloc); } } #ifdef __GLIBC__ #if defined(ALT_DIGITS) || defined(ERA) static PyObject * decode_strings(const char *result, size_t max_count) { /* Convert a sequence of NUL-separated C strings to a Python string * containing semicolon separated items. */ size_t i = 0; size_t count = 0; for (; count < max_count && result[i]; count++) { i += strlen(result + i) + 1; } char *buf = PyMem_Malloc(i); if (buf == NULL) { PyErr_NoMemory(); return NULL; } memcpy(buf, result, i); /* Replace all NULs with semicolons. */ i = 0; while (--count) { i += strlen(buf + i); buf[i++] = ';'; } PyObject *pyresult = PyUnicode_DecodeLocale(buf, NULL); PyMem_Free(buf); return pyresult; } #endif #endif /*[clinic input] _locale.nl_langinfo key as item: int / Return the value for the locale information associated with key. [clinic start generated code]*/ static PyObject * _locale_nl_langinfo_impl(PyObject *module, int item) /*[clinic end generated code: output=6aea457b47e077a3 input=00798143eecfeddc]*/ { int i; /* Check whether this is a supported constant. GNU libc sometimes returns numeric values in the char* return value, which would crash PyUnicode_FromString. */ for (i = 0; langinfo_constants[i].name; i++) { if (langinfo_constants[i].value == item) { /* Check NULL as a workaround for GNU libc's returning NULL instead of an empty string for nl_langinfo(ERA). */ const char *result = nl_langinfo(item); result = result != NULL ? result : ""; char *oldloc = NULL; if (langinfo_constants[i].category != LC_CTYPE && *result && ( #ifdef __GLIBC__ // gh-133740: Always change the locale for ALT_DIGITS and ERA # ifdef ALT_DIGITS item == ALT_DIGITS || # endif # ifdef ERA item == ERA || # endif #endif !is_all_ascii(result)) && change_locale(langinfo_constants[i].category, &oldloc) < 0) { return NULL; } PyObject *pyresult; #ifdef __GLIBC__ /* According to the POSIX specification the result must be * a sequence of semicolon-separated strings. * But in Glibc they are NUL-separated. */ #ifdef ALT_DIGITS if (item == ALT_DIGITS && *result) { pyresult = decode_strings(result, 100); } else #endif #ifdef ERA if (item == ERA && *result) { pyresult = decode_strings(result, SIZE_MAX); } else #endif #endif { pyresult = PyUnicode_DecodeLocale(result, NULL); } restore_locale(oldloc); return pyresult; } } PyErr_SetString(PyExc_ValueError, "unsupported langinfo constant"); return NULL; } #endif /* HAVE_LANGINFO_H */ #ifdef HAVE_LIBINTL_H /*[clinic input] _locale.gettext msg as in: str / gettext(msg) -> string Return translation of msg. [clinic start generated code]*/ static PyObject * _locale_gettext_impl(PyObject *module, const char *in) /*[clinic end generated code: output=493bb4b38a4704fe input=949fc8efc2bb3bc3]*/ { return PyUnicode_DecodeLocale(gettext(in), NULL); } /*[clinic input] _locale.dgettext domain: str(accept={str, NoneType}) msg as in: str / dgettext(domain, msg) -> string Return translation of msg in domain. 
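For example, on platforms that provide libintl; the domain name and directory below are placeholders, not values used by this module:

    import locale
    locale.bindtextdomain('myapp', '/usr/share/locale')
    locale.textdomain('myapp')
    print(locale.gettext('Hello'))            # default domain
    print(locale.dgettext('myapp', 'Hello'))  # explicit domain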
[clinic start generated code]*/ static PyObject * _locale_dgettext_impl(PyObject *module, const char *domain, const char *in) /*[clinic end generated code: output=3c0cd5287b972c8f input=a277388a635109d8]*/ { return PyUnicode_DecodeLocale(dgettext(domain, in), NULL); } /*[clinic input] _locale.dcgettext domain: str(accept={str, NoneType}) msg as msgid: str category: int / Return translation of msg in domain and category. [clinic start generated code]*/ static PyObject * _locale_dcgettext_impl(PyObject *module, const char *domain, const char *msgid, int category) /*[clinic end generated code: output=0f4cc4fce0aa283f input=ec5f8fed4336de67]*/ { return PyUnicode_DecodeLocale(dcgettext(domain,msgid,category), NULL); } /*[clinic input] _locale.textdomain domain: str(accept={str, NoneType}) / Set the C library's textdmain to domain, returning the new domain. [clinic start generated code]*/ static PyObject * _locale_textdomain_impl(PyObject *module, const char *domain) /*[clinic end generated code: output=7992df06aadec313 input=66359716f5eb1d38]*/ { domain = textdomain(domain); if (!domain) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } return PyUnicode_DecodeLocale(domain, NULL); } /*[clinic input] _locale.bindtextdomain domain: str dir as dirname_obj: object / Bind the C library's domain to dir. [clinic start generated code]*/ static PyObject * _locale_bindtextdomain_impl(PyObject *module, const char *domain, PyObject *dirname_obj) /*[clinic end generated code: output=6d6f3c7b345d785c input=c0dff085acfe272b]*/ { const char *dirname, *current_dirname; PyObject *dirname_bytes = NULL, *result; if (!strlen(domain)) { PyErr_SetString(get_locale_state(module)->Error, "domain must be a non-empty string"); return 0; } if (dirname_obj != Py_None) { if (!PyUnicode_FSConverter(dirname_obj, &dirname_bytes)) return NULL; dirname = PyBytes_AsString(dirname_bytes); } else { dirname_bytes = NULL; dirname = NULL; } current_dirname = bindtextdomain(domain, dirname); if (current_dirname == NULL) { PyErr_SetFromErrno(PyExc_OSError); Py_XDECREF(dirname_bytes); return NULL; } result = PyUnicode_DecodeLocale(current_dirname, NULL); Py_XDECREF(dirname_bytes); return result; } #ifdef HAVE_BIND_TEXTDOMAIN_CODESET /*[clinic input] _locale.bind_textdomain_codeset domain: str codeset: str(accept={str, NoneType}) / Bind the C library's domain to codeset. [clinic start generated code]*/ static PyObject * _locale_bind_textdomain_codeset_impl(PyObject *module, const char *domain, const char *codeset) /*[clinic end generated code: output=fa452f9c8b1b9e89 input=23fbe3540400f259]*/ { codeset = bind_textdomain_codeset(domain, codeset); if (codeset) { return PyUnicode_DecodeLocale(codeset, NULL); } Py_RETURN_NONE; } #endif // HAVE_BIND_TEXTDOMAIN_CODESET #endif // HAVE_LIBINTL_H /*[clinic input] _locale.getencoding Get the current locale encoding. 
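For example:

    import locale
    print(locale.getencoding())    # e.g. 'UTF-8' on most modern systems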
[clinic start generated code]*/ static PyObject * _locale_getencoding_impl(PyObject *module) /*[clinic end generated code: output=86b326b971872e46 input=6503d11e5958b360]*/ { return _Py_GetLocaleEncodingObject(); } static struct PyMethodDef PyLocale_Methods[] = { _LOCALE_SETLOCALE_METHODDEF _LOCALE_LOCALECONV_METHODDEF #ifdef HAVE_WCSCOLL _LOCALE_STRCOLL_METHODDEF #endif #ifdef HAVE_WCSXFRM _LOCALE_STRXFRM_METHODDEF #endif #if defined(MS_WINDOWS) _LOCALE__GETDEFAULTLOCALE_METHODDEF #endif #ifdef HAVE_LANGINFO_H _LOCALE_NL_LANGINFO_METHODDEF #endif #ifdef HAVE_LIBINTL_H _LOCALE_GETTEXT_METHODDEF _LOCALE_DGETTEXT_METHODDEF _LOCALE_DCGETTEXT_METHODDEF _LOCALE_TEXTDOMAIN_METHODDEF _LOCALE_BINDTEXTDOMAIN_METHODDEF #ifdef HAVE_BIND_TEXTDOMAIN_CODESET _LOCALE_BIND_TEXTDOMAIN_CODESET_METHODDEF #endif #endif _LOCALE_GETENCODING_METHODDEF {NULL, NULL} }; static int _locale_exec(PyObject *module) { #ifdef HAVE_LANGINFO_H int i; #endif #define ADD_INT(module, value) \ do { \ if (PyModule_AddIntConstant(module, #value, value) < 0) { \ return -1; \ } \ } while (0) ADD_INT(module, LC_CTYPE); ADD_INT(module, LC_TIME); ADD_INT(module, LC_COLLATE); ADD_INT(module, LC_MONETARY); #ifdef LC_MESSAGES ADD_INT(module, LC_MESSAGES); #endif /* LC_MESSAGES */ ADD_INT(module, LC_NUMERIC); ADD_INT(module, LC_ALL); ADD_INT(module, CHAR_MAX); _locale_state *state = get_locale_state(module); state->Error = PyErr_NewException("locale.Error", NULL, NULL); if (PyModule_AddObjectRef(module, "Error", state->Error) < 0) { return -1; } #ifdef HAVE_LANGINFO_H for (i = 0; langinfo_constants[i].name; i++) { if (PyModule_AddIntConstant(module, langinfo_constants[i].name, langinfo_constants[i].value) < 0) { return -1; } } #endif if (PyErr_Occurred()) { return -1; } return 0; #undef ADD_INT } static struct PyModuleDef_Slot _locale_slots[] = { {Py_mod_exec, _locale_exec}, {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED}, {Py_mod_gil, Py_MOD_GIL_NOT_USED}, {0, NULL} }; static int locale_traverse(PyObject *module, visitproc visit, void *arg) { _locale_state *state = get_locale_state(module); Py_VISIT(state->Error); return 0; } static int locale_clear(PyObject *module) { _locale_state *state = get_locale_state(module); Py_CLEAR(state->Error); return 0; } static void locale_free(void *module) { locale_clear((PyObject*)module); } static struct PyModuleDef _localemodule = { PyModuleDef_HEAD_INIT, "_locale", locale__doc__, sizeof(_locale_state), PyLocale_Methods, _locale_slots, locale_traverse, locale_clear, locale_free, }; PyMODINIT_FUNC PyInit__locale(void) { return PyModuleDef_Init(&_localemodule); } /* Local variables: c-basic-offset: 4 indent-tabs-mode: nil End: */ """