Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)

2025-04-24 00:42:19 -06:00
parent 785265d3e7
commit 9c68cdea84
7786 changed files with 2386458 additions and 217 deletions

139
Dependencies/Python/include/Python.h vendored Normal file

@@ -0,0 +1,139 @@
// Entry point of the Python C API.
// C extensions should only #include <Python.h>, and not include directly
// the other Python header files included by <Python.h>.
#ifndef Py_PYTHON_H
#define Py_PYTHON_H
// Since this is a "meta-include" file, "#ifdef __cplusplus / extern "C" {"
// is not needed.
// Include Python header files
#include "patchlevel.h"
#include "pyconfig.h"
#include "pymacconfig.h"
// Include standard header files
#include <assert.h> // assert()
#include <inttypes.h> // uintptr_t
#include <limits.h> // INT_MAX
#include <math.h> // HUGE_VAL
#include <stdarg.h> // va_list
#include <wchar.h> // wchar_t
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h> // ssize_t
#endif
// <errno.h>, <stdio.h>, <stdlib.h> and <string.h> headers are no longer used
// by Python, but kept for the backward compatibility of existing third party C
// extensions. They are not included by limited C API version 3.11 and newer.
//
// The <ctype.h> and <unistd.h> headers are not included by limited C API
// version 3.13 and newer.
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030b0000
# include <errno.h> // errno
# include <stdio.h> // FILE*
# include <stdlib.h> // getenv()
# include <string.h> // memcpy()
#endif
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 < 0x030d0000
# include <ctype.h> // tolower()
# ifndef MS_WINDOWS
# include <unistd.h> // close()
# endif
#endif
// gh-111506: The free-threaded build is not compatible with the limited API
// or the stable ABI.
#if defined(Py_LIMITED_API) && defined(Py_GIL_DISABLED)
# error "The limited API is not currently supported in the free-threaded build"
#endif
#if defined(Py_GIL_DISABLED) && defined(_MSC_VER)
# include <intrin.h> // __readgsqword()
#endif
#if defined(Py_GIL_DISABLED) && defined(__MINGW32__)
# include <intrin.h> // __readgsqword()
#endif
// Include Python header files
#include "pyport.h"
#include "pymacro.h"
#include "pymath.h"
#include "pymem.h"
#include "pytypedefs.h"
#include "pybuffer.h"
#include "pystats.h"
#include "pyatomic.h"
#include "lock.h"
#include "object.h"
#include "objimpl.h"
#include "typeslots.h"
#include "pyhash.h"
#include "cpython/pydebug.h"
#include "bytearrayobject.h"
#include "bytesobject.h"
#include "unicodeobject.h"
#include "pyerrors.h"
#include "longobject.h"
#include "cpython/longintrepr.h"
#include "boolobject.h"
#include "floatobject.h"
#include "complexobject.h"
#include "rangeobject.h"
#include "memoryobject.h"
#include "tupleobject.h"
#include "listobject.h"
#include "dictobject.h"
#include "cpython/odictobject.h"
#include "enumobject.h"
#include "setobject.h"
#include "methodobject.h"
#include "moduleobject.h"
#include "monitoring.h"
#include "cpython/funcobject.h"
#include "cpython/classobject.h"
#include "fileobject.h"
#include "pycapsule.h"
#include "cpython/code.h"
#include "pyframe.h"
#include "traceback.h"
#include "sliceobject.h"
#include "cpython/cellobject.h"
#include "iterobject.h"
#include "cpython/initconfig.h"
#include "pystate.h"
#include "cpython/genobject.h"
#include "descrobject.h"
#include "genericaliasobject.h"
#include "warnings.h"
#include "weakrefobject.h"
#include "structseq.h"
#include "cpython/picklebufobject.h"
#include "cpython/pytime.h"
#include "codecs.h"
#include "pythread.h"
#include "cpython/context.h"
#include "modsupport.h"
#include "compile.h"
#include "pythonrun.h"
#include "pylifecycle.h"
#include "ceval.h"
#include "sysmodule.h"
#include "osmodule.h"
#include "intrcheck.h"
#include "import.h"
#include "abstract.h"
#include "bltinmodule.h"
#include "critical_section.h"
#include "cpython/pyctype.h"
#include "pystrtod.h"
#include "pystrcmp.h"
#include "fileutils.h"
#include "cpython/pyfpe.h"
#include "cpython/tracemalloc.h"
#endif /* !Py_PYTHON_H */
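
Editor's note, not part of the vendored file: Python.h is the single aggregate header that C extensions are expected to include. A minimal sketch of a consuming extension module (the module name "demo" is hypothetical):

#include <Python.h>   /* include before any standard headers in real extensions */

static PyObject *
demo_ping(PyObject *self, PyObject *args)
{
    return PyUnicode_FromString("pong");
}

static PyMethodDef demo_methods[] = {
    {"ping", demo_ping, METH_NOARGS, "Return the string 'pong'."},
    {NULL, NULL, 0, NULL}   /* sentinel */
};

static struct PyModuleDef demo_module = {
    PyModuleDef_HEAD_INIT, "demo", NULL, -1, demo_methods
};

PyMODINIT_FUNC
PyInit_demo(void)
{
    return PyModule_Create(&demo_module);
}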

911
Dependencies/Python/include/abstract.h vendored Normal file

@@ -0,0 +1,911 @@
/* Abstract Object Interface (many thanks to Jim Fulton) */
#ifndef Py_ABSTRACTOBJECT_H
#define Py_ABSTRACTOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* === Object Protocol ================================================== */
/* Implemented elsewhere:
int PyObject_Print(PyObject *o, FILE *fp, int flags);
Print an object 'o' on file 'fp'. Returns -1 on error. The flags argument
is used to enable certain printing options. The only option currently
supported is Py_PRINT_RAW. By default (flags=0), PyObject_Print() formats
the object by calling PyObject_Repr(). If flags equals Py_PRINT_RAW, it
formats the object by calling PyObject_Str(). */
/* Implemented elsewhere:
int PyObject_HasAttrString(PyObject *o, const char *attr_name);
Returns 1 if object 'o' has the attribute attr_name, and 0 otherwise.
This is equivalent to the Python expression: hasattr(o,attr_name).
This function always succeeds. */
/* Implemented elsewhere:
PyObject* PyObject_GetAttrString(PyObject *o, const char *attr_name);
Retrieve an attribute named attr_name from object o.
Returns the attribute value on success, or NULL on failure.
This is the equivalent of the Python expression: o.attr_name. */
/* Implemented elsewhere:
int PyObject_HasAttr(PyObject *o, PyObject *attr_name);
Returns 1 if o has the attribute attr_name, and 0 otherwise.
This is equivalent to the Python expression: hasattr(o,attr_name).
This function always succeeds. */
/* Implemented elsewhere:
int PyObject_HasAttrStringWithError(PyObject *o, const char *attr_name);
Returns 1 if object 'o' has the attribute attr_name, and 0 otherwise.
This is equivalent to the Python expression: hasattr(o,attr_name).
Returns -1 on failure. */
/* Implemented elsewhere:
int PyObject_HasAttrWithError(PyObject *o, PyObject *attr_name);
Returns 1 if o has the attribute attr_name, and 0 otherwise.
This is equivalent to the Python expression: hasattr(o,attr_name).
Returns -1 on failure. */
/* Implemented elsewhere:
PyObject* PyObject_GetAttr(PyObject *o, PyObject *attr_name);
Retrieve an attribute named 'attr_name' from object 'o'.
Returns the attribute value on success, or NULL on failure.
This is the equivalent of the Python expression: o.attr_name. */
/* Implemented elsewhere:
int PyObject_GetOptionalAttr(PyObject *obj, PyObject *attr_name, PyObject **result);
Variant of PyObject_GetAttr() which doesn't raise AttributeError
if the attribute is not found.
If the attribute is found, return 1 and set *result to a new strong
reference to the attribute.
If the attribute is not found, return 0 and set *result to NULL;
the AttributeError is silenced.
If an error other than AttributeError is raised, return -1 and
set *result to NULL.
*/
/* Implemented elsewhere:
int PyObject_GetOptionalAttrString(PyObject *obj, const char *attr_name, PyObject **result);
Variant of PyObject_GetAttrString() which doesn't raise AttributeError
if the attribute is not found.
If the attribute is found, return 1 and set *result to a new strong
reference to the attribute.
If the attribute is not found, return 0 and set *result to NULL;
the AttributeError is silenced.
If an error other than AttributeError is raised, return -1 and
set *result to NULL.
*/
/* Implemented elsewhere:
int PyObject_SetAttrString(PyObject *o, const char *attr_name, PyObject *v);
Set the value of the attribute named attr_name, for object 'o',
to the value 'v'. Raise an exception and return -1 on failure; return 0 on
success.
This is the equivalent of the Python statement o.attr_name=v. */
/* Implemented elsewhere:
int PyObject_SetAttr(PyObject *o, PyObject *attr_name, PyObject *v);
Set the value of the attribute named attr_name, for object 'o', to the value
'v'. Raise an exception and return -1 on failure; return 0 on success.
This is the equivalent of the Python statement o.attr_name=v. */
/* Implemented elsewhere:
int PyObject_DelAttrString(PyObject *o, const char *attr_name);
Delete attribute named attr_name, for object o. Returns
-1 on failure.
This is the equivalent of the Python statement: del o.attr_name. */
/* Implemented elsewhere:
int PyObject_DelAttr(PyObject *o, PyObject *attr_name);
Delete attribute named attr_name, for object o. Returns -1
on failure. This is the equivalent of the Python
statement: del o.attr_name. */
/* Implemented elsewhere:
PyObject *PyObject_Repr(PyObject *o);
Compute the string representation of object 'o'. Returns the
string representation on success, NULL on failure.
This is the equivalent of the Python expression: repr(o).
Called by the repr() built-in function. */
/* Implemented elsewhere:
PyObject *PyObject_Str(PyObject *o);
Compute the string representation of object, o. Returns the
string representation on success, NULL on failure.
This is the equivalent of the Python expression: str(o).
Called by the str() and print() built-in functions. */
/* Declared elsewhere
PyAPI_FUNC(int) PyCallable_Check(PyObject *o);
Determine if the object, o, is callable. Return 1 if the object is callable
and 0 otherwise.
This function always succeeds. */
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03090000
/* Call a callable Python object without any arguments */
PyAPI_FUNC(PyObject *) PyObject_CallNoArgs(PyObject *func);
#endif
/* Call a callable Python object 'callable' with arguments given by the
tuple 'args' and keywords arguments given by the dictionary 'kwargs'.
'args' must not be NULL, use an empty tuple if no arguments are
needed. If no named arguments are needed, 'kwargs' can be NULL.
This is the equivalent of the Python expression:
callable(*args, **kwargs). */
PyAPI_FUNC(PyObject *) PyObject_Call(PyObject *callable,
PyObject *args, PyObject *kwargs);
/* Call a callable Python object 'callable', with arguments given by the
tuple 'args'. If no arguments are needed, then 'args' can be NULL.
Returns the result of the call on success, or NULL on failure.
This is the equivalent of the Python expression:
callable(*args). */
PyAPI_FUNC(PyObject *) PyObject_CallObject(PyObject *callable,
PyObject *args);
/* Call a callable Python object, callable, with a variable number of C
arguments. The C arguments are described using a mkvalue-style format
string.
The format may be NULL, indicating that no arguments are provided.
Returns the result of the call on success, or NULL on failure.
This is the equivalent of the Python expression:
callable(arg1, arg2, ...). */
PyAPI_FUNC(PyObject *) PyObject_CallFunction(PyObject *callable,
const char *format, ...);
/* Call the method named 'name' of object 'obj' with a variable number of
C arguments. The C arguments are described by a mkvalue format string.
The format can be NULL, indicating that no arguments are provided.
Returns the result of the call on success, or NULL on failure.
This is the equivalent of the Python expression:
obj.name(arg1, arg2, ...). */
PyAPI_FUNC(PyObject *) PyObject_CallMethod(PyObject *obj,
const char *name,
const char *format, ...);
/* Call a callable Python object 'callable' with a variable number of C
arguments. The C arguments are provided as PyObject* values, terminated
by a NULL.
Returns the result of the call on success, or NULL on failure.
This is the equivalent of the Python expression:
callable(arg1, arg2, ...). */
PyAPI_FUNC(PyObject *) PyObject_CallFunctionObjArgs(PyObject *callable,
...);
/* Call the method named 'name' of object 'obj' with a variable number of
C arguments. The C arguments are provided as PyObject* values, terminated
by NULL.
Returns the result of the call on success, or NULL on failure.
This is the equivalent of the Python expression: obj.name(*args). */
PyAPI_FUNC(PyObject *) PyObject_CallMethodObjArgs(
PyObject *obj,
PyObject *name,
...);
/* Given a vectorcall nargsf argument, return the actual number of arguments.
* (For use outside the limited API, this is re-defined as a static inline
* function in cpython/abstract.h)
*/
PyAPI_FUNC(Py_ssize_t) PyVectorcall_NARGS(size_t nargsf);
/* Call "callable" (which must support vectorcall) with positional arguments
"tuple" and keyword arguments "dict". "dict" may also be NULL */
PyAPI_FUNC(PyObject *) PyVectorcall_Call(PyObject *callable, PyObject *tuple, PyObject *dict);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030C0000
#define PY_VECTORCALL_ARGUMENTS_OFFSET \
(_Py_STATIC_CAST(size_t, 1) << (8 * sizeof(size_t) - 1))
/* Perform a PEP 590-style vector call on 'callable' */
PyAPI_FUNC(PyObject *) PyObject_Vectorcall(
PyObject *callable,
PyObject *const *args,
size_t nargsf,
PyObject *kwnames);
/* Call the method 'name' on args[0] with arguments in args[1..nargsf-1]. */
PyAPI_FUNC(PyObject *) PyObject_VectorcallMethod(
PyObject *name, PyObject *const *args,
size_t nargsf, PyObject *kwnames);
#endif
/* Implemented elsewhere:
Py_hash_t PyObject_Hash(PyObject *o);
Compute and return the hash, hash_value, of an object, o. On
failure, return -1.
This is the equivalent of the Python expression: hash(o). */
/* Implemented elsewhere:
int PyObject_IsTrue(PyObject *o);
Returns 1 if the object, o, is considered to be true, 0 if o is
considered to be false and -1 on failure.
This is equivalent to the Python expression: not not o. */
/* Implemented elsewhere:
int PyObject_Not(PyObject *o);
Returns 0 if the object, o, is considered to be true, 1 if o is
considered to be false and -1 on failure.
This is equivalent to the Python expression: not o. */
/* Get the type of an object.
On success, returns a type object corresponding to the object type of object
'o'. On failure, returns NULL.
This is equivalent to the Python expression: type(o) */
PyAPI_FUNC(PyObject *) PyObject_Type(PyObject *o);
/* Return the size of object 'o'. If the object 'o' provides both sequence and
mapping protocols, the sequence size is returned.
On error, -1 is returned.
This is the equivalent to the Python expression: len(o) */
PyAPI_FUNC(Py_ssize_t) PyObject_Size(PyObject *o);
/* For DLL compatibility */
#undef PyObject_Length
PyAPI_FUNC(Py_ssize_t) PyObject_Length(PyObject *o);
#define PyObject_Length PyObject_Size
/* Return element of 'o' corresponding to the object 'key'. Return NULL
on failure.
This is the equivalent of the Python expression: o[key] */
PyAPI_FUNC(PyObject *) PyObject_GetItem(PyObject *o, PyObject *key);
/* Map the object 'key' to the value 'v' into 'o'.
Raise an exception and return -1 on failure; return 0 on success.
This is the equivalent of the Python statement: o[key]=v. */
PyAPI_FUNC(int) PyObject_SetItem(PyObject *o, PyObject *key, PyObject *v);
/* Remove the mapping for the string 'key' from the object 'o'.
Returns -1 on failure.
This is equivalent to the Python statement: del o[key]. */
PyAPI_FUNC(int) PyObject_DelItemString(PyObject *o, const char *key);
/* Delete the mapping for the object 'key' from the object 'o'.
Returns -1 on failure.
This is the equivalent of the Python statement: del o[key]. */
PyAPI_FUNC(int) PyObject_DelItem(PyObject *o, PyObject *key);
/* Takes an arbitrary object and returns the result of calling
obj.__format__(format_spec). */
PyAPI_FUNC(PyObject *) PyObject_Format(PyObject *obj,
PyObject *format_spec);
/* ==== Iterators ================================================ */
/* Takes an object and returns an iterator for it.
This is typically a new iterator but if the argument is an iterator, this
returns itself. */
PyAPI_FUNC(PyObject *) PyObject_GetIter(PyObject *);
/* Takes an AsyncIterable object and returns an AsyncIterator for it.
This is typically a new iterator but if the argument is an AsyncIterator,
this returns itself. */
PyAPI_FUNC(PyObject *) PyObject_GetAIter(PyObject *);
/* Returns non-zero if the object 'obj' provides iterator protocols, and 0 otherwise.
This function always succeeds. */
PyAPI_FUNC(int) PyIter_Check(PyObject *);
/* Returns non-zero if the object 'obj' provides AsyncIterator protocols, and 0 otherwise.
This function always succeeds. */
PyAPI_FUNC(int) PyAIter_Check(PyObject *);
/* Takes an iterator object and calls its tp_iternext slot,
returning the next value.
If the iterator is exhausted, this returns NULL without setting an
exception.
NULL with an exception means an error occurred. */
PyAPI_FUNC(PyObject *) PyIter_Next(PyObject *);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
/* Takes a generator, coroutine or iterator object and sends the value into it.
Returns:
- PYGEN_RETURN (0) if generator has returned.
'result' parameter is filled with return value
- PYGEN_ERROR (-1) if exception was raised.
'result' parameter is NULL
- PYGEN_NEXT (1) if generator has yielded.
'result' parameter is filled with yielded value. */
PyAPI_FUNC(PySendResult) PyIter_Send(PyObject *, PyObject *, PyObject **);
#endif
/* === Number Protocol ================================================== */
/* Returns 1 if the object 'o' provides numeric protocols, and 0 otherwise.
This function always succeeds. */
PyAPI_FUNC(int) PyNumber_Check(PyObject *o);
/* Returns the result of adding o1 and o2, or NULL on failure.
This is the equivalent of the Python expression: o1 + o2. */
PyAPI_FUNC(PyObject *) PyNumber_Add(PyObject *o1, PyObject *o2);
/* Returns the result of subtracting o2 from o1, or NULL on failure.
This is the equivalent of the Python expression: o1 - o2. */
PyAPI_FUNC(PyObject *) PyNumber_Subtract(PyObject *o1, PyObject *o2);
/* Returns the result of multiplying o1 and o2, or NULL on failure.
This is the equivalent of the Python expression: o1 * o2. */
PyAPI_FUNC(PyObject *) PyNumber_Multiply(PyObject *o1, PyObject *o2);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000
/* This is the equivalent of the Python expression: o1 @ o2. */
PyAPI_FUNC(PyObject *) PyNumber_MatrixMultiply(PyObject *o1, PyObject *o2);
#endif
/* Returns the result of dividing o1 by o2 giving an integral result,
or NULL on failure.
This is the equivalent of the Python expression: o1 // o2. */
PyAPI_FUNC(PyObject *) PyNumber_FloorDivide(PyObject *o1, PyObject *o2);
/* Returns the result of dividing o1 by o2 giving a float result, or NULL on
failure.
This is the equivalent of the Python expression: o1 / o2. */
PyAPI_FUNC(PyObject *) PyNumber_TrueDivide(PyObject *o1, PyObject *o2);
/* Returns the remainder of dividing o1 by o2, or NULL on failure.
This is the equivalent of the Python expression: o1 % o2. */
PyAPI_FUNC(PyObject *) PyNumber_Remainder(PyObject *o1, PyObject *o2);
/* See the built-in function divmod.
Returns NULL on failure.
This is the equivalent of the Python expression: divmod(o1, o2). */
PyAPI_FUNC(PyObject *) PyNumber_Divmod(PyObject *o1, PyObject *o2);
/* See the built-in function pow. Returns NULL on failure.
This is the equivalent of the Python expression: pow(o1, o2, o3),
where o3 is optional. */
PyAPI_FUNC(PyObject *) PyNumber_Power(PyObject *o1, PyObject *o2,
PyObject *o3);
/* Returns the negation of o on success, or NULL on failure.
This is the equivalent of the Python expression: -o. */
PyAPI_FUNC(PyObject *) PyNumber_Negative(PyObject *o);
/* Returns the positive of o on success, or NULL on failure.
This is the equivalent of the Python expression: +o. */
PyAPI_FUNC(PyObject *) PyNumber_Positive(PyObject *o);
/* Returns the absolute value of 'o', or NULL on failure.
This is the equivalent of the Python expression: abs(o). */
PyAPI_FUNC(PyObject *) PyNumber_Absolute(PyObject *o);
/* Returns the bitwise negation of 'o' on success, or NULL on failure.
This is the equivalent of the Python expression: ~o. */
PyAPI_FUNC(PyObject *) PyNumber_Invert(PyObject *o);
/* Returns the result of left shifting o1 by o2 on success, or NULL on failure.
This is the equivalent of the Python expression: o1 << o2. */
PyAPI_FUNC(PyObject *) PyNumber_Lshift(PyObject *o1, PyObject *o2);
/* Returns the result of right shifting o1 by o2 on success, or NULL on
failure.
This is the equivalent of the Python expression: o1 >> o2. */
PyAPI_FUNC(PyObject *) PyNumber_Rshift(PyObject *o1, PyObject *o2);
/* Returns the result of bitwise and of o1 and o2 on success, or NULL on
failure.
This is the equivalent of the Python expression: o1 & o2. */
PyAPI_FUNC(PyObject *) PyNumber_And(PyObject *o1, PyObject *o2);
/* Returns the bitwise exclusive or of o1 by o2 on success, or NULL on failure.
This is the equivalent of the Python expression: o1 ^ o2. */
PyAPI_FUNC(PyObject *) PyNumber_Xor(PyObject *o1, PyObject *o2);
/* Returns the result of bitwise or on o1 and o2 on success, or NULL on
failure.
This is the equivalent of the Python expression: o1 | o2. */
PyAPI_FUNC(PyObject *) PyNumber_Or(PyObject *o1, PyObject *o2);
/* Returns 1 if obj is an index integer (has the nb_index slot of the
tp_as_number structure filled in), and 0 otherwise. */
PyAPI_FUNC(int) PyIndex_Check(PyObject *);
/* Returns the object 'o' converted to a Python int, or NULL with an exception
raised on failure. */
PyAPI_FUNC(PyObject *) PyNumber_Index(PyObject *o);
/* Returns the object 'o' converted to Py_ssize_t by going through
PyNumber_Index() first.
If an overflow error occurs while converting the int to Py_ssize_t, then the
second argument 'exc' is the error-type to return. If it is NULL, then the
overflow error is cleared and the value is clipped. */
PyAPI_FUNC(Py_ssize_t) PyNumber_AsSsize_t(PyObject *o, PyObject *exc);
/* Returns the object 'o' converted to an integer object on success, or NULL
on failure.
This is the equivalent of the Python expression: int(o). */
PyAPI_FUNC(PyObject *) PyNumber_Long(PyObject *o);
/* Returns the object 'o' converted to a float object on success, or NULL
on failure.
This is the equivalent of the Python expression: float(o). */
PyAPI_FUNC(PyObject *) PyNumber_Float(PyObject *o);
/* --- In-place variants of (some of) the above number protocol functions -- */
/* Returns the result of adding o2 to o1, possibly in-place, or NULL
on failure.
This is the equivalent of the Python expression: o1 += o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceAdd(PyObject *o1, PyObject *o2);
/* Returns the result of subtracting o2 from o1, possibly in-place or
NULL on failure.
This is the equivalent of the Python expression: o1 -= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceSubtract(PyObject *o1, PyObject *o2);
/* Returns the result of multiplying o1 by o2, possibly in-place, or NULL on
failure.
This is the equivalent of the Python expression: o1 *= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceMultiply(PyObject *o1, PyObject *o2);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000
/* This is the equivalent of the Python expression: o1 @= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceMatrixMultiply(PyObject *o1, PyObject *o2);
#endif
/* Returns the result of dividing o1 by o2 giving an integral result, possibly
in-place, or NULL on failure.
This is the equivalent of the Python expression: o1 //= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceFloorDivide(PyObject *o1,
PyObject *o2);
/* Returns the result of dividing o1 by o2 giving a float result, possibly
in-place, or NULL on failure.
This is the equivalent of the Python expression: o1 /= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceTrueDivide(PyObject *o1,
PyObject *o2);
/* Returns the remainder of dividing o1 by o2, possibly in-place, or NULL on
failure.
This is the equivalent of the Python expression: o1 %= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceRemainder(PyObject *o1, PyObject *o2);
/* Returns the result of raising o1 to the power of o2, possibly in-place,
or NULL on failure.
This is the equivalent of the Python expression: o1 **= o2,
or o1 = pow(o1, o2, o3) if o3 is present. */
PyAPI_FUNC(PyObject *) PyNumber_InPlacePower(PyObject *o1, PyObject *o2,
PyObject *o3);
/* Returns the result of left shifting o1 by o2, possibly in-place, or NULL
on failure.
This is the equivalent of the Python expression: o1 <<= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceLshift(PyObject *o1, PyObject *o2);
/* Returns the result of right shifting o1 by o2, possibly in-place or NULL
on failure.
This is the equivalent of the Python expression: o1 >>= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceRshift(PyObject *o1, PyObject *o2);
/* Returns the result of bitwise and of o1 and o2, possibly in-place, or NULL
on failure.
This is the equivalent of the Python expression: o1 &= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceAnd(PyObject *o1, PyObject *o2);
/* Returns the bitwise exclusive or of o1 by o2, possibly in-place, or NULL
on failure.
This is the equivalent of the Python expression: o1 ^= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceXor(PyObject *o1, PyObject *o2);
/* Returns the result of bitwise or of o1 and o2, possibly in-place,
or NULL on failure.
This is the equivalent of the Python expression: o1 |= o2. */
PyAPI_FUNC(PyObject *) PyNumber_InPlaceOr(PyObject *o1, PyObject *o2);
/* Returns the integer n converted to a string with a base, with a base
marker of 0b, 0o or 0x prefixed if applicable.
If n is not an int object, it is converted with PyNumber_Index first. */
PyAPI_FUNC(PyObject *) PyNumber_ToBase(PyObject *n, int base);
/* === Sequence protocol ================================================ */
/* Return 1 if the object provides sequence protocol, and zero
otherwise.
This function always succeeds. */
PyAPI_FUNC(int) PySequence_Check(PyObject *o);
/* Return the size of sequence object o, or -1 on failure. */
PyAPI_FUNC(Py_ssize_t) PySequence_Size(PyObject *o);
/* For DLL compatibility */
#undef PySequence_Length
PyAPI_FUNC(Py_ssize_t) PySequence_Length(PyObject *o);
#define PySequence_Length PySequence_Size
/* Return the concatenation of o1 and o2 on success, and NULL on failure.
This is the equivalent of the Python expression: o1 + o2. */
PyAPI_FUNC(PyObject *) PySequence_Concat(PyObject *o1, PyObject *o2);
/* Return the result of repeating sequence object 'o' 'count' times,
or NULL on failure.
This is the equivalent of the Python expression: o * count. */
PyAPI_FUNC(PyObject *) PySequence_Repeat(PyObject *o, Py_ssize_t count);
/* Return the ith element of o, or NULL on failure.
This is the equivalent of the Python expression: o[i]. */
PyAPI_FUNC(PyObject *) PySequence_GetItem(PyObject *o, Py_ssize_t i);
/* Return the slice of sequence object o between i1 and i2, or NULL on failure.
This is the equivalent of the Python expression: o[i1:i2]. */
PyAPI_FUNC(PyObject *) PySequence_GetSlice(PyObject *o, Py_ssize_t i1, Py_ssize_t i2);
/* Assign object 'v' to the ith element of the sequence 'o'. Raise an exception
and return -1 on failure; return 0 on success.
This is the equivalent of the Python statement o[i] = v. */
PyAPI_FUNC(int) PySequence_SetItem(PyObject *o, Py_ssize_t i, PyObject *v);
/* Delete the 'i'-th element of the sequence 'v'. Returns -1 on failure.
This is the equivalent of the Python statement: del o[i]. */
PyAPI_FUNC(int) PySequence_DelItem(PyObject *o, Py_ssize_t i);
/* Assign the sequence object 'v' to the slice in sequence object 'o',
from 'i1' to 'i2'. Returns -1 on failure.
This is the equivalent of the Python statement: o[i1:i2] = v. */
PyAPI_FUNC(int) PySequence_SetSlice(PyObject *o, Py_ssize_t i1, Py_ssize_t i2,
PyObject *v);
/* Delete the slice in sequence object 'o' from 'i1' to 'i2'.
Returns -1 on failure.
This is the equivalent of the Python statement: del o[i1:i2]. */
PyAPI_FUNC(int) PySequence_DelSlice(PyObject *o, Py_ssize_t i1, Py_ssize_t i2);
/* Returns the sequence 'o' as a tuple on success, and NULL on failure.
This is equivalent to the Python expression: tuple(o). */
PyAPI_FUNC(PyObject *) PySequence_Tuple(PyObject *o);
/* Returns the sequence 'o' as a list on success, and NULL on failure.
This is equivalent to the Python expression: list(o) */
PyAPI_FUNC(PyObject *) PySequence_List(PyObject *o);
/* Return the sequence 'o' as a list, unless it's already a tuple or list.
Use PySequence_Fast_GET_ITEM to access the members of this list, and
PySequence_Fast_GET_SIZE to get its length.
Returns NULL on failure. If the object does not support iteration, raises a
TypeError exception with 'm' as the message text. */
PyAPI_FUNC(PyObject *) PySequence_Fast(PyObject *o, const char* m);
/* Return the size of the sequence 'o', assuming that 'o' was returned by
PySequence_Fast and is not NULL. */
#define PySequence_Fast_GET_SIZE(o) \
(PyList_Check(o) ? PyList_GET_SIZE(o) : PyTuple_GET_SIZE(o))
/* Return the 'i'-th element of the sequence 'o', assuming that o was returned
by PySequence_Fast, and that i is within bounds. */
#define PySequence_Fast_GET_ITEM(o, i)\
(PyList_Check(o) ? PyList_GET_ITEM((o), (i)) : PyTuple_GET_ITEM((o), (i)))
/* Return a pointer to the underlying item array for
an object returned by PySequence_Fast */
#define PySequence_Fast_ITEMS(sf) \
(PyList_Check(sf) ? ((PyListObject *)(sf))->ob_item \
: ((PyTupleObject *)(sf))->ob_item)
/* Return the number of occurrences of value in 'o', that is, return
the number of keys for which o[key] == value.
On failure, return -1. This is equivalent to the Python expression:
o.count(value). */
PyAPI_FUNC(Py_ssize_t) PySequence_Count(PyObject *o, PyObject *value);
/* Return 1 if 'ob' is in the sequence 'seq'; 0 if 'ob' is not in the sequence
'seq'; -1 on error.
Use __contains__ if possible, else _PySequence_IterSearch(). */
PyAPI_FUNC(int) PySequence_Contains(PyObject *seq, PyObject *ob);
/* For DLL-level backwards compatibility */
#undef PySequence_In
/* Determine if the sequence 'o' contains 'value'. If an item in 'o' is equal
to 'value', return 1, otherwise return 0. On error, return -1.
This is equivalent to the Python expression: value in o. */
PyAPI_FUNC(int) PySequence_In(PyObject *o, PyObject *value);
/* For source-level backwards compatibility */
#define PySequence_In PySequence_Contains
/* Return the first index for which o[i] == value.
On error, return -1.
This is equivalent to the Python expression: o.index(value). */
PyAPI_FUNC(Py_ssize_t) PySequence_Index(PyObject *o, PyObject *value);
/* --- In-place versions of some of the above Sequence functions --- */
/* Append sequence 'o2' to sequence 'o1', in-place when possible. Return the
resulting object, which could be 'o1', or NULL on failure.
This is the equivalent of the Python expression: o1 += o2. */
PyAPI_FUNC(PyObject *) PySequence_InPlaceConcat(PyObject *o1, PyObject *o2);
/* Repeat sequence 'o' by 'count', in-place when possible. Return the resulting
object, which could be 'o', or NULL on failure.
This is the equivalent of the Python expression: o *= count. */
PyAPI_FUNC(PyObject *) PySequence_InPlaceRepeat(PyObject *o, Py_ssize_t count);
/* === Mapping protocol ================================================= */
/* Return 1 if the object provides mapping protocol, and 0 otherwise.
This function always succeeds. */
PyAPI_FUNC(int) PyMapping_Check(PyObject *o);
/* Returns the number of keys in mapping object 'o' on success, and -1 on
failure. This is equivalent to the Python expression: len(o). */
PyAPI_FUNC(Py_ssize_t) PyMapping_Size(PyObject *o);
/* For DLL compatibility */
#undef PyMapping_Length
PyAPI_FUNC(Py_ssize_t) PyMapping_Length(PyObject *o);
#define PyMapping_Length PyMapping_Size
/* Implemented as a macro:
int PyMapping_DelItemString(PyObject *o, const char *key);
Remove the mapping for the string 'key' from the mapping 'o'. Returns -1 on
failure.
This is equivalent to the Python statement: del o[key]. */
#define PyMapping_DelItemString(O, K) PyObject_DelItemString((O), (K))
/* Implemented as a macro:
int PyMapping_DelItem(PyObject *o, PyObject *key);
Remove the mapping for the object 'key' from the mapping object 'o'.
Returns -1 on failure.
This is equivalent to the Python statement: del o[key]. */
#define PyMapping_DelItem(O, K) PyObject_DelItem((O), (K))
/* On success, return 1 if the mapping object 'o' has the key 'key',
and 0 otherwise.
This is equivalent to the Python expression: key in o.
This function always succeeds. */
PyAPI_FUNC(int) PyMapping_HasKeyString(PyObject *o, const char *key);
/* Return 1 if the mapping object has the key 'key', and 0 otherwise.
This is equivalent to the Python expression: key in o.
This function always succeeds. */
PyAPI_FUNC(int) PyMapping_HasKey(PyObject *o, PyObject *key);
/* Return 1 if the mapping object has the key 'key', and 0 otherwise.
This is equivalent to the Python expression: key in o.
On failure, return -1. */
PyAPI_FUNC(int) PyMapping_HasKeyWithError(PyObject *o, PyObject *key);
/* Return 1 if the mapping object has the key 'key', and 0 otherwise.
This is equivalent to the Python expression: key in o.
On failure, return -1. */
PyAPI_FUNC(int) PyMapping_HasKeyStringWithError(PyObject *o, const char *key);
/* On success, return a list or tuple of the keys in mapping object 'o'.
On failure, return NULL. */
PyAPI_FUNC(PyObject *) PyMapping_Keys(PyObject *o);
/* On success, return a list or tuple of the values in mapping object 'o'.
On failure, return NULL. */
PyAPI_FUNC(PyObject *) PyMapping_Values(PyObject *o);
/* On success, return a list or tuple of the items in mapping object 'o',
where each item is a tuple containing a key-value pair. On failure, return
NULL. */
PyAPI_FUNC(PyObject *) PyMapping_Items(PyObject *o);
/* Return element of 'o' corresponding to the string 'key' or NULL on failure.
This is the equivalent of the Python expression: o[key]. */
PyAPI_FUNC(PyObject *) PyMapping_GetItemString(PyObject *o,
const char *key);
/* Variants of PyObject_GetItem() and PyMapping_GetItemString() which don't
raise KeyError if the key is not found.
If the key is found, return 1 and set *result to a new strong
reference to the corresponding value.
If the key is not found, return 0 and set *result to NULL;
the KeyError is silenced.
If an error other than KeyError is raised, return -1 and
set *result to NULL.
*/
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000
PyAPI_FUNC(int) PyMapping_GetOptionalItem(PyObject *, PyObject *, PyObject **);
PyAPI_FUNC(int) PyMapping_GetOptionalItemString(PyObject *, const char *, PyObject **);
#endif
/* Map the string 'key' to the value 'v' in the mapping 'o'.
Returns -1 on failure.
This is the equivalent of the Python statement: o[key]=v. */
PyAPI_FUNC(int) PyMapping_SetItemString(PyObject *o, const char *key,
PyObject *value);
/* isinstance(object, typeorclass) */
PyAPI_FUNC(int) PyObject_IsInstance(PyObject *object, PyObject *typeorclass);
/* issubclass(object, typeorclass) */
PyAPI_FUNC(int) PyObject_IsSubclass(PyObject *object, PyObject *typeorclass);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_ABSTRACTOBJECT_H
# include "cpython/abstract.h"
# undef Py_CPYTHON_ABSTRACTOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* Py_ABSTRACTOBJECT_H */
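
Editor's note, not part of the vendored file: a sketch of the object protocol above, assuming the GIL is held:

#include <Python.h>

/* Call obj.update(arg) through the abstract protocol; 0 on success, -1 on error. */
static int
call_update(PyObject *obj, PyObject *arg)
{
    PyObject *res = PyObject_CallMethod(obj, "update", "(O)", arg);
    if (res == NULL) {
        return -1;                    /* exception already set */
    }
    Py_DECREF(res);
    return 0;
}

/* hasattr-style lookup via PyObject_GetOptionalAttrString (no AttributeError). */
static PyObject *
get_name_or(PyObject *obj, PyObject *fallback)
{
    PyObject *value;
    int rc = PyObject_GetOptionalAttrString(obj, "name", &value);
    if (rc < 0) {
        return NULL;                  /* a real error was raised */
    }
    return rc ? value : Py_NewRef(fallback);
}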

14
Dependencies/Python/include/bltinmodule.h vendored Normal file

@@ -0,0 +1,14 @@
#ifndef Py_BLTINMODULE_H
#define Py_BLTINMODULE_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_DATA(PyTypeObject) PyFilter_Type;
PyAPI_DATA(PyTypeObject) PyMap_Type;
PyAPI_DATA(PyTypeObject) PyZip_Type;
#ifdef __cplusplus
}
#endif
#endif /* !Py_BLTINMODULE_H */
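
Editor's note, not part of the vendored file: these type objects back the filter(), map() and zip() builtins; a sketch of detecting one from C:

#include <Python.h>

/* Return 1 if obj is exactly a map object, as produced by map(f, it). */
static int
is_map_object(PyObject *obj)
{
    return Py_IS_TYPE(obj, &PyMap_Type);
}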

47
Dependencies/Python/include/boolobject.h vendored Normal file

@@ -0,0 +1,47 @@
/* Boolean object interface */
#ifndef Py_BOOLOBJECT_H
#define Py_BOOLOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
// PyBool_Type is declared by object.h
#define PyBool_Check(x) Py_IS_TYPE((x), &PyBool_Type)
/* Py_False and Py_True are the only two bools in existence. */
/* Don't use these directly */
PyAPI_DATA(PyLongObject) _Py_FalseStruct;
PyAPI_DATA(PyLongObject) _Py_TrueStruct;
/* Use these macros */
#if defined(Py_LIMITED_API) && Py_LIMITED_API+0 >= 0x030D0000
# define Py_False Py_GetConstantBorrowed(Py_CONSTANT_FALSE)
# define Py_True Py_GetConstantBorrowed(Py_CONSTANT_TRUE)
#else
# define Py_False _PyObject_CAST(&_Py_FalseStruct)
# define Py_True _PyObject_CAST(&_Py_TrueStruct)
#endif
// Test if an object is the True singleton, the same as "x is True" in Python.
PyAPI_FUNC(int) Py_IsTrue(PyObject *x);
#define Py_IsTrue(x) Py_Is((x), Py_True)
// Test if an object is the False singleton, the same as "x is False" in Python.
PyAPI_FUNC(int) Py_IsFalse(PyObject *x);
#define Py_IsFalse(x) Py_Is((x), Py_False)
/* Macros for returning Py_True or Py_False, respectively */
#define Py_RETURN_TRUE return Py_True
#define Py_RETURN_FALSE return Py_False
/* Function to return a bool from a C long */
PyAPI_FUNC(PyObject *) PyBool_FromLong(long);
#ifdef __cplusplus
}
#endif
#endif /* !Py_BOOLOBJECT_H */
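
Editor's note, not part of the vendored file: a sketch of returning Python booleans from C with the macros above:

#include <Python.h>

static PyObject *
is_positive(PyObject *self, PyObject *arg)
{
    long v = PyLong_AsLong(arg);
    if (v == -1 && PyErr_Occurred()) {
        return NULL;
    }
    if (v > 0) {
        Py_RETURN_TRUE;               /* expands to: return Py_True */
    }
    Py_RETURN_FALSE;
    /* Equivalent: return PyBool_FromLong(v > 0); */
}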

44
Dependencies/Python/include/bytearrayobject.h vendored Normal file

@@ -0,0 +1,44 @@
/* ByteArray object interface */
#ifndef Py_BYTEARRAYOBJECT_H
#define Py_BYTEARRAYOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* Type PyByteArrayObject represents a mutable array of bytes.
* The Python API is that of a sequence;
* the bytes are mapped to ints in [0, 256).
* Bytes are not characters; they may be used to encode characters.
* The only way to go between bytes and str/unicode is via encoding
* and decoding.
* For the convenience of C programmers, the bytes type is considered
* to contain a char pointer, not an unsigned char pointer.
*/
/* Type object */
PyAPI_DATA(PyTypeObject) PyByteArray_Type;
PyAPI_DATA(PyTypeObject) PyByteArrayIter_Type;
/* Type check macros */
#define PyByteArray_Check(self) PyObject_TypeCheck((self), &PyByteArray_Type)
#define PyByteArray_CheckExact(self) Py_IS_TYPE((self), &PyByteArray_Type)
/* Direct API functions */
PyAPI_FUNC(PyObject *) PyByteArray_FromObject(PyObject *);
PyAPI_FUNC(PyObject *) PyByteArray_Concat(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyByteArray_FromStringAndSize(const char *, Py_ssize_t);
PyAPI_FUNC(Py_ssize_t) PyByteArray_Size(PyObject *);
PyAPI_FUNC(char *) PyByteArray_AsString(PyObject *);
PyAPI_FUNC(int) PyByteArray_Resize(PyObject *, Py_ssize_t);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_BYTEARRAYOBJECT_H
# include "cpython/bytearrayobject.h"
# undef Py_CPYTHON_BYTEARRAYOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_BYTEARRAYOBJECT_H */
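
Editor's note, not part of the vendored file: a sketch of building a mutable buffer with the direct API above:

#include <Python.h>
#include <string.h>

static PyObject *
make_buffer(void)
{
    PyObject *ba = PyByteArray_FromStringAndSize("head", 4);
    if (ba == NULL) {
        return NULL;
    }
    if (PyByteArray_Resize(ba, 8) < 0) {          /* grow in place */
        Py_DECREF(ba);
        return NULL;
    }
    memcpy(PyByteArray_AsString(ba) + 4, "tail", 4);
    return ba;                                    /* bytearray(b"headtail") */
}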

66
Dependencies/Python/include/bytesobject.h vendored Normal file

@@ -0,0 +1,66 @@
// Bytes object interface
#ifndef Py_BYTESOBJECT_H
#define Py_BYTESOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/*
Type PyBytesObject represents a byte string. An extra zero byte is
reserved at the end to ensure it is zero-terminated, but a size is
present so strings with null bytes in them can be represented. This
is an immutable object type.
There are functions to create new bytes objects, to test
an object for bytes-ness, and to get the
byte string value. The latter function returns a null pointer
if the object is not of the proper type.
There is a variant that takes an explicit size as well as a
variant that assumes a zero-terminated string. Note that none of the
functions should be applied to a NULL pointer.
*/
PyAPI_DATA(PyTypeObject) PyBytes_Type;
PyAPI_DATA(PyTypeObject) PyBytesIter_Type;
#define PyBytes_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_BYTES_SUBCLASS)
#define PyBytes_CheckExact(op) Py_IS_TYPE((op), &PyBytes_Type)
PyAPI_FUNC(PyObject *) PyBytes_FromStringAndSize(const char *, Py_ssize_t);
PyAPI_FUNC(PyObject *) PyBytes_FromString(const char *);
PyAPI_FUNC(PyObject *) PyBytes_FromObject(PyObject *);
PyAPI_FUNC(PyObject *) PyBytes_FromFormatV(const char*, va_list)
Py_GCC_ATTRIBUTE((format(printf, 1, 0)));
PyAPI_FUNC(PyObject *) PyBytes_FromFormat(const char*, ...)
Py_GCC_ATTRIBUTE((format(printf, 1, 2)));
PyAPI_FUNC(Py_ssize_t) PyBytes_Size(PyObject *);
PyAPI_FUNC(char *) PyBytes_AsString(PyObject *);
PyAPI_FUNC(PyObject *) PyBytes_Repr(PyObject *, int);
PyAPI_FUNC(void) PyBytes_Concat(PyObject **, PyObject *);
PyAPI_FUNC(void) PyBytes_ConcatAndDel(PyObject **, PyObject *);
PyAPI_FUNC(PyObject *) PyBytes_DecodeEscape(const char *, Py_ssize_t,
const char *, Py_ssize_t,
const char *);
/* Provides access to the internal data buffer and size of a bytes object.
Passing NULL as len parameter will force the string buffer to be
0-terminated (passing a string with embedded NUL characters will
cause an exception). */
PyAPI_FUNC(int) PyBytes_AsStringAndSize(
PyObject *obj, /* bytes object */
char **s, /* pointer to buffer variable */
Py_ssize_t *len /* pointer to length variable or NULL */
);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_BYTESOBJECT_H
# include "cpython/bytesobject.h"
# undef Py_CPYTHON_BYTESOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_BYTESOBJECT_H */
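
Editor's note, not part of the vendored file: a sketch of extracting the buffer and size of a bytes object:

#include <stdio.h>
#include <Python.h>

static int
dump_bytes(PyObject *obj)
{
    char *buf;
    Py_ssize_t len;
    if (PyBytes_AsStringAndSize(obj, &buf, &len) < 0) {
        return -1;            /* raises TypeError if obj is not bytes */
    }
    printf("%zd byte(s)\n", len);
    return 0;
}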

145
Dependencies/Python/include/ceval.h vendored Normal file

@@ -0,0 +1,145 @@
/* Interface to random parts in ceval.c */
#ifndef Py_CEVAL_H
#define Py_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_FUNC(PyObject *) PyEval_EvalCode(PyObject *, PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyEval_EvalCodeEx(PyObject *co,
PyObject *globals,
PyObject *locals,
PyObject *const *args, int argc,
PyObject *const *kwds, int kwdc,
PyObject *const *defs, int defc,
PyObject *kwdefs, PyObject *closure);
PyAPI_FUNC(PyObject *) PyEval_GetBuiltins(void);
PyAPI_FUNC(PyObject *) PyEval_GetGlobals(void);
PyAPI_FUNC(PyObject *) PyEval_GetLocals(void);
PyAPI_FUNC(PyFrameObject *) PyEval_GetFrame(void);
PyAPI_FUNC(PyObject *) PyEval_GetFrameBuiltins(void);
PyAPI_FUNC(PyObject *) PyEval_GetFrameGlobals(void);
PyAPI_FUNC(PyObject *) PyEval_GetFrameLocals(void);
PyAPI_FUNC(int) Py_AddPendingCall(int (*func)(void *), void *arg);
PyAPI_FUNC(int) Py_MakePendingCalls(void);
/* Protection against deeply nested recursive calls
In Python 3.0, this protection has two levels:
* normal anti-recursion protection is triggered when the recursion level
exceeds the current recursion limit. It raises a RecursionError, and sets
the "overflowed" flag in the thread state structure. This flag
temporarily *disables* the normal protection; this allows cleanup code
to potentially outgrow the recursion limit while processing the
RecursionError.
* "last chance" anti-recursion protection is triggered when the recursion
level exceeds "current recursion limit + 50". By construction, this
protection can only be triggered when the "overflowed" flag is set. It
means the cleanup code has itself gone into an infinite loop, or the
RecursionError has been mistakenly ignored. When this protection is
triggered, the interpreter aborts with a Fatal Error.
In addition, the "overflowed" flag is automatically reset when the
recursion level drops below "current recursion limit - 50". This heuristic
is meant to ensure that the normal anti-recursion protection doesn't get
disabled too long.
Please note: this scheme has its own limitations. See:
http://mail.python.org/pipermail/python-dev/2008-August/082106.html
for some observations.
*/
PyAPI_FUNC(void) Py_SetRecursionLimit(int);
PyAPI_FUNC(int) Py_GetRecursionLimit(void);
PyAPI_FUNC(int) Py_EnterRecursiveCall(const char *where);
PyAPI_FUNC(void) Py_LeaveRecursiveCall(void);
PyAPI_FUNC(const char *) PyEval_GetFuncName(PyObject *);
PyAPI_FUNC(const char *) PyEval_GetFuncDesc(PyObject *);
PyAPI_FUNC(PyObject *) PyEval_EvalFrame(PyFrameObject *);
PyAPI_FUNC(PyObject *) PyEval_EvalFrameEx(PyFrameObject *f, int exc);
/* Interface for threads.
A module that plans to do a blocking system call (or something else
that lasts a long time and doesn't touch Python data) can allow other
threads to run as follows:
...preparations here...
Py_BEGIN_ALLOW_THREADS
...blocking system call here...
Py_END_ALLOW_THREADS
...interpret result here...
The Py_BEGIN_ALLOW_THREADS/Py_END_ALLOW_THREADS pair expands to a
{}-surrounded block.
To leave the block in the middle (e.g., with return), you must insert
a line containing Py_BLOCK_THREADS before the return, e.g.
if (...premature_exit...) {
Py_BLOCK_THREADS
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
An alternative is:
Py_BLOCK_THREADS
if (...premature_exit...) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
Py_UNBLOCK_THREADS
For convenience, the value of 'errno' is restored across
Py_END_ALLOW_THREADS and Py_BLOCK_THREADS.
WARNING: NEVER NEST CALLS TO Py_BEGIN_ALLOW_THREADS AND
Py_END_ALLOW_THREADS!!!
Note that not all candidates have yet been converted to use this
mechanism!
*/
PyAPI_FUNC(PyThreadState *) PyEval_SaveThread(void);
PyAPI_FUNC(void) PyEval_RestoreThread(PyThreadState *);
Py_DEPRECATED(3.9) PyAPI_FUNC(void) PyEval_InitThreads(void);
PyAPI_FUNC(void) PyEval_AcquireThread(PyThreadState *tstate);
PyAPI_FUNC(void) PyEval_ReleaseThread(PyThreadState *tstate);
#define Py_BEGIN_ALLOW_THREADS { \
PyThreadState *_save; \
_save = PyEval_SaveThread();
#define Py_BLOCK_THREADS PyEval_RestoreThread(_save);
#define Py_UNBLOCK_THREADS _save = PyEval_SaveThread();
#define Py_END_ALLOW_THREADS PyEval_RestoreThread(_save); \
}
/* Masks and values used by FORMAT_VALUE opcode. */
#define FVC_MASK 0x3
#define FVC_NONE 0x0
#define FVC_STR 0x1
#define FVC_REPR 0x2
#define FVC_ASCII 0x3
#define FVS_MASK 0x4
#define FVS_HAVE_SPEC 0x4
#ifndef Py_LIMITED_API
# define Py_CPYTHON_CEVAL_H
# include "cpython/ceval.h"
# undef Py_CPYTHON_CEVAL_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_CEVAL_H */
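
Editor's note, not part of the vendored file: a sketch of the thread-interface pattern described above, releasing the GIL around a blocking call (sleep() stands in for any long operation; POSIX-only, for illustration):

#include <Python.h>
#include <unistd.h>

static PyObject *
blocking_wait(PyObject *self, PyObject *args)
{
    Py_BEGIN_ALLOW_THREADS
    sleep(1);                 /* no Python C API calls inside this block */
    Py_END_ALLOW_THREADS
    Py_RETURN_NONE;
}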

176
Dependencies/Python/include/codecs.h vendored Normal file

@@ -0,0 +1,176 @@
#ifndef Py_CODECREGISTRY_H
#define Py_CODECREGISTRY_H
#ifdef __cplusplus
extern "C" {
#endif
/* ------------------------------------------------------------------------
Python Codec Registry and support functions
Written by Marc-Andre Lemburg (mal@lemburg.com).
Copyright (c) Corporation for National Research Initiatives.
------------------------------------------------------------------------ */
/* Register a new codec search function.
As a side effect, this tries to load the encodings package, if not
yet done, to make sure that it is always first in the list of
search functions.
The search_function's refcount is incremented by this function. */
PyAPI_FUNC(int) PyCodec_Register(
PyObject *search_function
);
/* Unregister a codec search function and clear the registry's cache.
If the search function is not registered, do nothing.
Return 0 on success. Raise an exception and return -1 on error. */
PyAPI_FUNC(int) PyCodec_Unregister(
PyObject *search_function
);
/* Codec registry encoding check API.
Returns 1/0 depending on whether there is a registered codec for
the given encoding.
*/
PyAPI_FUNC(int) PyCodec_KnownEncoding(
const char *encoding
);
/* Generic codec based encoding API.
object is passed through the encoder function found for the given
encoding using the error handling method defined by errors. errors
may be NULL to use the default method defined for the codec.
Raises a LookupError in case no encoder can be found.
*/
PyAPI_FUNC(PyObject *) PyCodec_Encode(
PyObject *object,
const char *encoding,
const char *errors
);
/* Generic codec based decoding API.
object is passed through the decoder function found for the given
encoding using the error handling method defined by errors. errors
may be NULL to use the default method defined for the codec.
Raises a LookupError in case no decoder can be found.
*/
PyAPI_FUNC(PyObject *) PyCodec_Decode(
PyObject *object,
const char *encoding,
const char *errors
);
// --- Codec Lookup APIs --------------------------------------------------
/* Codec registry lookup API.
Looks up the given encoding and returns a CodecInfo object with
function attributes which implement the different aspects of
processing the encoding.
The encoding string is looked up after being converted to all lower-case
characters. This makes encodings looked up through this mechanism
effectively case-insensitive.
If no codec is found, a KeyError is set and NULL returned.
As a side effect, this tries to load the encodings package, if not
yet done. This is part of the lazy load strategy for the encodings
package.
*/
/* Get an encoder function for the given encoding. */
PyAPI_FUNC(PyObject *) PyCodec_Encoder(const char *encoding);
/* Get a decoder function for the given encoding. */
PyAPI_FUNC(PyObject *) PyCodec_Decoder(const char *encoding);
/* Get an IncrementalEncoder object for the given encoding. */
PyAPI_FUNC(PyObject *) PyCodec_IncrementalEncoder(
const char *encoding,
const char *errors);
/* Get an IncrementalDecoder object for the given encoding. */
PyAPI_FUNC(PyObject *) PyCodec_IncrementalDecoder(
const char *encoding,
const char *errors);
/* Get a StreamReader factory function for the given encoding. */
PyAPI_FUNC(PyObject *) PyCodec_StreamReader(
const char *encoding,
PyObject *stream,
const char *errors);
/* Get a StreamWriter factory function for the given encoding. */
PyAPI_FUNC(PyObject *) PyCodec_StreamWriter(
const char *encoding,
PyObject *stream,
const char *errors);
/* Unicode encoding error handling callback registry API */
/* Register the error handling callback function 'error' under the given
   name. The callback will be invoked by a codec when it encounters
   unencodable characters/undecodable bytes and 'name' is specified as the
   errors parameter in the call to the encode/decode function.
Return 0 on success, -1 on error */
PyAPI_FUNC(int) PyCodec_RegisterError(const char *name, PyObject *error);
/* Lookup the error handling callback function registered under the given
name. As a special case NULL can be passed, in which case
the error handling callback for "strict" will be returned. */
PyAPI_FUNC(PyObject *) PyCodec_LookupError(const char *name);
/* raise exc as an exception */
PyAPI_FUNC(PyObject *) PyCodec_StrictErrors(PyObject *exc);
/* ignore the unicode error, skipping the faulty input */
PyAPI_FUNC(PyObject *) PyCodec_IgnoreErrors(PyObject *exc);
/* replace the unicode encode error with ? or U+FFFD */
PyAPI_FUNC(PyObject *) PyCodec_ReplaceErrors(PyObject *exc);
/* replace the unicode encode error with XML character references */
PyAPI_FUNC(PyObject *) PyCodec_XMLCharRefReplaceErrors(PyObject *exc);
/* replace the unicode encode error with backslash escapes (\x, \u and \U) */
PyAPI_FUNC(PyObject *) PyCodec_BackslashReplaceErrors(PyObject *exc);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000
/* replace the unicode encode error with backslash escapes (\N, \x, \u and \U) */
PyAPI_FUNC(PyObject *) PyCodec_NameReplaceErrors(PyObject *exc);
#endif
#ifndef Py_LIMITED_API
PyAPI_DATA(const char *) Py_hexdigits;
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_CODECREGISTRY_H */
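
Editor's note, not part of the vendored file: a sketch of the generic codec APIs above:

#include <Python.h>

/* Equivalent to: text.encode("utf-8", "strict") */
static PyObject *
encode_utf8(PyObject *text)
{
    return PyCodec_Encode(text, "utf-8", "strict");
}

/* Equivalent to: data.decode("utf-8", "replace") */
static PyObject *
decode_utf8(PyObject *data)
{
    return PyCodec_Decode(data, "utf-8", "replace");
}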

22
Dependencies/Python/include/compile.h vendored Normal file

@@ -0,0 +1,22 @@
#ifndef Py_COMPILE_H
#define Py_COMPILE_H
#ifdef __cplusplus
extern "C" {
#endif
/* These definitions must match corresponding definitions in graminit.h. */
#define Py_single_input 256
#define Py_file_input 257
#define Py_eval_input 258
#define Py_func_type_input 345
#ifndef Py_LIMITED_API
# define Py_CPYTHON_COMPILE_H
# include "cpython/compile.h"
# undef Py_CPYTHON_COMPILE_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_COMPILE_H */
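
Editor's note, not part of the vendored file: the start-symbol constants select the grammar entry point when compiling source from C. A sketch using Py_eval_input (Py_CompileString and PyEval_EvalCode are declared elsewhere in these headers):

#include <Python.h>

static PyObject *
eval_expression(const char *expr, PyObject *globals, PyObject *locals)
{
    PyObject *code = Py_CompileString(expr, "<sketch>", Py_eval_input);
    if (code == NULL) {
        return NULL;
    }
    PyObject *result = PyEval_EvalCode(code, globals, locals);
    Py_DECREF(code);
    return result;
}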

30
Dependencies/Python/include/complexobject.h vendored Normal file

@@ -0,0 +1,30 @@
/* Complex number structure */
#ifndef Py_COMPLEXOBJECT_H
#define Py_COMPLEXOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* Complex object interface */
PyAPI_DATA(PyTypeObject) PyComplex_Type;
#define PyComplex_Check(op) PyObject_TypeCheck((op), &PyComplex_Type)
#define PyComplex_CheckExact(op) Py_IS_TYPE((op), &PyComplex_Type)
PyAPI_FUNC(PyObject *) PyComplex_FromDoubles(double real, double imag);
PyAPI_FUNC(double) PyComplex_RealAsDouble(PyObject *op);
PyAPI_FUNC(double) PyComplex_ImagAsDouble(PyObject *op);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_COMPLEXOBJECT_H
# include "cpython/complexobject.h"
# undef Py_CPYTHON_COMPLEXOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_COMPLEXOBJECT_H */
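
Editor's note, not part of the vendored file: a sketch of the complex-number API above:

#include <Python.h>

static void
complex_demo(void)
{
    PyObject *z = PyComplex_FromDoubles(3.0, 4.0);   /* (3+4j) */
    if (z != NULL) {
        double re = PyComplex_RealAsDouble(z);       /* 3.0 */
        double im = PyComplex_ImagAsDouble(z);       /* 4.0 */
        (void)re; (void)im;
        Py_DECREF(z);
    }
}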

87
Dependencies/Python/include/cpython/abstract.h vendored Normal file

@@ -0,0 +1,87 @@
#ifndef Py_CPYTHON_ABSTRACTOBJECT_H
# error "this header file must not be included directly"
#endif
/* === Object Protocol ================================================== */
/* Like PyObject_CallMethod(), but expect a _Py_Identifier*
as the method name. */
PyAPI_FUNC(PyObject*) _PyObject_CallMethodId(
PyObject *obj,
_Py_Identifier *name,
const char *format, ...);
/* Convert keyword arguments from the FASTCALL (stack: C array, kwnames: tuple)
format to a Python dictionary ("kwargs" dict).
The type of kwnames keys is not checked. The final function getting
arguments is responsible to check if all keys are strings, for example using
PyArg_ParseTupleAndKeywords() or PyArg_ValidateKeywordArguments().
Duplicate keys are merged using the last value. If duplicate keys must raise
an exception, the caller is responsible for implementing an explicit check
of the keys in kwnames. */
PyAPI_FUNC(PyObject*) _PyStack_AsDict(PyObject *const *values, PyObject *kwnames);
/* === Vectorcall protocol (PEP 590) ============================= */
// PyVectorcall_NARGS() is exported as a function for the stable ABI.
// Here (when we are not using the stable ABI), the name is overridden to
// call a static inline function for best performance.
static inline Py_ssize_t
_PyVectorcall_NARGS(size_t n)
{
return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET;
}
#define PyVectorcall_NARGS(n) _PyVectorcall_NARGS(n)
PyAPI_FUNC(vectorcallfunc) PyVectorcall_Function(PyObject *callable);
// Backwards compatibility aliases (PEP 590) for API that was provisional
// in Python 3.8
#define _PyObject_Vectorcall PyObject_Vectorcall
#define _PyObject_VectorcallMethod PyObject_VectorcallMethod
#define _PyObject_FastCallDict PyObject_VectorcallDict
#define _PyVectorcall_Function PyVectorcall_Function
#define _PyObject_CallOneArg PyObject_CallOneArg
#define _PyObject_CallMethodNoArgs PyObject_CallMethodNoArgs
#define _PyObject_CallMethodOneArg PyObject_CallMethodOneArg
/* Same as PyObject_Vectorcall except that keyword arguments are passed as
dict, which may be NULL if there are no keyword arguments. */
PyAPI_FUNC(PyObject *) PyObject_VectorcallDict(
PyObject *callable,
PyObject *const *args,
size_t nargsf,
PyObject *kwargs);
PyAPI_FUNC(PyObject *) PyObject_CallOneArg(PyObject *func, PyObject *arg);
static inline PyObject *
PyObject_CallMethodNoArgs(PyObject *self, PyObject *name)
{
size_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
return PyObject_VectorcallMethod(name, &self, nargsf, _Py_NULL);
}
static inline PyObject *
PyObject_CallMethodOneArg(PyObject *self, PyObject *name, PyObject *arg)
{
PyObject *args[2] = {self, arg};
size_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET;
assert(arg != NULL);
return PyObject_VectorcallMethod(name, args, nargsf, _Py_NULL);
}
/* Guess the size of object 'o' using len(o) or o.__length_hint__().
If neither of those return a non-negative value, then return the default
value. If one of the calls fails, this function returns -1. */
PyAPI_FUNC(Py_ssize_t) PyObject_LengthHint(PyObject *o, Py_ssize_t);
/* === Sequence protocol ================================================ */
/* Assume tp_as_sequence and sq_item exist and that 'i' does not
need to be corrected for a negative index. */
#define PySequence_ITEM(o, i)\
( Py_TYPE(o)->tp_as_sequence->sq_item((o), (i)) )

34
Dependencies/Python/include/cpython/bytearrayobject.h vendored Normal file

@@ -0,0 +1,34 @@
#ifndef Py_CPYTHON_BYTEARRAYOBJECT_H
# error "this header file must not be included directly"
#endif
/* Object layout */
typedef struct {
PyObject_VAR_HEAD
Py_ssize_t ob_alloc; /* How many bytes allocated in ob_bytes */
char *ob_bytes; /* Physical backing buffer */
char *ob_start; /* Logical start inside ob_bytes */
Py_ssize_t ob_exports; /* How many buffer exports */
} PyByteArrayObject;
PyAPI_DATA(char) _PyByteArray_empty_string[];
/* Macros and static inline functions, trading safety for speed */
#define _PyByteArray_CAST(op) \
(assert(PyByteArray_Check(op)), _Py_CAST(PyByteArrayObject*, op))
static inline char* PyByteArray_AS_STRING(PyObject *op)
{
PyByteArrayObject *self = _PyByteArray_CAST(op);
if (Py_SIZE(self)) {
return self->ob_start;
}
return _PyByteArray_empty_string;
}
#define PyByteArray_AS_STRING(self) PyByteArray_AS_STRING(_PyObject_CAST(self))
static inline Py_ssize_t PyByteArray_GET_SIZE(PyObject *op) {
PyByteArrayObject *self = _PyByteArray_CAST(op);
return Py_SIZE(self);
}
#define PyByteArray_GET_SIZE(self) PyByteArray_GET_SIZE(_PyObject_CAST(self))

37
Dependencies/Python/include/cpython/bytesobject.h vendored Normal file

@@ -0,0 +1,37 @@
#ifndef Py_CPYTHON_BYTESOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_VAR_HEAD
Py_DEPRECATED(3.11) Py_hash_t ob_shash;
char ob_sval[1];
/* Invariants:
* ob_sval contains space for 'ob_size+1' elements.
* ob_sval[ob_size] == 0.
* ob_shash is the hash of the byte string or -1 if not computed yet.
*/
} PyBytesObject;
PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t);
/* Macros and static inline functions, trading safety for speed */
#define _PyBytes_CAST(op) \
(assert(PyBytes_Check(op)), _Py_CAST(PyBytesObject*, op))
static inline char* PyBytes_AS_STRING(PyObject *op)
{
return _PyBytes_CAST(op)->ob_sval;
}
#define PyBytes_AS_STRING(op) PyBytes_AS_STRING(_PyObject_CAST(op))
static inline Py_ssize_t PyBytes_GET_SIZE(PyObject *op) {
PyBytesObject *self = _PyBytes_CAST(op);
return Py_SIZE(self);
}
#define PyBytes_GET_SIZE(self) PyBytes_GET_SIZE(_PyObject_CAST(self))
/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*,
x must be an iterable object. */
PyAPI_FUNC(PyObject*) _PyBytes_Join(PyObject *sep, PyObject *x);

44
Dependencies/Python/include/cpython/cellobject.h vendored Normal file

@@ -0,0 +1,44 @@
/* Cell object interface */
#ifndef Py_LIMITED_API
#ifndef Py_CELLOBJECT_H
#define Py_CELLOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
PyObject_HEAD
/* Content of the cell or NULL when empty */
PyObject *ob_ref;
} PyCellObject;
PyAPI_DATA(PyTypeObject) PyCell_Type;
#define PyCell_Check(op) Py_IS_TYPE((op), &PyCell_Type)
PyAPI_FUNC(PyObject *) PyCell_New(PyObject *);
PyAPI_FUNC(PyObject *) PyCell_Get(PyObject *);
PyAPI_FUNC(int) PyCell_Set(PyObject *, PyObject *);
static inline PyObject* PyCell_GET(PyObject *op) {
PyCellObject *cell;
assert(PyCell_Check(op));
cell = _Py_CAST(PyCellObject*, op);
return cell->ob_ref;
}
#define PyCell_GET(op) PyCell_GET(_PyObject_CAST(op))
static inline void PyCell_SET(PyObject *op, PyObject *value) {
PyCellObject *cell;
assert(PyCell_Check(op));
cell = _Py_CAST(PyCellObject*, op);
cell->ob_ref = value;
}
#define PyCell_SET(op, value) PyCell_SET(_PyObject_CAST(op), (value))
#ifdef __cplusplus
}
#endif
#endif /* !Py_CELLOBJECT_H */
#endif /* Py_LIMITED_API */
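A short sketch of the cell API above (illustrative; assumes an initialized interpreter):

#include <Python.h>

static void
demo_cell_roundtrip(void)
{
    PyObject *x = PyLong_FromLong(42);
    PyObject *cell = PyCell_New(x);      /* the cell holds its own reference */
    Py_DECREF(x);
    PyObject *got = PyCell_Get(cell);    /* new reference, NULL if empty */
    PyCell_Set(cell, Py_None);           /* replace the cell's content */
    Py_XDECREF(got);
    Py_DECREF(cell);
}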

View File

@@ -0,0 +1,25 @@
#ifndef Py_CPYTHON_CEVAL_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *);
PyAPI_FUNC(void) PyEval_SetProfileAllThreads(Py_tracefunc, PyObject *);
PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *);
PyAPI_FUNC(void) PyEval_SetTraceAllThreads(Py_tracefunc, PyObject *);
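A sketch of installing a profile hook with the functions above. The callback must match Py_tracefunc, and the PyTrace_* event constants come from the regular CPython headers; illustrative only:

#include <Python.h>

static int
demo_profiler(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg)
{
    (void)obj; (void)frame; (void)arg;
    if (what == PyTrace_CALL) {
        /* e.g. increment a call counter */
    }
    return 0;   /* return -1 only with an exception set */
}

/* Install for the current thread:  PyEval_SetProfile(demo_profiler, NULL);
   Or for all threads at once:      PyEval_SetProfileAllThreads(demo_profiler, NULL); */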
/* Look at the current frame's (if any) code's co_flags, and turn on
the corresponding compiler flags in cf->cf_flags. Return 1 if any
flag was set, else return 0. */
PyAPI_FUNC(int) PyEval_MergeCompilerFlags(PyCompilerFlags *cf);
PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(PyThreadState *tstate, struct _PyInterpreterFrame *f, int exc);
PyAPI_FUNC(Py_ssize_t) PyUnstable_Eval_RequestCodeExtraIndex(freefunc);
// Old name -- remove when this API changes:
_Py_DEPRECATED_EXTERNALLY(3.12) static inline Py_ssize_t
_PyEval_RequestCodeExtraIndex(freefunc f) {
return PyUnstable_Eval_RequestCodeExtraIndex(f);
}
PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);

View File

@@ -0,0 +1,71 @@
/* Former class object interface -- now only bound methods are here */
/* Revealing some structures (not for general use) */
#ifndef Py_LIMITED_API
#ifndef Py_CLASSOBJECT_H
#define Py_CLASSOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
PyObject_HEAD
PyObject *im_func; /* The callable object implementing the method */
PyObject *im_self; /* The instance it is bound to */
PyObject *im_weakreflist; /* List of weak references */
vectorcallfunc vectorcall;
} PyMethodObject;
PyAPI_DATA(PyTypeObject) PyMethod_Type;
#define PyMethod_Check(op) Py_IS_TYPE((op), &PyMethod_Type)
PyAPI_FUNC(PyObject *) PyMethod_New(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyMethod_Function(PyObject *);
PyAPI_FUNC(PyObject *) PyMethod_Self(PyObject *);
#define _PyMethod_CAST(meth) \
(assert(PyMethod_Check(meth)), _Py_CAST(PyMethodObject*, meth))
/* Static inline functions for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyObject* PyMethod_GET_FUNCTION(PyObject *meth) {
return _PyMethod_CAST(meth)->im_func;
}
#define PyMethod_GET_FUNCTION(meth) PyMethod_GET_FUNCTION(_PyObject_CAST(meth))
static inline PyObject* PyMethod_GET_SELF(PyObject *meth) {
return _PyMethod_CAST(meth)->im_self;
}
#define PyMethod_GET_SELF(meth) PyMethod_GET_SELF(_PyObject_CAST(meth))
typedef struct {
PyObject_HEAD
PyObject *func;
} PyInstanceMethodObject;
PyAPI_DATA(PyTypeObject) PyInstanceMethod_Type;
#define PyInstanceMethod_Check(op) Py_IS_TYPE((op), &PyInstanceMethod_Type)
PyAPI_FUNC(PyObject *) PyInstanceMethod_New(PyObject *);
PyAPI_FUNC(PyObject *) PyInstanceMethod_Function(PyObject *);
#define _PyInstanceMethod_CAST(meth) \
(assert(PyInstanceMethod_Check(meth)), \
_Py_CAST(PyInstanceMethodObject*, meth))
/* Static inline function for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyObject* PyInstanceMethod_GET_FUNCTION(PyObject *meth) {
return _PyInstanceMethod_CAST(meth)->func;
}
#define PyInstanceMethod_GET_FUNCTION(meth) PyInstanceMethod_GET_FUNCTION(_PyObject_CAST(meth))
#ifdef __cplusplus
}
#endif
#endif // !Py_CLASSOBJECT_H
#endif // !Py_LIMITED_API

View File

@@ -0,0 +1,358 @@
/* Definitions for bytecode */
#ifndef Py_LIMITED_API
#ifndef Py_CODE_H
#define Py_CODE_H
#ifdef __cplusplus
extern "C" {
#endif
/* Count of all local monitoring events */
#define _PY_MONITORING_LOCAL_EVENTS 10
/* Count of all "real" monitoring events (not derived from other events) */
#define _PY_MONITORING_UNGROUPED_EVENTS 15
/* Count of all monitoring events */
#define _PY_MONITORING_EVENTS 17
/* Tables of which tools are active for each monitored event. */
typedef struct _Py_LocalMonitors {
uint8_t tools[_PY_MONITORING_LOCAL_EVENTS];
} _Py_LocalMonitors;
typedef struct _Py_GlobalMonitors {
uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS];
} _Py_GlobalMonitors;
typedef struct {
PyObject *_co_code;
PyObject *_co_varnames;
PyObject *_co_cellvars;
PyObject *_co_freevars;
} _PyCoCached;
/* Ancillary data structure used for instrumentation.
Line instrumentation creates this with sufficient
space for one entry per code unit. The total size
of the data will be `bytes_per_entry * Py_SIZE(code)` */
typedef struct {
uint8_t bytes_per_entry;
uint8_t data[1];
} _PyCoLineInstrumentationData;
typedef struct {
int size;
int capacity;
struct _PyExecutorObject *executors[1];
} _PyExecutorArray;
/* Main data structure used for instrumentation.
* This is allocated when needed for instrumentation
*/
typedef struct {
/* Monitoring specific to this code object */
_Py_LocalMonitors local_monitors;
/* Monitoring that is active on this code object */
_Py_LocalMonitors active_monitors;
/* The tools that are to be notified for events for the matching code unit */
uint8_t *tools;
/* Information to support line events */
_PyCoLineInstrumentationData *lines;
/* The tools that are to be notified for line events for the matching code unit */
uint8_t *line_tools;
/* Information to support instruction events */
/* The underlying instructions, which can themselves be instrumented */
uint8_t *per_instruction_opcodes;
/* The tools that are to be notified for instruction events for the matching code unit */
uint8_t *per_instruction_tools;
} _PyCoMonitoringData;
// To avoid repeating ourselves in deepfreeze.py, all PyCodeObject members are
// defined in this macro:
#define _PyCode_DEF(SIZE) { \
PyObject_VAR_HEAD \
\
/* Note only the following fields are used in hash and/or comparisons \
* \
* - co_name \
* - co_argcount \
* - co_posonlyargcount \
* - co_kwonlyargcount \
* - co_nlocals \
* - co_stacksize \
* - co_flags \
* - co_firstlineno \
* - co_consts \
* - co_names \
* - co_localsplusnames \
* This is done to preserve the name and line number for tracebacks \
* and debuggers; otherwise, constant de-duplication would collapse \
* identical functions/lambdas defined on different lines. \
*/ \
\
/* These fields are set with provided values on new code objects. */ \
\
/* The hottest fields (in the eval loop) are grouped here at the top. */ \
PyObject *co_consts; /* list (constants used) */ \
PyObject *co_names; /* list of strings (names used) */ \
PyObject *co_exceptiontable; /* Byte string encoding exception handling \
table */ \
int co_flags; /* CO_..., see below */ \
\
/* The rest are not so impactful on performance. */ \
int co_argcount; /* #arguments, except *args */ \
int co_posonlyargcount; /* #positional only arguments */ \
int co_kwonlyargcount; /* #keyword only arguments */ \
int co_stacksize; /* #entries needed for evaluation stack */ \
int co_firstlineno; /* first source line number */ \
\
/* redundant values (derived from co_localsplusnames and \
co_localspluskinds) */ \
int co_nlocalsplus; /* number of local + cell + free variables */ \
int co_framesize; /* Size of frame in words */ \
int co_nlocals; /* number of local variables */ \
int co_ncellvars; /* total number of cell variables */ \
int co_nfreevars; /* number of free variables */ \
uint32_t co_version; /* version number */ \
\
PyObject *co_localsplusnames; /* tuple mapping offsets to names */ \
PyObject *co_localspluskinds; /* Bytes mapping to local kinds (one byte \
per variable) */ \
PyObject *co_filename; /* unicode (where it was loaded from) */ \
PyObject *co_name; /* unicode (name, for reference) */ \
PyObject *co_qualname; /* unicode (qualname, for reference) */ \
PyObject *co_linetable; /* bytes object that holds location info */ \
PyObject *co_weakreflist; /* to support weakrefs to code objects */ \
_PyExecutorArray *co_executors; /* executors from optimizer */ \
_PyCoCached *_co_cached; /* cached co_* attributes */ \
uintptr_t _co_instrumentation_version; /* current instrumentation version */ \
_PyCoMonitoringData *_co_monitoring; /* Monitoring data */ \
int _co_firsttraceable; /* index of first traceable instruction */ \
/* Scratch space for extra data relating to the code object. \
Type is a void* to keep the format private in codeobject.c to force \
people to go through the proper APIs. */ \
void *co_extra; \
char co_code_adaptive[(SIZE)]; \
}
/* Bytecode object */
struct PyCodeObject _PyCode_DEF(1);
/* Masks for co_flags above */
#define CO_OPTIMIZED 0x0001
#define CO_NEWLOCALS 0x0002
#define CO_VARARGS 0x0004
#define CO_VARKEYWORDS 0x0008
#define CO_NESTED 0x0010
#define CO_GENERATOR 0x0020
/* The CO_COROUTINE flag is set for coroutine functions (defined with
``async def`` keywords) */
#define CO_COROUTINE 0x0080
#define CO_ITERABLE_COROUTINE 0x0100
#define CO_ASYNC_GENERATOR 0x0200
/* bpo-39562: These constant values were changed in Python 3.9
to prevent collisions with compiler flags. CO_FUTURE_ and PyCF_
constants must be kept unique. PyCF_ constants can use bits from
0x0100 to 0x10000. CO_FUTURE_ constants use bits starting at 0x20000. */
#define CO_FUTURE_DIVISION 0x20000
#define CO_FUTURE_ABSOLUTE_IMPORT 0x40000 /* do absolute imports by default */
#define CO_FUTURE_WITH_STATEMENT 0x80000
#define CO_FUTURE_PRINT_FUNCTION 0x100000
#define CO_FUTURE_UNICODE_LITERALS 0x200000
#define CO_FUTURE_BARRY_AS_BDFL 0x400000
#define CO_FUTURE_GENERATOR_STOP 0x800000
#define CO_FUTURE_ANNOTATIONS 0x1000000
#define CO_NO_MONITORING_EVENTS 0x2000000
/* This should be defined if a future statement modifies the syntax.
For example, when a keyword is added.
*/
#define PY_PARSER_REQUIRES_FUTURE_KEYWORD
#define CO_MAXBLOCKS 21 /* Max static block nesting within a function */
PyAPI_DATA(PyTypeObject) PyCode_Type;
#define PyCode_Check(op) Py_IS_TYPE((op), &PyCode_Type)
static inline Py_ssize_t PyCode_GetNumFree(PyCodeObject *op) {
assert(PyCode_Check(op));
return op->co_nfreevars;
}
static inline int PyUnstable_Code_GetFirstFree(PyCodeObject *op) {
assert(PyCode_Check(op));
return op->co_nlocalsplus - op->co_nfreevars;
}
Py_DEPRECATED(3.13) static inline int PyCode_GetFirstFree(PyCodeObject *op) {
return PyUnstable_Code_GetFirstFree(op);
}
/* Unstable public interface */
PyAPI_FUNC(PyCodeObject *) PyUnstable_Code_New(
int, int, int, int, int, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, int, PyObject *,
PyObject *);
PyAPI_FUNC(PyCodeObject *) PyUnstable_Code_NewWithPosOnlyArgs(
int, int, int, int, int, int, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, int, PyObject *,
PyObject *);
/* same as struct above */
// Old names -- remove when this API changes:
_Py_DEPRECATED_EXTERNALLY(3.12) static inline PyCodeObject *
PyCode_New(
int a, int b, int c, int d, int e, PyObject *f, PyObject *g,
PyObject *h, PyObject *i, PyObject *j, PyObject *k,
PyObject *l, PyObject *m, PyObject *n, int o, PyObject *p,
PyObject *q)
{
return PyUnstable_Code_New(
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q);
}
_Py_DEPRECATED_EXTERNALLY(3.12) static inline PyCodeObject *
PyCode_NewWithPosOnlyArgs(
int a, int poac, int b, int c, int d, int e, PyObject *f, PyObject *g,
PyObject *h, PyObject *i, PyObject *j, PyObject *k,
PyObject *l, PyObject *m, PyObject *n, int o, PyObject *p,
PyObject *q)
{
return PyUnstable_Code_NewWithPosOnlyArgs(
a, poac, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q);
}
/* Creates a new empty code object with the specified source location. */
PyAPI_FUNC(PyCodeObject *)
PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno);
/* Return the line number associated with the specified bytecode index
in this code object. If you just need the line number of a frame,
use PyFrame_GetLineNumber() instead. */
PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int);
PyAPI_FUNC(int) PyCode_Addr2Location(PyCodeObject *, int, int *, int *, int *, int *);
#define PY_FOREACH_CODE_EVENT(V) \
V(CREATE) \
V(DESTROY)
typedef enum {
#define PY_DEF_EVENT(op) PY_CODE_EVENT_##op,
PY_FOREACH_CODE_EVENT(PY_DEF_EVENT)
#undef PY_DEF_EVENT
} PyCodeEvent;
/*
* A callback that is invoked for different events in a code object's lifecycle.
*
* The callback is invoked with a borrowed reference to co, after it is
* created and before it is destroyed.
*
* If the callback sets an exception, it must return -1. Otherwise
* it should return 0.
*/
typedef int (*PyCode_WatchCallback)(
PyCodeEvent event,
PyCodeObject* co);
/*
* Register a per-interpreter callback that will be invoked for code object
* lifecycle events.
*
* Returns a handle that may be passed to PyCode_ClearWatcher on success,
* or -1 and sets an error if no more handles are available.
*/
PyAPI_FUNC(int) PyCode_AddWatcher(PyCode_WatchCallback callback);
/*
* Clear the watcher associated with the watcher_id handle.
*
* Returns 0 on success or -1 if no watcher exists for the provided id.
*/
PyAPI_FUNC(int) PyCode_ClearWatcher(int watcher_id);
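A hedged sketch of registering a code-object watcher with the two functions above:

#include <Python.h>

static int
demo_code_watcher(PyCodeEvent event, PyCodeObject *co)
{
    (void)co;   /* borrowed reference; do not store it without Py_INCREF */
    if (event == PY_CODE_EVENT_CREATE) {
        /* e.g. record that a new code object exists */
    }
    return 0;   /* return -1 only with an exception set */
}

/* int wid = PyCode_AddWatcher(demo_code_watcher);   -1 on failure
   ...
   PyCode_ClearWatcher(wid); */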
/* for internal use only */
struct _opaque {
int computed_line;
const uint8_t *lo_next;
const uint8_t *limit;
};
typedef struct _line_offsets {
int ar_start;
int ar_end;
int ar_line;
struct _opaque opaque;
} PyCodeAddressRange;
/* Update *bounds to describe the first and one-past-the-last instructions in the
same line as lasti. Return the number of that line.
*/
PyAPI_FUNC(int) _PyCode_CheckLineNumber(int lasti, PyCodeAddressRange *bounds);
/* Create a comparable key used to compare constants, taking the object type
* into account. It is used to make sure types are not coerced (e.g., float
* and complex) _and_ to distinguish 0.0 from -0.0, e.g. on IEEE platforms.
*
* Return (type(obj), obj, ...): a tuple of variable size (at least 2 items)
* depending on the type and the value. The type comes first so that bytes
* and str are never compared directly, which could raise BytesWarning. */
PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj);
PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts,
PyObject *names, PyObject *lnotab);
PyAPI_FUNC(int) PyUnstable_Code_GetExtra(
PyObject *code, Py_ssize_t index, void **extra);
PyAPI_FUNC(int) PyUnstable_Code_SetExtra(
PyObject *code, Py_ssize_t index, void *extra);
// Old names -- remove when this API changes:
_Py_DEPRECATED_EXTERNALLY(3.12) static inline int
_PyCode_GetExtra(PyObject *code, Py_ssize_t index, void **extra)
{
return PyUnstable_Code_GetExtra(code, index, extra);
}
_Py_DEPRECATED_EXTERNALLY(3.12) static inline int
_PyCode_SetExtra(PyObject *code, Py_ssize_t index, void *extra)
{
return PyUnstable_Code_SetExtra(code, index, extra);
}
/* Equivalent to getattr(code, 'co_code') in Python.
Returns a strong reference to a bytes object. */
PyAPI_FUNC(PyObject *) PyCode_GetCode(PyCodeObject *code);
/* Equivalent to getattr(code, 'co_varnames') in Python. */
PyAPI_FUNC(PyObject *) PyCode_GetVarnames(PyCodeObject *code);
/* Equivalent to getattr(code, 'co_cellvars') in Python. */
PyAPI_FUNC(PyObject *) PyCode_GetCellvars(PyCodeObject *code);
/* Equivalent to getattr(code, 'co_freevars') in Python. */
PyAPI_FUNC(PyObject *) PyCode_GetFreevars(PyCodeObject *code);
typedef enum _PyCodeLocationInfoKind {
/* short forms are 0 to 9 */
PY_CODE_LOCATION_INFO_SHORT0 = 0,
/* one-line forms are 10 to 12 */
PY_CODE_LOCATION_INFO_ONE_LINE0 = 10,
PY_CODE_LOCATION_INFO_ONE_LINE1 = 11,
PY_CODE_LOCATION_INFO_ONE_LINE2 = 12,
PY_CODE_LOCATION_INFO_NO_COLUMNS = 13,
PY_CODE_LOCATION_INFO_LONG = 14,
PY_CODE_LOCATION_INFO_NONE = 15
} _PyCodeLocationInfoKind;
#ifdef __cplusplus
}
#endif
#endif // !Py_CODE_H
#endif // !Py_LIMITED_API

View File

@@ -0,0 +1,50 @@
#ifndef Py_CPYTHON_COMPILE_H
# error "this header file must not be included directly"
#endif
/* Public interface */
#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \
CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \
CO_FUTURE_UNICODE_LITERALS | CO_FUTURE_BARRY_AS_BDFL | \
CO_FUTURE_GENERATOR_STOP | CO_FUTURE_ANNOTATIONS)
#define PyCF_MASK_OBSOLETE (CO_NESTED)
/* bpo-39562: CO_FUTURE_ and PyCF_ constants must be kept unique.
PyCF_ constants can use bits from 0x0100 to 0x10000.
CO_FUTURE_ constants use bits starting at 0x20000. */
#define PyCF_SOURCE_IS_UTF8 0x0100
#define PyCF_DONT_IMPLY_DEDENT 0x0200
#define PyCF_ONLY_AST 0x0400
#define PyCF_IGNORE_COOKIE 0x0800
#define PyCF_TYPE_COMMENTS 0x1000
#define PyCF_ALLOW_TOP_LEVEL_AWAIT 0x2000
#define PyCF_ALLOW_INCOMPLETE_INPUT 0x4000
#define PyCF_OPTIMIZED_AST (0x8000 | PyCF_ONLY_AST)
#define PyCF_COMPILE_MASK (PyCF_ONLY_AST | PyCF_ALLOW_TOP_LEVEL_AWAIT | \
PyCF_TYPE_COMMENTS | PyCF_DONT_IMPLY_DEDENT | \
PyCF_ALLOW_INCOMPLETE_INPUT | PyCF_OPTIMIZED_AST)
typedef struct {
int cf_flags; /* bitmask of CO_xxx flags relevant to future */
int cf_feature_version; /* minor Python version (PyCF_ONLY_AST) */
} PyCompilerFlags;
#define _PyCompilerFlags_INIT \
(PyCompilerFlags){.cf_flags = 0, .cf_feature_version = PY_MINOR_VERSION}
/* Future feature support */
#define FUTURE_NESTED_SCOPES "nested_scopes"
#define FUTURE_GENERATORS "generators"
#define FUTURE_DIVISION "division"
#define FUTURE_ABSOLUTE_IMPORT "absolute_import"
#define FUTURE_WITH_STATEMENT "with_statement"
#define FUTURE_PRINT_FUNCTION "print_function"
#define FUTURE_UNICODE_LITERALS "unicode_literals"
#define FUTURE_BARRY_AS_BDFL "barry_as_FLUFL"
#define FUTURE_GENERATOR_STOP "generator_stop"
#define FUTURE_ANNOTATIONS "annotations"
#define PY_INVALID_STACK_EFFECT INT_MAX
PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg);
PyAPI_FUNC(int) PyCompile_OpcodeStackEffectWithJump(int opcode, int oparg, int jump);
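A sketch showing PyCompilerFlags in use with Py_CompileStringFlags() (declared elsewhere in the public headers); illustrative only:

#include <Python.h>

static PyObject *
demo_compile_with_annotations(const char *src)
{
    PyCompilerFlags flags = _PyCompilerFlags_INIT;
    flags.cf_flags |= CO_FUTURE_ANNOTATIONS;   /* as if `from __future__ import annotations` */
    return Py_CompileStringFlags(src, "<demo>", Py_file_input, &flags);
}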

View File

@@ -0,0 +1,33 @@
#ifndef Py_CPYTHON_COMPLEXOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
double real;
double imag;
} Py_complex;
// Operations on complex numbers.
PyAPI_FUNC(Py_complex) _Py_c_sum(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_diff(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_neg(Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_prod(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_quot(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_pow(Py_complex, Py_complex);
PyAPI_FUNC(double) _Py_c_abs(Py_complex);
/* Complex object interface */
/*
PyComplexObject represents a complex number with double-precision
real and imaginary parts.
*/
typedef struct {
PyObject_HEAD
Py_complex cval;
} PyComplexObject;
PyAPI_FUNC(PyObject *) PyComplex_FromCComplex(Py_complex);
PyAPI_FUNC(Py_complex) PyComplex_AsCComplex(PyObject *op);
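An illustrative round-trip through the conversion functions above; _Py_c_prod() is the private product helper declared earlier in this file:

#include <Python.h>

static PyObject *
demo_complex_square(PyObject *op)
{
    Py_complex c = PyComplex_AsCComplex(op);
    if (c.real == -1.0 && PyErr_Occurred()) {
        return NULL;   /* conversion failed */
    }
    return PyComplex_FromCComplex(_Py_c_prod(c, c));
}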

View File

@@ -0,0 +1,74 @@
#ifndef Py_LIMITED_API
#ifndef Py_CONTEXT_H
#define Py_CONTEXT_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_DATA(PyTypeObject) PyContext_Type;
typedef struct _pycontextobject PyContext;
PyAPI_DATA(PyTypeObject) PyContextVar_Type;
typedef struct _pycontextvarobject PyContextVar;
PyAPI_DATA(PyTypeObject) PyContextToken_Type;
typedef struct _pycontexttokenobject PyContextToken;
#define PyContext_CheckExact(o) Py_IS_TYPE((o), &PyContext_Type)
#define PyContextVar_CheckExact(o) Py_IS_TYPE((o), &PyContextVar_Type)
#define PyContextToken_CheckExact(o) Py_IS_TYPE((o), &PyContextToken_Type)
PyAPI_FUNC(PyObject *) PyContext_New(void);
PyAPI_FUNC(PyObject *) PyContext_Copy(PyObject *);
PyAPI_FUNC(PyObject *) PyContext_CopyCurrent(void);
PyAPI_FUNC(int) PyContext_Enter(PyObject *);
PyAPI_FUNC(int) PyContext_Exit(PyObject *);
/* Create a new context variable.
default_value can be NULL.
*/
PyAPI_FUNC(PyObject *) PyContextVar_New(
const char *name, PyObject *default_value);
/* Get a value for the variable.
Returns -1 if an error occurred during lookup.
Returns 0 on success, whether or not a value was found.
If a value was found, *value will point to it.
If not, it will point to:
- default_value, if not NULL;
- the default value of "var", if not NULL;
- NULL.
'*value' will be a new reference, if not NULL.
*/
PyAPI_FUNC(int) PyContextVar_Get(
PyObject *var, PyObject *default_value, PyObject **value);
/* Set a new value for the variable.
Returns NULL if an error occurs.
*/
PyAPI_FUNC(PyObject *) PyContextVar_Set(PyObject *var, PyObject *value);
/* Reset a variable to its previous value.
Returns 0 on success, -1 on error.
*/
PyAPI_FUNC(int) PyContextVar_Reset(PyObject *var, PyObject *token);
#ifdef __cplusplus
}
#endif
#endif /* !Py_CONTEXT_H */
#endif /* !Py_LIMITED_API */
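A sketch of the context-variable API above (illustrative; error handling abbreviated, and the variable name is hypothetical):

#include <Python.h>

static int
demo_contextvar(void)
{
    PyObject *var = PyContextVar_New("demo.request_id", NULL);
    if (var == NULL) {
        return -1;
    }
    PyObject *token = PyContextVar_Set(var, Py_None);   /* NULL on error */
    PyObject *value = NULL;
    int rc = PyContextVar_Get(var, NULL, &value);       /* 0 even if unset */
    Py_XDECREF(value);
    if (token != NULL) {
        PyContextVar_Reset(var, token);
        Py_DECREF(token);
    }
    Py_DECREF(var);
    return rc;
}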

View File

@@ -0,0 +1,134 @@
#ifndef Py_CPYTHON_CRITICAL_SECTION_H
# error "this header file must not be included directly"
#endif
// Python critical sections
//
// Conceptually, critical sections are a deadlock avoidance layer on top of
// per-object locks. These helpers, in combination with those locks, replace
// our usage of the global interpreter lock to provide thread-safety for
// otherwise thread-unsafe objects, such as dict.
//
// NOTE: These APIs are no-ops in non-free-threaded builds.
//
// Straightforward per-object locking could introduce deadlocks that were not
// present when running with the GIL. Threads may hold locks for multiple
// objects simultaneously because Python operations can nest. If threads were
// to acquire the same locks in different orders, they would deadlock.
//
// One way to avoid deadlocks is to allow threads to hold only the lock (or
// locks) for a single operation at a time (typically a single lock, but some
// operations involve two locks). When a thread begins a nested operation it
// could suspend the locks for any outer operation: before beginning the nested
// operation, the locks for the outer operation are released and when the
// nested operation completes, the locks for the outer operation are
// reacquired.
//
// To improve performance, this API uses a variation of the above scheme.
// Instead of immediately suspending locks any time a nested operation begins,
// locks are only suspended if the thread would block. This reduces the number
// of lock acquisitions and releases for nested operations, while still
// avoiding deadlocks.
//
// Additionally, the locks for any active operation are suspended around
// other potentially blocking operations, such as I/O. This is because the
// interaction between locks and blocking operations can lead to deadlocks in
// the same way as the interaction between multiple locks.
//
// Each thread's critical sections and their corresponding locks are tracked in
// a stack in `PyThreadState.critical_section`. When a thread calls
// `_PyThreadState_Detach()`, such as before a blocking I/O operation or when
// waiting to acquire a lock, the thread suspends all of its active critical
// sections, temporarily releasing the associated locks. When the thread calls
// `_PyThreadState_Attach()`, it resumes the top-most (i.e., most recent)
// critical section by reacquiring the associated lock or locks. See
// `_PyCriticalSection_Resume()`.
//
// NOTE: Only the top-most critical section is guaranteed to be active.
// Operations that need to lock two objects at once must use
// `Py_BEGIN_CRITICAL_SECTION2()`. You *CANNOT* use nested critical sections
// to lock more than one object at once, because the inner critical section
// may suspend the outer critical sections. This API does not provide a way
// to lock more than two objects at once (though it could be added later
// if actually needed).
//
// NOTE: Critical sections implicitly behave like reentrant locks because
// attempting to acquire the same lock will suspend any outer (earlier)
// critical sections. However, they are less efficient for this use case than
// purposefully designed reentrant locks.
//
// Example usage:
// Py_BEGIN_CRITICAL_SECTION(op);
// ...
// Py_END_CRITICAL_SECTION();
//
// To lock two objects at once:
// Py_BEGIN_CRITICAL_SECTION2(op1, op2);
// ...
// Py_END_CRITICAL_SECTION2();
typedef struct PyCriticalSection PyCriticalSection;
typedef struct PyCriticalSection2 PyCriticalSection2;
PyAPI_FUNC(void)
PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op);
PyAPI_FUNC(void)
PyCriticalSection_End(PyCriticalSection *c);
PyAPI_FUNC(void)
PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b);
PyAPI_FUNC(void)
PyCriticalSection2_End(PyCriticalSection2 *c);
#ifndef Py_GIL_DISABLED
# define Py_BEGIN_CRITICAL_SECTION(op) \
{
# define Py_END_CRITICAL_SECTION() \
}
# define Py_BEGIN_CRITICAL_SECTION2(a, b) \
{
# define Py_END_CRITICAL_SECTION2() \
}
#else /* !Py_GIL_DISABLED */
// NOTE: the contents of this struct are private and may change between
// Python releases without a deprecation period.
struct PyCriticalSection {
// Tagged pointer to an outer active critical section (or 0).
uintptr_t _cs_prev;
// Mutex used to protect critical section
PyMutex *_cs_mutex;
};
// A critical section protected by two mutexes. Use
// Py_BEGIN_CRITICAL_SECTION2 and Py_END_CRITICAL_SECTION2.
// NOTE: the contents of this struct are private and may change between
// Python releases without a deprecation period.
struct PyCriticalSection2 {
PyCriticalSection _cs_base;
PyMutex *_cs_mutex2;
};
# define Py_BEGIN_CRITICAL_SECTION(op) \
{ \
PyCriticalSection _py_cs; \
PyCriticalSection_Begin(&_py_cs, _PyObject_CAST(op))
# define Py_END_CRITICAL_SECTION() \
PyCriticalSection_End(&_py_cs); \
}
# define Py_BEGIN_CRITICAL_SECTION2(a, b) \
{ \
PyCriticalSection2 _py_cs2; \
PyCriticalSection2_Begin(&_py_cs2, _PyObject_CAST(a), _PyObject_CAST(b))
# define Py_END_CRITICAL_SECTION2() \
PyCriticalSection2_End(&_py_cs2); \
}
#endif

View File

@@ -0,0 +1,62 @@
#ifndef Py_CPYTHON_DESCROBJECT_H
# error "this header file must not be included directly"
#endif
typedef PyObject *(*wrapperfunc)(PyObject *self, PyObject *args,
void *wrapped);
typedef PyObject *(*wrapperfunc_kwds)(PyObject *self, PyObject *args,
void *wrapped, PyObject *kwds);
struct wrapperbase {
const char *name;
int offset;
void *function;
wrapperfunc wrapper;
const char *doc;
int flags;
PyObject *name_strobj;
};
/* Flags for above struct */
#define PyWrapperFlag_KEYWORDS 1 /* wrapper function takes keyword args */
/* Various kinds of descriptor objects */
typedef struct {
PyObject_HEAD
PyTypeObject *d_type;
PyObject *d_name;
PyObject *d_qualname;
} PyDescrObject;
#define PyDescr_COMMON PyDescrObject d_common
#define PyDescr_TYPE(x) (((PyDescrObject *)(x))->d_type)
#define PyDescr_NAME(x) (((PyDescrObject *)(x))->d_name)
typedef struct {
PyDescr_COMMON;
PyMethodDef *d_method;
vectorcallfunc vectorcall;
} PyMethodDescrObject;
typedef struct {
PyDescr_COMMON;
PyMemberDef *d_member;
} PyMemberDescrObject;
typedef struct {
PyDescr_COMMON;
PyGetSetDef *d_getset;
} PyGetSetDescrObject;
typedef struct {
PyDescr_COMMON;
struct wrapperbase *d_base;
void *d_wrapped; /* This can be any function pointer */
} PyWrapperDescrObject;
PyAPI_FUNC(PyObject *) PyDescr_NewWrapper(PyTypeObject *,
struct wrapperbase *, void *);
PyAPI_FUNC(int) PyDescr_IsData(PyObject *);

View File

@@ -0,0 +1,102 @@
#ifndef Py_CPYTHON_DICTOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct _dictkeysobject PyDictKeysObject;
typedef struct _dictvalues PyDictValues;
/* The ma_values pointer is NULL for a combined table
* or points to an array of PyObject* for a split table
*/
typedef struct {
PyObject_HEAD
/* Number of items in the dictionary */
Py_ssize_t ma_used;
/* Dictionary version: globally unique; the value changes each time
the dictionary is modified */
#ifdef Py_BUILD_CORE
/* Bits 0-7 are for dict watchers.
* Bits 8-11 are for the watched mutation counter (used by tier2 optimization)
* The remaining bits (12-63) are the actual version tag. */
uint64_t ma_version_tag;
#else
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
#endif
PyDictKeysObject *ma_keys;
/* If ma_values is NULL, the table is "combined": keys and values
are stored in ma_keys.
If ma_values is not NULL, the table is split:
keys are stored in ma_keys and values are stored in ma_values */
PyDictValues *ma_values;
} PyDictObject;
PyAPI_FUNC(PyObject *) _PyDict_GetItem_KnownHash(PyObject *mp, PyObject *key,
Py_hash_t hash);
PyAPI_FUNC(PyObject *) _PyDict_GetItemStringWithError(PyObject *, const char *);
PyAPI_FUNC(PyObject *) PyDict_SetDefault(
PyObject *mp, PyObject *key, PyObject *defaultobj);
// Inserts `key` with a value `default_value`, if `key` is not already present
// in the dictionary. If `result` is not NULL, then the value associated
// with `key` is returned in `*result` (either the existing value, or the now
// inserted `default_value`).
// Returns:
// -1 on error
// 0 if `key` was not present and `default_value` was inserted
// 1 if `key` was present and `default_value` was not inserted
PyAPI_FUNC(int) PyDict_SetDefaultRef(PyObject *mp, PyObject *key, PyObject *default_value, PyObject **result);
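An illustrative use of PyDict_SetDefaultRef(), which avoids a separate lookup-then-insert sequence and hands back a strong reference:

#include <Python.h>

/* Return a strong reference to dict[key], inserting Py_None first if the
   key was missing. Assumes 'dict' is a dict. */
static PyObject *
demo_get_or_insert_none(PyObject *dict, PyObject *key)
{
    PyObject *value = NULL;
    if (PyDict_SetDefaultRef(dict, key, Py_None, &value) < 0) {
        return NULL;   /* error */
    }
    return value;      /* existing value, or the freshly inserted Py_None */
}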
/* Get the number of items of a dictionary. */
static inline Py_ssize_t PyDict_GET_SIZE(PyObject *op) {
PyDictObject *mp;
assert(PyDict_Check(op));
mp = _Py_CAST(PyDictObject*, op);
#ifdef Py_GIL_DISABLED
return _Py_atomic_load_ssize_relaxed(&mp->ma_used);
#else
return mp->ma_used;
#endif
}
#define PyDict_GET_SIZE(op) PyDict_GET_SIZE(_PyObject_CAST(op))
PyAPI_FUNC(int) PyDict_ContainsString(PyObject *mp, const char *key);
PyAPI_FUNC(PyObject *) _PyDict_NewPresized(Py_ssize_t minused);
PyAPI_FUNC(int) PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result);
PyAPI_FUNC(int) PyDict_PopString(PyObject *dict, const char *key, PyObject **result);
PyAPI_FUNC(PyObject *) _PyDict_Pop(PyObject *dict, PyObject *key, PyObject *default_value);
/* Dictionary watchers */
#define PY_FOREACH_DICT_EVENT(V) \
V(ADDED) \
V(MODIFIED) \
V(DELETED) \
V(CLONED) \
V(CLEARED) \
V(DEALLOCATED)
typedef enum {
#define PY_DEF_EVENT(EVENT) PyDict_EVENT_##EVENT,
PY_FOREACH_DICT_EVENT(PY_DEF_EVENT)
#undef PY_DEF_EVENT
} PyDict_WatchEvent;
// Callback to be invoked when a watched dict is cleared, deallocated, or modified.
// In the clear/dealloc case, key and new_value will be NULL. Otherwise, new_value
// will be the new value for key, or NULL if key is being deleted.
typedef int(*PyDict_WatchCallback)(PyDict_WatchEvent event, PyObject* dict, PyObject* key, PyObject* new_value);
// Register/unregister a dict-watcher callback
PyAPI_FUNC(int) PyDict_AddWatcher(PyDict_WatchCallback callback);
PyAPI_FUNC(int) PyDict_ClearWatcher(int watcher_id);
// Mark given dictionary as "watched" (callback will be called if it is modified)
PyAPI_FUNC(int) PyDict_Watch(int watcher_id, PyObject* dict);
PyAPI_FUNC(int) PyDict_Unwatch(int watcher_id, PyObject* dict);

View File

@@ -0,0 +1,16 @@
#ifndef Py_CPYTHON_FILEOBJECT_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(char *) Py_UniversalNewlineFgets(char *, int, FILE*, PyObject *);
/* The std printer acts as a preliminary sys.stderr until the new io
infrastructure is in place. */
PyAPI_FUNC(PyObject *) PyFile_NewStdPrinter(int);
PyAPI_DATA(PyTypeObject) PyStdPrinter_Type;
typedef PyObject * (*Py_OpenCodeHookFunction)(PyObject *, void *);
PyAPI_FUNC(PyObject *) PyFile_OpenCode(const char *utf8path);
PyAPI_FUNC(PyObject *) PyFile_OpenCodeObject(PyObject *path);
PyAPI_FUNC(int) PyFile_SetOpenCodeHook(Py_OpenCodeHookFunction hook, void *userData);

View File

@@ -0,0 +1,8 @@
#ifndef Py_CPYTHON_FILEUTILS_H
# error "this header file must not be included directly"
#endif
// Used by _testcapi which must not use the internal C API
PyAPI_FUNC(FILE*) _Py_fopen_obj(
PyObject *path,
const char *mode);

View File

@@ -0,0 +1,27 @@
#ifndef Py_CPYTHON_FLOATOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_HEAD
double ob_fval;
} PyFloatObject;
#define _PyFloat_CAST(op) \
(assert(PyFloat_Check(op)), _Py_CAST(PyFloatObject*, op))
// Static inline version of PyFloat_AsDouble(), trading safety for speed.
// It doesn't check whether op is a float object.
static inline double PyFloat_AS_DOUBLE(PyObject *op) {
return _PyFloat_CAST(op)->ob_fval;
}
#define PyFloat_AS_DOUBLE(op) PyFloat_AS_DOUBLE(_PyObject_CAST(op))
PyAPI_FUNC(int) PyFloat_Pack2(double x, char *p, int le);
PyAPI_FUNC(int) PyFloat_Pack4(double x, char *p, int le);
PyAPI_FUNC(int) PyFloat_Pack8(double x, char *p, int le);
PyAPI_FUNC(double) PyFloat_Unpack2(const char *p, int le);
PyAPI_FUNC(double) PyFloat_Unpack4(const char *p, int le);
PyAPI_FUNC(double) PyFloat_Unpack8(const char *p, int le);
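A small sketch of the pack/unpack helpers above, which read and write IEEE 754 values with an explicit endianness (le=1 means little-endian):

#include <Python.h>

static int
demo_roundtrip_double(double x)
{
    char buf[8];
    if (PyFloat_Pack8(x, buf, 1) < 0) {    /* little-endian */
        return -1;
    }
    double y = PyFloat_Unpack8(buf, 1);    /* -1.0 with an exception on error */
    return y == x ? 0 : 1;                 /* NaN never compares equal */
}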

View File

@@ -0,0 +1,35 @@
/* Frame object interface */
#ifndef Py_CPYTHON_FRAMEOBJECT_H
# error "this header file must not be included directly"
#endif
/* Standard object interface */
PyAPI_FUNC(PyFrameObject *) PyFrame_New(PyThreadState *, PyCodeObject *,
PyObject *, PyObject *);
/* The rest of the interface is specific for frame objects */
/* Conversions between "fast locals" and locals in dictionary */
PyAPI_FUNC(void) PyFrame_LocalsToFast(PyFrameObject *, int);
/* -- Caveat emptor --
* The concept of entry frames is an implementation detail of the CPython
* interpreter. This API is considered unstable and is provided for the
* convenience of debuggers, profilers and state-inspecting tools. Note that
* this API can change in future minor versions if the underlying frame
* mechanism changes, or if the concept of an 'entry frame' or its semantics
* become obsolete or outdated. */
PyAPI_FUNC(int) _PyFrame_IsEntryFrame(PyFrameObject *frame);
PyAPI_FUNC(int) PyFrame_FastToLocalsWithError(PyFrameObject *f);
PyAPI_FUNC(void) PyFrame_FastToLocals(PyFrameObject *);
typedef struct {
PyObject_HEAD
PyFrameObject* frame;
} PyFrameLocalsProxyObject;

View File

@@ -0,0 +1,184 @@
/* Function object interface */
#ifndef Py_LIMITED_API
#ifndef Py_FUNCOBJECT_H
#define Py_FUNCOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#define _Py_COMMON_FIELDS(PREFIX) \
PyObject *PREFIX ## globals; \
PyObject *PREFIX ## builtins; \
PyObject *PREFIX ## name; \
PyObject *PREFIX ## qualname; \
PyObject *PREFIX ## code; /* A code object, the __code__ attribute */ \
PyObject *PREFIX ## defaults; /* NULL or a tuple */ \
PyObject *PREFIX ## kwdefaults; /* NULL or a dict */ \
PyObject *PREFIX ## closure; /* NULL or a tuple of cell objects */
typedef struct {
_Py_COMMON_FIELDS(fc_)
} PyFrameConstructor;
/* Function objects and code objects should not be confused with each other:
*
* Function objects are created by the execution of the 'def' statement.
* They reference a code object in their __code__ attribute, which is a
* purely syntactic object, i.e. nothing more than a compiled version of some
* source code lines. There is one code object per source code "fragment",
* but each code object can be referenced by zero or many function objects
* depending only on how many times the 'def' statement in the source was
* executed so far.
*/
typedef struct {
PyObject_HEAD
_Py_COMMON_FIELDS(func_)
PyObject *func_doc; /* The __doc__ attribute, can be anything */
PyObject *func_dict; /* The __dict__ attribute, a dict or NULL */
PyObject *func_weakreflist; /* List of weak references */
PyObject *func_module; /* The __module__ attribute, can be anything */
PyObject *func_annotations; /* Annotations, a dict or NULL */
PyObject *func_typeparams; /* Tuple of active type variables or NULL */
vectorcallfunc vectorcall;
/* Version number for use by specializer.
* Can set to non-zero when we want to specialize.
* Will be set to zero if any of these change:
* defaults
* kwdefaults (only if the object changes, not the contents of the dict)
* code
* annotations
* vectorcall function pointer */
uint32_t func_version;
/* Invariant:
* func_closure contains the bindings for func_code->co_freevars, so
* PyTuple_Size(func_closure) == PyCode_GetNumFree(func_code)
* (func_closure may be NULL if PyCode_GetNumFree(func_code) == 0).
*/
} PyFunctionObject;
#undef _Py_COMMON_FIELDS
PyAPI_DATA(PyTypeObject) PyFunction_Type;
#define PyFunction_Check(op) Py_IS_TYPE((op), &PyFunction_Type)
PyAPI_FUNC(PyObject *) PyFunction_New(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_NewWithQualName(PyObject *, PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetCode(PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetGlobals(PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetModule(PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetDefaults(PyObject *);
PyAPI_FUNC(int) PyFunction_SetDefaults(PyObject *, PyObject *);
PyAPI_FUNC(void) PyFunction_SetVectorcall(PyFunctionObject *, vectorcallfunc);
PyAPI_FUNC(PyObject *) PyFunction_GetKwDefaults(PyObject *);
PyAPI_FUNC(int) PyFunction_SetKwDefaults(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetClosure(PyObject *);
PyAPI_FUNC(int) PyFunction_SetClosure(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetAnnotations(PyObject *);
PyAPI_FUNC(int) PyFunction_SetAnnotations(PyObject *, PyObject *);
#define _PyFunction_CAST(func) \
(assert(PyFunction_Check(func)), _Py_CAST(PyFunctionObject*, func))
/* Static inline functions for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyObject* PyFunction_GET_CODE(PyObject *func) {
return _PyFunction_CAST(func)->func_code;
}
#define PyFunction_GET_CODE(func) PyFunction_GET_CODE(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_GLOBALS(PyObject *func) {
return _PyFunction_CAST(func)->func_globals;
}
#define PyFunction_GET_GLOBALS(func) PyFunction_GET_GLOBALS(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_MODULE(PyObject *func) {
return _PyFunction_CAST(func)->func_module;
}
#define PyFunction_GET_MODULE(func) PyFunction_GET_MODULE(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_DEFAULTS(PyObject *func) {
return _PyFunction_CAST(func)->func_defaults;
}
#define PyFunction_GET_DEFAULTS(func) PyFunction_GET_DEFAULTS(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_KW_DEFAULTS(PyObject *func) {
return _PyFunction_CAST(func)->func_kwdefaults;
}
#define PyFunction_GET_KW_DEFAULTS(func) PyFunction_GET_KW_DEFAULTS(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_CLOSURE(PyObject *func) {
return _PyFunction_CAST(func)->func_closure;
}
#define PyFunction_GET_CLOSURE(func) PyFunction_GET_CLOSURE(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_ANNOTATIONS(PyObject *func) {
return _PyFunction_CAST(func)->func_annotations;
}
#define PyFunction_GET_ANNOTATIONS(func) PyFunction_GET_ANNOTATIONS(_PyObject_CAST(func))
/* The classmethod and staticmethod types live here, too */
PyAPI_DATA(PyTypeObject) PyClassMethod_Type;
PyAPI_DATA(PyTypeObject) PyStaticMethod_Type;
PyAPI_FUNC(PyObject *) PyClassMethod_New(PyObject *);
PyAPI_FUNC(PyObject *) PyStaticMethod_New(PyObject *);
#define PY_FOREACH_FUNC_EVENT(V) \
V(CREATE) \
V(DESTROY) \
V(MODIFY_CODE) \
V(MODIFY_DEFAULTS) \
V(MODIFY_KWDEFAULTS)
typedef enum {
#define PY_DEF_EVENT(EVENT) PyFunction_EVENT_##EVENT,
PY_FOREACH_FUNC_EVENT(PY_DEF_EVENT)
#undef PY_DEF_EVENT
} PyFunction_WatchEvent;
/*
* A callback that is invoked for different events in a function's lifecycle.
*
* The callback is invoked with a borrowed reference to func, after it is
* created and before it is modified or destroyed. The callback should not
* modify func.
*
* When a function's code object, defaults, or kwdefaults are modified the
* callback will be invoked with the respective event and new_value will
* contain a borrowed reference to the new value that is about to be stored in
* the function. Otherwise the third argument is NULL.
*
* If the callback returns with an exception set, it must return -1. Otherwise
* it should return 0.
*/
typedef int (*PyFunction_WatchCallback)(
PyFunction_WatchEvent event,
PyFunctionObject *func,
PyObject *new_value);
/*
* Register a per-interpreter callback that will be invoked for function lifecycle
* events.
*
* Returns a handle that may be passed to PyFunction_ClearWatcher on success,
* or -1 and sets an error if no more handles are available.
*/
PyAPI_FUNC(int) PyFunction_AddWatcher(PyFunction_WatchCallback callback);
/*
* Clear the watcher associated with the watcher_id handle.
*
* Returns 0 on success or -1 if no watcher exists for the supplied id.
*/
PyAPI_FUNC(int) PyFunction_ClearWatcher(int watcher_id);
#ifdef __cplusplus
}
#endif
#endif /* !Py_FUNCOBJECT_H */
#endif /* Py_LIMITED_API */

View File

@@ -0,0 +1,83 @@
/* Generator object interface */
#ifndef Py_LIMITED_API
#ifndef Py_GENOBJECT_H
#define Py_GENOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* --- Generators --------------------------------------------------------- */
/* _PyGenObject_HEAD defines the initial segment of generator
and coroutine objects. */
#define _PyGenObject_HEAD(prefix) \
PyObject_HEAD \
/* List of weak references. */ \
PyObject *prefix##_weakreflist; \
/* Name of the generator. */ \
PyObject *prefix##_name; \
/* Qualified name of the generator. */ \
PyObject *prefix##_qualname; \
_PyErr_StackItem prefix##_exc_state; \
PyObject *prefix##_origin_or_finalizer; \
char prefix##_hooks_inited; \
char prefix##_closed; \
char prefix##_running_async; \
/* The frame */ \
int8_t prefix##_frame_state; \
PyObject *prefix##_iframe[1]; \
typedef struct {
/* The gi_ prefix is a mnemonic for generator-iterator. */
_PyGenObject_HEAD(gi)
} PyGenObject;
PyAPI_DATA(PyTypeObject) PyGen_Type;
#define PyGen_Check(op) PyObject_TypeCheck((op), &PyGen_Type)
#define PyGen_CheckExact(op) Py_IS_TYPE((op), &PyGen_Type)
PyAPI_FUNC(PyObject *) PyGen_New(PyFrameObject *);
PyAPI_FUNC(PyObject *) PyGen_NewWithQualName(PyFrameObject *,
PyObject *name, PyObject *qualname);
PyAPI_FUNC(PyCodeObject *) PyGen_GetCode(PyGenObject *gen);
/* --- PyCoroObject ------------------------------------------------------- */
typedef struct {
_PyGenObject_HEAD(cr)
} PyCoroObject;
PyAPI_DATA(PyTypeObject) PyCoro_Type;
#define PyCoro_CheckExact(op) Py_IS_TYPE((op), &PyCoro_Type)
PyAPI_FUNC(PyObject *) PyCoro_New(PyFrameObject *,
PyObject *name, PyObject *qualname);
/* --- Asynchronous Generators -------------------------------------------- */
typedef struct {
_PyGenObject_HEAD(ag)
} PyAsyncGenObject;
PyAPI_DATA(PyTypeObject) PyAsyncGen_Type;
PyAPI_DATA(PyTypeObject) _PyAsyncGenASend_Type;
PyAPI_FUNC(PyObject *) PyAsyncGen_New(PyFrameObject *,
PyObject *name, PyObject *qualname);
#define PyAsyncGen_CheckExact(op) Py_IS_TYPE((op), &PyAsyncGen_Type)
#define PyAsyncGenASend_CheckExact(op) Py_IS_TYPE((op), &_PyAsyncGenASend_Type)
#undef _PyGenObject_HEAD
#ifdef __cplusplus
}
#endif
#endif /* !Py_GENOBJECT_H */
#endif /* Py_LIMITED_API */

View File

@@ -0,0 +1,25 @@
#ifndef Py_CPYTHON_IMPORT_H
# error "this header file must not be included directly"
#endif
PyMODINIT_FUNC PyInit__imp(void);
struct _inittab {
const char *name; /* ASCII encoded string */
PyObject* (*initfunc)(void);
};
// This is not used after Py_Initialize() is called.
PyAPI_DATA(struct _inittab *) PyImport_Inittab;
PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab);
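The classic embedding pattern for the inittab hook above: register a built-in extension module before Py_Initialize(). The module name "spam" is purely illustrative:

#include <Python.h>

static struct PyModuleDef demo_spam_def = {
    PyModuleDef_HEAD_INIT, "spam", NULL, -1, NULL
};

static PyObject *
demo_init_spam(void)
{
    return PyModule_Create(&demo_spam_def);
}

static struct _inittab demo_extra_modules[] = {
    {"spam", demo_init_spam},
    {NULL, NULL}
};

/* Must run before Py_Initialize():
   PyImport_ExtendInittab(demo_extra_modules); */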
struct _frozen {
const char *name; /* ASCII encoded string */
const unsigned char *code;
int size;
int is_package;
};
/* Embedding apps may change this pointer to point to their favorite
collection of frozen modules: */
PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules;

View File

@@ -0,0 +1,274 @@
#ifndef Py_PYCORECONFIG_H
#define Py_PYCORECONFIG_H
#ifndef Py_LIMITED_API
#ifdef __cplusplus
extern "C" {
#endif
/* --- PyStatus ----------------------------------------------- */
typedef struct {
enum {
_PyStatus_TYPE_OK=0,
_PyStatus_TYPE_ERROR=1,
_PyStatus_TYPE_EXIT=2
} _type;
const char *func;
const char *err_msg;
int exitcode;
} PyStatus;
PyAPI_FUNC(PyStatus) PyStatus_Ok(void);
PyAPI_FUNC(PyStatus) PyStatus_Error(const char *err_msg);
PyAPI_FUNC(PyStatus) PyStatus_NoMemory(void);
PyAPI_FUNC(PyStatus) PyStatus_Exit(int exitcode);
PyAPI_FUNC(int) PyStatus_IsError(PyStatus err);
PyAPI_FUNC(int) PyStatus_IsExit(PyStatus err);
PyAPI_FUNC(int) PyStatus_Exception(PyStatus err);
/* --- PyWideStringList ------------------------------------------------ */
typedef struct {
/* If length is greater than zero, items must be non-NULL
and all item strings must be non-NULL */
Py_ssize_t length;
wchar_t **items;
} PyWideStringList;
PyAPI_FUNC(PyStatus) PyWideStringList_Append(PyWideStringList *list,
const wchar_t *item);
PyAPI_FUNC(PyStatus) PyWideStringList_Insert(PyWideStringList *list,
Py_ssize_t index,
const wchar_t *item);
/* --- PyPreConfig ----------------------------------------------- */
typedef struct PyPreConfig {
int _config_init; /* _PyConfigInitEnum value */
/* Parse Py_PreInitializeFromBytesArgs() arguments?
See PyConfig.parse_argv */
int parse_argv;
/* If greater than 0, enable isolated mode: sys.path contains
neither the script's directory nor the user's site-packages directory.
Set to 1 by the -I command line option. If set to -1 (default), inherit
Py_IsolatedFlag value. */
int isolated;
/* If greater than 0: use environment variables.
Set to 0 by -E command line option. If set to -1 (default), it is
set to !Py_IgnoreEnvironmentFlag. */
int use_environment;
/* Set the LC_CTYPE locale to the user preferred locale? If equals to 0,
set coerce_c_locale and coerce_c_locale_warn to 0. */
int configure_locale;
/* Coerce the LC_CTYPE locale if it's equal to "C"? (PEP 538)
Set to 0 by PYTHONCOERCECLOCALE=0. Set to 1 by PYTHONCOERCECLOCALE=1.
Set to 2 if the user preferred LC_CTYPE locale is "C".
If it is equal to 1, LC_CTYPE locale is read to decide if it should be
coerced or not (ex: PYTHONCOERCECLOCALE=1). Internally, it is set to 2
if the LC_CTYPE locale must be coerced.
Disabled by default (set to 0). Set it to -1 to let Python decide if it
should be enabled or not. */
int coerce_c_locale;
/* Emit a warning if the LC_CTYPE locale is coerced?
Set to 1 by PYTHONCOERCECLOCALE=warn.
Disabled by default (set to 0). Set it to -1 to let Python decide if it
should be enabled or not. */
int coerce_c_locale_warn;
#ifdef MS_WINDOWS
/* If greater than 1, use the "mbcs" encoding instead of the UTF-8
encoding for the filesystem encoding.
Set to 1 if the PYTHONLEGACYWINDOWSFSENCODING environment variable is
set to a non-empty string. If set to -1 (default), inherit
Py_LegacyWindowsFSEncodingFlag value.
See PEP 529 for more details. */
int legacy_windows_fs_encoding;
#endif
/* Enable UTF-8 mode? (PEP 540)
Disabled by default (equals to 0).
Set to 1 by "-X utf8" and "-X utf8=1" command line options.
Set to 1 by PYTHONUTF8=1 environment variable.
Set to 0 by "-X utf8=0" and PYTHONUTF8=0.
If equals to -1, it is set to 1 if the LC_CTYPE locale is "C" or
"POSIX", otherwise it is set to 0. Inherit Py_UTF8Mode value value. */
int utf8_mode;
/* If non-zero, enable the Python Development Mode.
Set to 1 by the -X dev command line option. Set by the PYTHONDEVMODE
environment variable. */
int dev_mode;
/* Memory allocator: PYTHONMALLOC env var.
See PyMemAllocatorName for valid values. */
int allocator;
} PyPreConfig;
PyAPI_FUNC(void) PyPreConfig_InitPythonConfig(PyPreConfig *config);
PyAPI_FUNC(void) PyPreConfig_InitIsolatedConfig(PyPreConfig *config);
/* --- PyConfig ---------------------------------------------- */
/* This structure is best documented in the Doc/c-api/init_config.rst file. */
typedef struct PyConfig {
int _config_init; /* _PyConfigInitEnum value */
int isolated;
int use_environment;
int dev_mode;
int install_signal_handlers;
int use_hash_seed;
unsigned long hash_seed;
int faulthandler;
int tracemalloc;
int perf_profiling;
int import_time;
int code_debug_ranges;
int show_ref_count;
int dump_refs;
wchar_t *dump_refs_file;
int malloc_stats;
wchar_t *filesystem_encoding;
wchar_t *filesystem_errors;
wchar_t *pycache_prefix;
int parse_argv;
PyWideStringList orig_argv;
PyWideStringList argv;
PyWideStringList xoptions;
PyWideStringList warnoptions;
int site_import;
int bytes_warning;
int warn_default_encoding;
int inspect;
int interactive;
int optimization_level;
int parser_debug;
int write_bytecode;
int verbose;
int quiet;
int user_site_directory;
int configure_c_stdio;
int buffered_stdio;
wchar_t *stdio_encoding;
wchar_t *stdio_errors;
#ifdef MS_WINDOWS
int legacy_windows_stdio;
#endif
wchar_t *check_hash_pycs_mode;
int use_frozen_modules;
int safe_path;
int int_max_str_digits;
int cpu_count;
#ifdef Py_GIL_DISABLED
int enable_gil;
#endif
/* --- Path configuration inputs ------------ */
int pathconfig_warnings;
wchar_t *program_name;
wchar_t *pythonpath_env;
wchar_t *home;
wchar_t *platlibdir;
/* --- Path configuration outputs ----------- */
int module_search_paths_set;
PyWideStringList module_search_paths;
wchar_t *stdlib_dir;
wchar_t *executable;
wchar_t *base_executable;
wchar_t *prefix;
wchar_t *base_prefix;
wchar_t *exec_prefix;
wchar_t *base_exec_prefix;
/* --- Parameter only used by Py_Main() ---------- */
int skip_source_first_line;
wchar_t *run_command;
wchar_t *run_module;
wchar_t *run_filename;
/* --- Set by Py_Main() -------------------------- */
wchar_t *sys_path_0;
/* --- Private fields ---------------------------- */
// Install importlib? If equals to 0, importlib is not initialized at all.
// Needed by freeze_importlib.
int _install_importlib;
// If equal to 0, stop Python initialization before the "main" phase.
int _init_main;
// If non-zero, we believe we're running from a source tree.
int _is_python_build;
#ifdef Py_STATS
// If non-zero, turns on statistics gathering.
int _pystats;
#endif
#ifdef Py_DEBUG
// If not empty, import a non-__main__ module before site.py is executed.
// PYTHON_PRESITE=package.module or -X presite=package.module
wchar_t *run_presite;
#endif
} PyConfig;
PyAPI_FUNC(void) PyConfig_InitPythonConfig(PyConfig *config);
PyAPI_FUNC(void) PyConfig_InitIsolatedConfig(PyConfig *config);
PyAPI_FUNC(void) PyConfig_Clear(PyConfig *);
PyAPI_FUNC(PyStatus) PyConfig_SetString(
PyConfig *config,
wchar_t **config_str,
const wchar_t *str);
PyAPI_FUNC(PyStatus) PyConfig_SetBytesString(
PyConfig *config,
wchar_t **config_str,
const char *str);
PyAPI_FUNC(PyStatus) PyConfig_Read(PyConfig *config);
PyAPI_FUNC(PyStatus) PyConfig_SetBytesArgv(
PyConfig *config,
Py_ssize_t argc,
char * const *argv);
PyAPI_FUNC(PyStatus) PyConfig_SetArgv(PyConfig *config,
Py_ssize_t argc,
wchar_t * const *argv);
PyAPI_FUNC(PyStatus) PyConfig_SetWideStringList(PyConfig *config,
PyWideStringList *list,
Py_ssize_t length, wchar_t **items);
/* --- Helper functions --------------------------------------- */
/* Get the original command line arguments, before Python modified them.
See also PyConfig.orig_argv. */
PyAPI_FUNC(void) Py_GetArgcArgv(int *argc, wchar_t ***argv);
#ifdef __cplusplus
}
#endif
#endif /* !Py_LIMITED_API */
#endif /* !Py_PYCORECONFIG_H */
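The canonical initialization sequence for the PyConfig API above, as a sketch; Py_InitializeFromConfig() and Py_ExitStatusException() are declared in the lifecycle headers, and the program name is illustrative:

#include <Python.h>

static void
demo_init_runtime(void)
{
    PyConfig config;
    PyConfig_InitPythonConfig(&config);
    PyStatus status = PyConfig_SetString(&config, &config.program_name,
                                         L"borealis");
    if (!PyStatus_Exception(status)) {
        status = Py_InitializeFromConfig(&config);
    }
    PyConfig_Clear(&config);
    if (PyStatus_Exception(status)) {
        Py_ExitStatusException(status);   /* does not return */
    }
}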

View File

@@ -0,0 +1,53 @@
#ifndef Py_CPYTHON_LISTOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_VAR_HEAD
/* Vector of pointers to list elements. list[0] is ob_item[0], etc. */
PyObject **ob_item;
/* ob_item contains space for 'allocated' elements. The number
* currently in use is ob_size.
* Invariants:
* 0 <= ob_size <= allocated
* len(list) == ob_size
* ob_item == NULL implies ob_size == allocated == 0
* list.sort() temporarily sets allocated to -1 to detect mutations.
*
* Items must normally not be NULL, except during construction when
* the list is not yet visible outside the function that builds it.
*/
Py_ssize_t allocated;
} PyListObject;
/* Cast argument to PyListObject* type. */
#define _PyList_CAST(op) \
(assert(PyList_Check(op)), _Py_CAST(PyListObject*, (op)))
// Macros and static inline functions, trading safety for speed
static inline Py_ssize_t PyList_GET_SIZE(PyObject *op) {
PyListObject *list = _PyList_CAST(op);
#ifdef Py_GIL_DISABLED
return _Py_atomic_load_ssize_relaxed(&(_PyVarObject_CAST(list)->ob_size));
#else
return Py_SIZE(list);
#endif
}
#define PyList_GET_SIZE(op) PyList_GET_SIZE(_PyObject_CAST(op))
#define PyList_GET_ITEM(op, index) (_PyList_CAST(op)->ob_item[(index)])
static inline void
PyList_SET_ITEM(PyObject *op, Py_ssize_t index, PyObject *value) {
PyListObject *list = _PyList_CAST(op);
assert(0 <= index);
assert(index < list->allocated);
list->ob_item[index] = value;
}
#define PyList_SET_ITEM(op, index, value) \
PyList_SET_ITEM(_PyObject_CAST(op), (index), _PyObject_CAST(value))
PyAPI_FUNC(int) PyList_Extend(PyObject *self, PyObject *iterable);
PyAPI_FUNC(int) PyList_Clear(PyObject *self);
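A sketch of the fast list macros above. PyList_SET_ITEM steals a reference and must only fill fresh, still-NULL slots, e.g. right after PyList_New():

#include <Python.h>

static PyObject *
demo_make_range_list(Py_ssize_t n)
{
    PyObject *list = PyList_New(n);       /* every slot starts out NULL */
    if (list == NULL) {
        return NULL;
    }
    for (Py_ssize_t i = 0; i < n; i++) {
        PyObject *v = PyLong_FromSsize_t(i);
        if (v == NULL) {
            Py_DECREF(list);
            return NULL;
        }
        PyList_SET_ITEM(list, i, v);      /* steals the reference to v */
    }
    return list;
}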

View File

@@ -0,0 +1,63 @@
#ifndef Py_CPYTHON_LOCK_H
# error "this header file must not be included directly"
#endif
#define _Py_UNLOCKED 0
#define _Py_LOCKED 1
// A mutex that occupies one byte. The lock can be zero initialized to
// represent the unlocked state.
//
// Typical initialization:
// PyMutex m = (PyMutex){0};
//
// Or initialize as global variables:
// static PyMutex m;
//
// Typical usage:
// PyMutex_Lock(&m);
// ...
// PyMutex_Unlock(&m);
//
// The contents of the PyMutex are not part of the public API, but are
// described to aid in understanding the implementation and debugging. Only
// the two least significant bits are used. The remaining bits are always zero:
// 0b00: unlocked
// 0b01: locked
// 0b10: unlocked and has parked threads
// 0b11: locked and has parked threads
typedef struct PyMutex {
uint8_t _bits; // (private)
} PyMutex;
// exported function for locking the mutex
PyAPI_FUNC(void) PyMutex_Lock(PyMutex *m);
// exported function for unlocking the mutex
PyAPI_FUNC(void) PyMutex_Unlock(PyMutex *m);
// Locks the mutex.
//
// If the mutex is currently locked, the calling thread will be parked until
// the mutex is unlocked. If the current thread holds the GIL, then the GIL
// will be released while the thread is parked.
static inline void
_PyMutex_Lock(PyMutex *m)
{
uint8_t expected = _Py_UNLOCKED;
if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
PyMutex_Lock(m);
}
}
#define PyMutex_Lock _PyMutex_Lock
// Unlocks the mutex.
static inline void
_PyMutex_Unlock(PyMutex *m)
{
uint8_t expected = _Py_LOCKED;
if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_UNLOCKED)) {
PyMutex_Unlock(m);
}
}
#define PyMutex_Unlock _PyMutex_Unlock

View File

@@ -0,0 +1,146 @@
#ifndef Py_LIMITED_API
#ifndef Py_LONGINTREPR_H
#define Py_LONGINTREPR_H
#ifdef __cplusplus
extern "C" {
#endif
/* This is published for the benefit of "friends" marshal.c and _decimal.c. */
/* Parameters of the integer representation. There are two different
sets of parameters: one set for 30-bit digits, stored in an unsigned 32-bit
integer type, and one set for 15-bit digits with each digit stored in an
unsigned short. The value of PYLONG_BITS_IN_DIGIT, defined either at
configure time or in pyport.h, is used to decide which digit size to use.
Type 'digit' should be able to hold 2*PyLong_BASE-1, and type 'twodigits'
should be an unsigned integer type able to hold all integers up to
PyLong_BASE*PyLong_BASE-1. x_sub assumes that 'digit' is an unsigned type,
and that overflow is handled by taking the result modulo 2**N for some N >
PyLong_SHIFT. The majority of the code doesn't care about the precise
value of PyLong_SHIFT, but there are some notable exceptions:
- PyLong_{As,From}ByteArray require that PyLong_SHIFT be at least 8
- long_hash() requires that PyLong_SHIFT is *strictly* less than the number
of bits in an unsigned long, as do the PyLong <-> long (or unsigned long)
conversion functions
- the Python int <-> size_t/Py_ssize_t conversion functions expect that
PyLong_SHIFT is strictly less than the number of bits in a size_t
- the marshal code currently expects that PyLong_SHIFT is a multiple of 15
- NSMALLNEGINTS and NSMALLPOSINTS should be small enough to fit in a single
digit; with the current values this forces PyLong_SHIFT >= 9
The values 15 and 30 should fit all of the above requirements, on any
platform.
*/
#if PYLONG_BITS_IN_DIGIT == 30
typedef uint32_t digit;
typedef int32_t sdigit; /* signed variant of digit */
typedef uint64_t twodigits;
typedef int64_t stwodigits; /* signed variant of twodigits */
#define PyLong_SHIFT 30
#define _PyLong_DECIMAL_SHIFT 9 /* max(e such that 10**e fits in a digit) */
#define _PyLong_DECIMAL_BASE ((digit)1000000000) /* 10 ** DECIMAL_SHIFT */
#elif PYLONG_BITS_IN_DIGIT == 15
typedef unsigned short digit;
typedef short sdigit; /* signed variant of digit */
typedef unsigned long twodigits;
typedef long stwodigits; /* signed variant of twodigits */
#define PyLong_SHIFT 15
#define _PyLong_DECIMAL_SHIFT 4 /* max(e such that 10**e fits in a digit) */
#define _PyLong_DECIMAL_BASE ((digit)10000) /* 10 ** DECIMAL_SHIFT */
#else
#error "PYLONG_BITS_IN_DIGIT should be 15 or 30"
#endif
#define PyLong_BASE ((digit)1 << PyLong_SHIFT)
#define PyLong_MASK ((digit)(PyLong_BASE - 1))
/* Long integer representation.
Long integers are made up of a number of 30- or 15-bit digits, depending on
the platform. The number of digits (ndigits) is stored in the high bits of
the lv_tag field (lv_tag >> _PyLong_NON_SIZE_BITS).
The absolute value of a number is equal to
SUM(for i=0 through ndigits-1) ob_digit[i] * 2**(PyLong_SHIFT*i)
The sign of the value is stored in the lower 2 bits of lv_tag.
- 0: Positive
- 1: Zero
- 2: Negative
The third lowest bit of lv_tag is reserved for an immortality flag, but is
not currently used.
In a normalized number, ob_digit[ndigits-1] (the most significant
digit) is never zero. Also, in all cases, for all valid i,
0 <= ob_digit[i] <= PyLong_MASK.
The allocation function takes care of allocating extra memory
so that ob_digit[0] ... ob_digit[ndigits-1] are actually available.
We always allocate memory for at least one digit, so accessing ob_digit[0]
is always safe. However, in the case ndigits == 0, the contents of
ob_digit[0] may be undefined.
*/
typedef struct _PyLongValue {
uintptr_t lv_tag; /* Number of digits, sign and flags */
digit ob_digit[1];
} _PyLongValue;
struct _longobject {
PyObject_HEAD
_PyLongValue long_value;
};
PyAPI_FUNC(PyLongObject*) _PyLong_New(Py_ssize_t);
// Return a copy of src.
PyAPI_FUNC(PyObject*) _PyLong_Copy(PyLongObject *src);
PyAPI_FUNC(PyLongObject*) _PyLong_FromDigits(
int negative,
Py_ssize_t digit_count,
digit *digits);
/* Inline some internals for speed. These should be in pycore_long.h
* if user code didn't need them inlined. */
#define _PyLong_SIGN_MASK 3
#define _PyLong_NON_SIZE_BITS 3
static inline int
_PyLong_IsCompact(const PyLongObject* op) {
assert(PyType_HasFeature((op)->ob_base.ob_type, Py_TPFLAGS_LONG_SUBCLASS));
return op->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS);
}
#define PyUnstable_Long_IsCompact _PyLong_IsCompact
static inline Py_ssize_t
_PyLong_CompactValue(const PyLongObject *op)
{
Py_ssize_t sign;
assert(PyType_HasFeature((op)->ob_base.ob_type, Py_TPFLAGS_LONG_SUBCLASS));
assert(PyUnstable_Long_IsCompact(op));
sign = 1 - (op->long_value.lv_tag & _PyLong_SIGN_MASK);
return sign * (Py_ssize_t)op->long_value.ob_digit[0];
}
#define PyUnstable_Long_CompactValue _PyLong_CompactValue
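// Worked example (editor's illustration): for the int 5, ndigits is 1 and the
// sign bits are 0 (positive), so lv_tag == (1 << _PyLong_NON_SIZE_BITS) | 0
// == 8 and _PyLong_CompactValue() returns (1 - 0) * ob_digit[0] == 5. For -5,
// the sign bits are 2, giving lv_tag == 10 and (1 - 2) * 5 == -5. For 0,
// ndigits is 0 and the sign bits are 1, giving lv_tag == 1 and a result of
// (1 - 1) * ob_digit[0] == 0. All three tags are below 2 << 3 == 16, so all
// three values are "compact".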
#ifdef __cplusplus
}
#endif
#endif /* !Py_LONGINTREPR_H */
#endif /* Py_LIMITED_API */

View File

@@ -0,0 +1,116 @@
#ifndef Py_CPYTHON_LONGOBJECT_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(PyObject*) PyLong_FromUnicodeObject(PyObject *u, int base);
#define Py_ASNATIVEBYTES_DEFAULTS -1
#define Py_ASNATIVEBYTES_BIG_ENDIAN 0
#define Py_ASNATIVEBYTES_LITTLE_ENDIAN 1
#define Py_ASNATIVEBYTES_NATIVE_ENDIAN 3
#define Py_ASNATIVEBYTES_UNSIGNED_BUFFER 4
#define Py_ASNATIVEBYTES_REJECT_NEGATIVE 8
#define Py_ASNATIVEBYTES_ALLOW_INDEX 16
/* PyLong_AsNativeBytes: Copy the integer value to a native variable.
buffer points to the first byte of the variable.
n_bytes is the number of bytes available in the buffer. Pass 0 to request
the required size for the value.
flags is a bitfield of the following flags:
* 1 - little endian
* 2 - native endian
* 4 - unsigned destination (e.g. don't reject copying 255 into one byte)
* 8 - raise an exception for negative inputs
* 16 - call __index__ on non-int types
If flags is -1 (all bits set), native endian is used, value truncation
behaves most like C (allows negative inputs and allow MSB set), and non-int
objects will raise a TypeError.
Big endian mode will write the most significant byte into the address
directly referenced by buffer; little endian will write the least significant
byte into that address.
If an exception is raised, returns a negative value.
Otherwise, returns the number of bytes that are required to store the value.
To check that the full value is represented, ensure that the return value is
less than or equal to n_bytes.
All n_bytes are guaranteed to be written (unless an exception occurs), and
so ignoring a positive return value is the equivalent of a downcast in C.
In cases where the full value could not be represented, the returned value
may be larger than necessary - this function is not an accurate way to
calculate the bit length of an integer object.
*/
PyAPI_FUNC(Py_ssize_t) PyLong_AsNativeBytes(PyObject* v, void* buffer,
Py_ssize_t n_bytes, int flags);
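// Example (editor's sketch): copying a Python int 'obj' into a C int64_t with
// the C-like defaults, then checking for truncation.
//
//     int64_t value;
//     Py_ssize_t needed = PyLong_AsNativeBytes(obj, &value, sizeof(value),
//                                              Py_ASNATIVEBYTES_DEFAULTS);
//     if (needed < 0) {
//         // exception set
//     }
//     else if ((size_t)needed > sizeof(value)) {
//         // truncated: 'value' holds only the least-significant bytes
//     }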
/* PyLong_FromNativeBytes: Create an int value from a native integer
n_bytes is the number of bytes to read from the buffer. Passing 0 will
always produce the zero int.
PyLong_FromUnsignedNativeBytes always produces a non-negative int.
flags is the same as for PyLong_AsNativeBytes, but only supports selecting
the endianness or forcing an unsigned buffer.
Returns the int object, or NULL with an exception set. */
PyAPI_FUNC(PyObject*) PyLong_FromNativeBytes(const void* buffer, size_t n_bytes,
int flags);
PyAPI_FUNC(PyObject*) PyLong_FromUnsignedNativeBytes(const void* buffer,
size_t n_bytes, int flags);
PyAPI_FUNC(int) PyUnstable_Long_IsCompact(const PyLongObject* op);
PyAPI_FUNC(Py_ssize_t) PyUnstable_Long_CompactValue(const PyLongObject* op);
// _PyLong_Sign. Return 0 if v is 0, -1 if v < 0, +1 if v > 0.
// v must not be NULL, and must be a normalized long.
// There are no error cases.
PyAPI_FUNC(int) _PyLong_Sign(PyObject *v);
/* _PyLong_NumBits. Return the number of bits needed to represent the
absolute value of a long. For example, this returns 1 for 1 and -1, 2
for 2 and -2, and 2 for 3 and -3. It returns 0 for 0.
v must not be NULL, and must be a normalized long.
(size_t)-1 is returned and OverflowError set if the true result doesn't
fit in a size_t.
*/
PyAPI_FUNC(size_t) _PyLong_NumBits(PyObject *v);
/* _PyLong_FromByteArray: View the n unsigned bytes as a binary integer in
base 256, and return a Python int with the same numeric value.
If n is 0, the integer is 0. Else:
If little_endian is 1/true, bytes[n-1] is the MSB and bytes[0] the LSB;
else (little_endian is 0/false) bytes[0] is the MSB and bytes[n-1] the
LSB.
If is_signed is 0/false, view the bytes as a non-negative integer.
If is_signed is 1/true, view the bytes as a 2's-complement integer,
non-negative if bit 0x80 of the MSB is clear, negative if set.
Error returns:
+ Return NULL with the appropriate exception set if there's not
enough memory to create the Python int.
*/
PyAPI_FUNC(PyObject *) _PyLong_FromByteArray(
const unsigned char* bytes, size_t n,
int little_endian, int is_signed);
/* _PyLong_AsByteArray: Convert the least-significant 8*n bits of long
v to a base-256 integer, stored in array bytes. Normally return 0,
return -1 on error.
If little_endian is 1/true, store the MSB at bytes[n-1] and the LSB at
bytes[0]; else (little_endian is 0/false) store the MSB at bytes[0] and
the LSB at bytes[n-1].
If is_signed is 0/false, it's an error if v < 0; else (v >= 0) n bytes
are filled and there's nothing special about bit 0x80 of the MSB.
If is_signed is 1/true, bytes is filled with the 2's-complement
representation of v's value. Bit 0x80 of the MSB is the sign bit.
Error returns (-1):
+ is_signed is 0 and v < 0. TypeError is set in this case, and bytes
isn't altered.
+ n isn't big enough to hold the full mathematical value of v. For
example, if is_signed is 0 and there are more digits in v than
fit in n; or if is_signed is 1, v < 0, and n is just 1 bit shy of
being large enough to hold a sign bit. OverflowError is set in this
case, but bytes holds the least-significant n bytes of the true value.
*/
PyAPI_FUNC(int) _PyLong_AsByteArray(PyLongObject* v,
unsigned char* bytes, size_t n,
int little_endian, int is_signed, int with_exceptions);
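// Example (editor's sketch): round-tripping a Python int through a signed,
// little-endian byte array with the two private helpers above. 'v' is assumed
// to be a PyLongObject* whose value fits in 8 bytes.
//
//     unsigned char buf[8];
//     if (_PyLong_AsByteArray(v, buf, sizeof(buf),
//                             1,    // little_endian
//                             1,    // is_signed
//                             1     // with_exceptions
//                             ) == 0) {
//         PyObject *copy = _PyLong_FromByteArray(buf, sizeof(buf), 1, 1);
//         // 'copy' compares equal to 'v' (or is NULL on memory error)
//     }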
/* For use by the gcd function in mathmodule.c */
PyAPI_FUNC(PyObject *) _PyLong_GCD(PyObject *, PyObject *);

View File

@@ -0,0 +1,50 @@
#ifndef Py_CPYTHON_MEMORYOBJECT_H
# error "this header file must not be included directly"
#endif
/* The structs are declared here so that macros can work, but they shouldn't
be considered public. Don't access their fields directly, use the macros
and functions instead! */
#define _Py_MANAGED_BUFFER_RELEASED 0x001 /* access to exporter blocked */
#define _Py_MANAGED_BUFFER_FREE_FORMAT 0x002 /* free format */
typedef struct {
PyObject_HEAD
int flags; /* state flags */
Py_ssize_t exports; /* number of direct memoryview exports */
Py_buffer master; /* snapshot buffer obtained from the original exporter */
} _PyManagedBufferObject;
/* memoryview state flags */
#define _Py_MEMORYVIEW_RELEASED 0x001 /* access to master buffer blocked */
#define _Py_MEMORYVIEW_C 0x002 /* C-contiguous layout */
#define _Py_MEMORYVIEW_FORTRAN 0x004 /* Fortran contiguous layout */
#define _Py_MEMORYVIEW_SCALAR 0x008 /* scalar: ndim = 0 */
#define _Py_MEMORYVIEW_PIL 0x010 /* PIL-style layout */
#define _Py_MEMORYVIEW_RESTRICTED 0x020 /* Disallow new references to the memoryview's buffer */
typedef struct {
PyObject_VAR_HEAD
_PyManagedBufferObject *mbuf; /* managed buffer */
Py_hash_t hash; /* hash value for read-only views */
int flags; /* state flags */
Py_ssize_t exports; /* number of buffer re-exports */
Py_buffer view; /* private copy of the exporter's view */
PyObject *weakreflist;
Py_ssize_t ob_array[1]; /* shape, strides, suboffsets */
} PyMemoryViewObject;
#define _PyMemoryView_CAST(op) _Py_CAST(PyMemoryViewObject*, op)
/* Get a pointer to the memoryview's private copy of the exporter's buffer. */
static inline Py_buffer* PyMemoryView_GET_BUFFER(PyObject *op) {
return (&_PyMemoryView_CAST(op)->view);
}
#define PyMemoryView_GET_BUFFER(op) PyMemoryView_GET_BUFFER(_PyObject_CAST(op))
/* Get a pointer to the exporting object (this may be NULL!). */
static inline PyObject* PyMemoryView_GET_BASE(PyObject *op) {
return _PyMemoryView_CAST(op)->view.obj;
}
#define PyMemoryView_GET_BASE(op) PyMemoryView_GET_BASE(_PyObject_CAST(op))
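// Example (editor's sketch): inspecting a memoryview 'mv' through the
// accessors above; they do no type checks, so the caller must already know
// that 'mv' is a memoryview object.
//
//     Py_buffer *view = PyMemoryView_GET_BUFFER(mv);
//     Py_ssize_t nbytes = view->len;
//     PyObject *base = PyMemoryView_GET_BASE(mv);  // may be NULL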

View File

@@ -0,0 +1,66 @@
#ifndef Py_CPYTHON_METHODOBJECT_H
# error "this header file must not be included directly"
#endif
// PyCFunctionObject structure
typedef struct {
PyObject_HEAD
PyMethodDef *m_ml; /* Description of the C function to call */
PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */
PyObject *m_module; /* The __module__ attribute, can be anything */
PyObject *m_weakreflist; /* List of weak references */
vectorcallfunc vectorcall;
} PyCFunctionObject;
#define _PyCFunctionObject_CAST(func) \
(assert(PyCFunction_Check(func)), \
_Py_CAST(PyCFunctionObject*, (func)))
// PyCMethodObject structure
typedef struct {
PyCFunctionObject func;
PyTypeObject *mm_class; /* Class that defines this method */
} PyCMethodObject;
#define _PyCMethodObject_CAST(func) \
(assert(PyCMethod_Check(func)), \
_Py_CAST(PyCMethodObject*, (func)))
PyAPI_DATA(PyTypeObject) PyCMethod_Type;
#define PyCMethod_CheckExact(op) Py_IS_TYPE((op), &PyCMethod_Type)
#define PyCMethod_Check(op) PyObject_TypeCheck((op), &PyCMethod_Type)
/* Static inline functions for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyCFunction PyCFunction_GET_FUNCTION(PyObject *func) {
return _PyCFunctionObject_CAST(func)->m_ml->ml_meth;
}
#define PyCFunction_GET_FUNCTION(func) PyCFunction_GET_FUNCTION(_PyObject_CAST(func))
static inline PyObject* PyCFunction_GET_SELF(PyObject *func_obj) {
PyCFunctionObject *func = _PyCFunctionObject_CAST(func_obj);
if (func->m_ml->ml_flags & METH_STATIC) {
return _Py_NULL;
}
return func->m_self;
}
#define PyCFunction_GET_SELF(func) PyCFunction_GET_SELF(_PyObject_CAST(func))
static inline int PyCFunction_GET_FLAGS(PyObject *func) {
return _PyCFunctionObject_CAST(func)->m_ml->ml_flags;
}
#define PyCFunction_GET_FLAGS(func) PyCFunction_GET_FLAGS(_PyObject_CAST(func))
static inline PyTypeObject* PyCFunction_GET_CLASS(PyObject *func_obj) {
PyCFunctionObject *func = _PyCFunctionObject_CAST(func_obj);
if (func->m_ml->ml_flags & METH_METHOD) {
return _PyCMethodObject_CAST(func)->mm_class;
}
return _Py_NULL;
}
#define PyCFunction_GET_CLASS(func) PyCFunction_GET_CLASS(_PyObject_CAST(func))
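// Example (editor's sketch): inspecting a builtin function object 'func'.
// The accessors above do no type checks, so PyCFunction_Check(func) must
// already be known to be true.
//
//     int flags = PyCFunction_GET_FLAGS(func);
//     PyObject *self = PyCFunction_GET_SELF(func);  // NULL for METH_STATIC
//     if (flags & METH_METHOD) {
//         PyTypeObject *cls = PyCFunction_GET_CLASS(func);  // defining class
//     }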

View File

@@ -0,0 +1,26 @@
#ifndef Py_CPYTHON_MODSUPPORT_H
# error "this header file must not be included directly"
#endif
// A data structure that can be used to run initialization code once in a
// thread-safe manner. The C++11 equivalent is std::call_once.
typedef struct {
uint8_t v;
} _PyOnceFlag;
typedef struct _PyArg_Parser {
const char *format;
const char * const *keywords;
const char *fname;
const char *custom_msg;
_PyOnceFlag once; /* atomic one-time initialization flag */
int is_kwtuple_owned; /* does this parser own the kwtuple object? */
int pos; /* number of positional-only arguments */
int min; /* minimal number of arguments */
int max; /* maximal number of positional arguments */
PyObject *kwtuple; /* tuple of keyword parameter names */
struct _PyArg_Parser *next;
} _PyArg_Parser;
PyAPI_FUNC(int) _PyArg_ParseTupleAndKeywordsFast(PyObject *, PyObject *,
struct _PyArg_Parser *, ...);

View File

@@ -0,0 +1,250 @@
#ifndef Py_CPYTHON_MONITORING_H
# error "this header file must not be included directly"
#endif
/* Local events.
* These require bytecode instrumentation */
#define PY_MONITORING_EVENT_PY_START 0
#define PY_MONITORING_EVENT_PY_RESUME 1
#define PY_MONITORING_EVENT_PY_RETURN 2
#define PY_MONITORING_EVENT_PY_YIELD 3
#define PY_MONITORING_EVENT_CALL 4
#define PY_MONITORING_EVENT_LINE 5
#define PY_MONITORING_EVENT_INSTRUCTION 6
#define PY_MONITORING_EVENT_JUMP 7
#define PY_MONITORING_EVENT_BRANCH 8
#define PY_MONITORING_EVENT_STOP_ITERATION 9
#define PY_MONITORING_IS_INSTRUMENTED_EVENT(ev) \
((ev) < _PY_MONITORING_LOCAL_EVENTS)
/* Other events, mainly exceptions */
#define PY_MONITORING_EVENT_RAISE 10
#define PY_MONITORING_EVENT_EXCEPTION_HANDLED 11
#define PY_MONITORING_EVENT_PY_UNWIND 12
#define PY_MONITORING_EVENT_PY_THROW 13
#define PY_MONITORING_EVENT_RERAISE 14
/* Ancillary events */
#define PY_MONITORING_EVENT_C_RETURN 15
#define PY_MONITORING_EVENT_C_RAISE 16
typedef struct _PyMonitoringState {
uint8_t active;
uint8_t opaque;
} PyMonitoringState;
PyAPI_FUNC(int)
PyMonitoring_EnterScope(PyMonitoringState *state_array, uint64_t *version,
const uint8_t *event_types, Py_ssize_t length);
PyAPI_FUNC(int)
PyMonitoring_ExitScope(void);
PyAPI_FUNC(int)
_PyMonitoring_FirePyStartEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FirePyResumeEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FirePyReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval);
PyAPI_FUNC(int)
_PyMonitoring_FirePyYieldEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval);
PyAPI_FUNC(int)
_PyMonitoring_FireCallEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject* callable, PyObject *arg0);
PyAPI_FUNC(int)
_PyMonitoring_FireLineEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
int lineno);
PyAPI_FUNC(int)
_PyMonitoring_FireJumpEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset);
PyAPI_FUNC(int)
_PyMonitoring_FireBranchEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset);
PyAPI_FUNC(int)
_PyMonitoring_FireCReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval);
PyAPI_FUNC(int)
_PyMonitoring_FirePyThrowEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireReraiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireExceptionHandledEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireCRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FirePyUnwindEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireStopIterationEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset, PyObject *value);
#define _PYMONITORING_IF_ACTIVE(STATE, X) \
if ((STATE)->active) { \
return (X); \
} \
else { \
return 0; \
}
static inline int
PyMonitoring_FirePyStartEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyStartEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FirePyResumeEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyResumeEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FirePyReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyReturnEvent(state, codelike, offset, retval));
}
static inline int
PyMonitoring_FirePyYieldEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyYieldEvent(state, codelike, offset, retval));
}
static inline int
PyMonitoring_FireCallEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject* callable, PyObject *arg0)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireCallEvent(state, codelike, offset, callable, arg0));
}
static inline int
PyMonitoring_FireLineEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
int lineno)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireLineEvent(state, codelike, offset, lineno));
}
static inline int
PyMonitoring_FireJumpEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireJumpEvent(state, codelike, offset, target_offset));
}
static inline int
PyMonitoring_FireBranchEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireBranchEvent(state, codelike, offset, target_offset));
}
static inline int
PyMonitoring_FireCReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireCReturnEvent(state, codelike, offset, retval));
}
static inline int
PyMonitoring_FirePyThrowEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyThrowEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireRaiseEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireReraiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireReraiseEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireExceptionHandledEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireExceptionHandledEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireCRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireCRaiseEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FirePyUnwindEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyUnwindEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireStopIterationEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset, PyObject *value)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireStopIterationEvent(state, codelike, offset, value));
}
#undef _PYMONITORING_IF_ACTIVE
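// Example (editor's sketch): a tool firing a single PY_START event for some
// code-like object 'codelike'. The version variable must outlive the scope,
// hence 'static'.
//
//     static uint64_t version = 0;
//     PyMonitoringState state[1];
//     uint8_t events[] = { PY_MONITORING_EVENT_PY_START };
//
//     if (PyMonitoring_EnterScope(state, &version, events, 1) < 0) {
//         // exception set
//     }
//     int err = PyMonitoring_FirePyStartEvent(&state[0], codelike, 0);
//     (void)PyMonitoring_ExitScope();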

View File

@@ -0,0 +1,525 @@
#ifndef Py_CPYTHON_OBJECT_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(void) _Py_NewReference(PyObject *op);
PyAPI_FUNC(void) _Py_NewReferenceNoTotal(PyObject *op);
PyAPI_FUNC(void) _Py_ResurrectReference(PyObject *op);
#ifdef Py_REF_DEBUG
/* These are useful as debugging aids when chasing down refleaks. */
PyAPI_FUNC(Py_ssize_t) _Py_GetGlobalRefTotal(void);
# define _Py_GetRefTotal() _Py_GetGlobalRefTotal()
PyAPI_FUNC(Py_ssize_t) _Py_GetLegacyRefTotal(void);
PyAPI_FUNC(Py_ssize_t) _PyInterpreterState_GetRefTotal(PyInterpreterState *);
#endif
/********************* String Literals ****************************************/
/* This structure helps manage static strings. The basic usage goes like this:
Instead of doing
r = PyObject_CallMethod(o, "foo", "args", ...);
do
_Py_IDENTIFIER(foo);
...
r = _PyObject_CallMethodId(o, &PyId_foo, "args", ...);
PyId_foo is a static variable, either on block level or file level. On first
usage, the string "foo" is interned, and the structures are linked. On interpreter
shutdown, all strings are released.
Alternatively, _Py_static_string allows choosing the variable name.
_PyUnicode_FromId returns a borrowed reference to the interned string.
_PyObject_{Get,Set,Has}AttrId are __getattr__ versions using _Py_Identifier*.
*/
typedef struct _Py_Identifier {
const char* string;
// Index in PyInterpreterState.unicode.ids.array. It is unique
// process-wide and must be initialized to -1.
Py_ssize_t index;
// Hidden PyMutex struct for non free-threaded build.
struct {
uint8_t v;
} mutex;
} _Py_Identifier;
#ifndef Py_BUILD_CORE
// For now we are keeping _Py_IDENTIFIER for continued use
// in non-builtin extensions (and naughty PyPI modules).
#define _Py_static_string_init(value) { .string = (value), .index = -1 }
#define _Py_static_string(varname, value) static _Py_Identifier varname = _Py_static_string_init(value)
#define _Py_IDENTIFIER(varname) _Py_static_string(PyId_##varname, #varname)
#endif /* !Py_BUILD_CORE */
typedef struct {
/* Number implementations must check *both*
arguments for proper type and implement the necessary conversions
in the slot functions themselves. */
binaryfunc nb_add;
binaryfunc nb_subtract;
binaryfunc nb_multiply;
binaryfunc nb_remainder;
binaryfunc nb_divmod;
ternaryfunc nb_power;
unaryfunc nb_negative;
unaryfunc nb_positive;
unaryfunc nb_absolute;
inquiry nb_bool;
unaryfunc nb_invert;
binaryfunc nb_lshift;
binaryfunc nb_rshift;
binaryfunc nb_and;
binaryfunc nb_xor;
binaryfunc nb_or;
unaryfunc nb_int;
void *nb_reserved; /* the slot formerly known as nb_long */
unaryfunc nb_float;
binaryfunc nb_inplace_add;
binaryfunc nb_inplace_subtract;
binaryfunc nb_inplace_multiply;
binaryfunc nb_inplace_remainder;
ternaryfunc nb_inplace_power;
binaryfunc nb_inplace_lshift;
binaryfunc nb_inplace_rshift;
binaryfunc nb_inplace_and;
binaryfunc nb_inplace_xor;
binaryfunc nb_inplace_or;
binaryfunc nb_floor_divide;
binaryfunc nb_true_divide;
binaryfunc nb_inplace_floor_divide;
binaryfunc nb_inplace_true_divide;
unaryfunc nb_index;
binaryfunc nb_matrix_multiply;
binaryfunc nb_inplace_matrix_multiply;
} PyNumberMethods;
typedef struct {
lenfunc sq_length;
binaryfunc sq_concat;
ssizeargfunc sq_repeat;
ssizeargfunc sq_item;
void *was_sq_slice;
ssizeobjargproc sq_ass_item;
void *was_sq_ass_slice;
objobjproc sq_contains;
binaryfunc sq_inplace_concat;
ssizeargfunc sq_inplace_repeat;
} PySequenceMethods;
typedef struct {
lenfunc mp_length;
binaryfunc mp_subscript;
objobjargproc mp_ass_subscript;
} PyMappingMethods;
typedef PySendResult (*sendfunc)(PyObject *iter, PyObject *value, PyObject **result);
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
sendfunc am_send;
} PyAsyncMethods;
typedef struct {
getbufferproc bf_getbuffer;
releasebufferproc bf_releasebuffer;
} PyBufferProcs;
/* Allow printfunc in the tp_vectorcall_offset slot for
* backwards-compatibility */
typedef Py_ssize_t printfunc;
// If this structure is modified, Doc/includes/typestruct.h should be updated
// as well.
struct _typeobject {
PyObject_VAR_HEAD
const char *tp_name; /* For printing, in format "<module>.<name>" */
Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */
/* Methods to implement standard operations */
destructor tp_dealloc;
Py_ssize_t tp_vectorcall_offset;
getattrfunc tp_getattr;
setattrfunc tp_setattr;
PyAsyncMethods *tp_as_async; /* formerly known as tp_compare (Python 2)
or tp_reserved (Python 3) */
reprfunc tp_repr;
/* Method suites for standard classes */
PyNumberMethods *tp_as_number;
PySequenceMethods *tp_as_sequence;
PyMappingMethods *tp_as_mapping;
/* More standard operations (here for binary compatibility) */
hashfunc tp_hash;
ternaryfunc tp_call;
reprfunc tp_str;
getattrofunc tp_getattro;
setattrofunc tp_setattro;
/* Functions to access object as input/output buffer */
PyBufferProcs *tp_as_buffer;
/* Flags to define presence of optional/expanded features */
unsigned long tp_flags;
const char *tp_doc; /* Documentation string */
/* Assigned meaning in release 2.0 */
/* call function for all accessible objects */
traverseproc tp_traverse;
/* delete references to contained objects */
inquiry tp_clear;
/* Assigned meaning in release 2.1 */
/* rich comparisons */
richcmpfunc tp_richcompare;
/* weak reference enabler */
Py_ssize_t tp_weaklistoffset;
/* Iterators */
getiterfunc tp_iter;
iternextfunc tp_iternext;
/* Attribute descriptor and subclassing stuff */
PyMethodDef *tp_methods;
PyMemberDef *tp_members;
PyGetSetDef *tp_getset;
// Strong reference on a heap type, borrowed reference on a static type
PyTypeObject *tp_base;
PyObject *tp_dict;
descrgetfunc tp_descr_get;
descrsetfunc tp_descr_set;
Py_ssize_t tp_dictoffset;
initproc tp_init;
allocfunc tp_alloc;
newfunc tp_new;
freefunc tp_free; /* Low-level free-memory routine */
inquiry tp_is_gc; /* For PyObject_IS_GC */
PyObject *tp_bases;
PyObject *tp_mro; /* method resolution order */
PyObject *tp_cache; /* no longer used */
void *tp_subclasses; /* for static builtin types this is an index */
PyObject *tp_weaklist; /* not used for static builtin types */
destructor tp_del;
/* Type attribute cache version tag. Added in version 2.6 */
unsigned int tp_version_tag;
destructor tp_finalize;
vectorcallfunc tp_vectorcall;
/* bitset of which type-watchers care about this type */
unsigned char tp_watched;
uint16_t tp_versions_used;
};
/* This struct is used by the specializer.
* It should be treated as an opaque blob
* by code other than the specializer and interpreter. */
struct _specialization_cache {
// In order to avoid bloating the bytecode with lots of inline caches, the
// members of this structure have a somewhat unique contract. They are set
// by the specialization machinery, and are invalidated by PyType_Modified.
// The rules for using them are as follows:
// - If getitem is non-NULL, then it is the same Python function that
// PyType_Lookup(cls, "__getitem__") would return.
// - If getitem is NULL, then getitem_version is meaningless.
// - If getitem->func_version == getitem_version, then getitem can be called
// with two positional arguments and no keyword arguments, and has neither
// *args nor **kwargs (as required by BINARY_SUBSCR_GETITEM):
PyObject *getitem;
uint32_t getitem_version;
PyObject *init;
};
/* The *real* layout of a type object when allocated on the heap */
typedef struct _heaptypeobject {
/* Note: there's a dependency on the order of these members
in slotptr() in typeobject.c . */
PyTypeObject ht_type;
PyAsyncMethods as_async;
PyNumberMethods as_number;
PyMappingMethods as_mapping;
PySequenceMethods as_sequence; /* as_sequence comes after as_mapping,
so that the mapping wins when both
the mapping and the sequence define
a given operator (e.g. __getitem__).
see add_operators() in typeobject.c . */
PyBufferProcs as_buffer;
PyObject *ht_name, *ht_slots, *ht_qualname;
struct _dictkeysobject *ht_cached_keys;
PyObject *ht_module;
char *_ht_tpname; // Storage for "tp_name"; see PyType_FromModuleAndSpec
struct _specialization_cache _spec_cache; // For use by the specializer.
/* here are optional user slots, followed by the members. */
} PyHeapTypeObject;
PyAPI_FUNC(const char *) _PyType_Name(PyTypeObject *);
PyAPI_FUNC(PyObject *) _PyType_Lookup(PyTypeObject *, PyObject *);
PyAPI_FUNC(PyObject *) _PyType_LookupRef(PyTypeObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyType_GetDict(PyTypeObject *);
PyAPI_FUNC(int) PyObject_Print(PyObject *, FILE *, int);
PyAPI_FUNC(void) _Py_BreakPoint(void);
PyAPI_FUNC(void) _PyObject_Dump(PyObject *);
PyAPI_FUNC(PyObject*) _PyObject_GetAttrId(PyObject *, _Py_Identifier *);
PyAPI_FUNC(PyObject **) _PyObject_GetDictPtr(PyObject *);
PyAPI_FUNC(void) PyObject_CallFinalizer(PyObject *);
PyAPI_FUNC(int) PyObject_CallFinalizerFromDealloc(PyObject *);
PyAPI_FUNC(void) PyUnstable_Object_ClearWeakRefsNoCallbacks(PyObject *);
/* Same as PyObject_Generic{Get,Set}Attr, but passing the attributes
dict as the last parameter. */
PyAPI_FUNC(PyObject *)
_PyObject_GenericGetAttrWithDict(PyObject *, PyObject *, PyObject *, int);
PyAPI_FUNC(int)
_PyObject_GenericSetAttrWithDict(PyObject *, PyObject *,
PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) _PyObject_FunctionStr(PyObject *);
/* Safely decref `dst` and set `dst` to `src`.
*
* As in case of Py_CLEAR "the obvious" code can be deadly:
*
* Py_DECREF(dst);
* dst = src;
*
* The safe way is:
*
* Py_SETREF(dst, src);
*
* That arranges to set `dst` to `src` _before_ decref'ing, so that any code
* triggered as a side-effect of `dst` getting torn down no longer believes
* `dst` points to a valid object.
*
* Temporary variables are used to evaluate macro arguments only once, and so
* avoid the duplication of side effects. _Py_TYPEOF() or memcpy() is used to
* avoid a miscompilation caused by type punning. See the Py_CLEAR() comment
* for implementation details about type punning.
*
* The memcpy() implementation does not emit a compiler warning if 'src' does
* not have the same type as 'dst': any pointer type is accepted for 'src'.
*/
#ifdef _Py_TYPEOF
#define Py_SETREF(dst, src) \
do { \
_Py_TYPEOF(dst)* _tmp_dst_ptr = &(dst); \
_Py_TYPEOF(dst) _tmp_old_dst = (*_tmp_dst_ptr); \
*_tmp_dst_ptr = (src); \
Py_DECREF(_tmp_old_dst); \
} while (0)
#else
#define Py_SETREF(dst, src) \
do { \
PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
PyObject *_tmp_old_dst = (*_tmp_dst_ptr); \
PyObject *_tmp_src = _PyObject_CAST(src); \
memcpy(_tmp_dst_ptr, &_tmp_src, sizeof(PyObject*)); \
Py_DECREF(_tmp_old_dst); \
} while (0)
#endif
/* Py_XSETREF() is a variant of Py_SETREF() that uses Py_XDECREF() instead of
* Py_DECREF().
*/
#ifdef _Py_TYPEOF
#define Py_XSETREF(dst, src) \
do { \
_Py_TYPEOF(dst)* _tmp_dst_ptr = &(dst); \
_Py_TYPEOF(dst) _tmp_old_dst = (*_tmp_dst_ptr); \
*_tmp_dst_ptr = (src); \
Py_XDECREF(_tmp_old_dst); \
} while (0)
#else
#define Py_XSETREF(dst, src) \
do { \
PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
PyObject *_tmp_old_dst = (*_tmp_dst_ptr); \
PyObject *_tmp_src = _PyObject_CAST(src); \
memcpy(_tmp_dst_ptr, &_tmp_src, sizeof(PyObject*)); \
Py_XDECREF(_tmp_old_dst); \
} while (0)
#endif
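// Example (editor's sketch): replacing a cached object on a hypothetical
// 'self->cache' field that may be NULL, taking a new reference to 'value':
//
//     Py_XSETREF(self->cache, Py_NewRef(value));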
/* Define a set of assertion macros:
_PyObject_ASSERT_FROM(), _PyObject_ASSERT_WITH_MSG() and _PyObject_ASSERT().
These work like the regular C assert(), in that they will abort the
process with a message on stderr if the given condition fails to hold,
but compile away to nothing if NDEBUG is defined.
However, before aborting, Python will also try to call _PyObject_Dump() on
the given object. This may be of use when investigating bugs in which a
particular object is corrupt (e.g. a buggy tp_visit method in an extension
module breaking the garbage collector), to help locate the broken objects.
The WITH_MSG variant allows you to supply an additional message that Python
will attempt to print to stderr, after the object dump. */
#ifdef NDEBUG
/* No debugging: compile away the assertions: */
# define _PyObject_ASSERT_FROM(obj, expr, msg, filename, lineno, func) \
((void)0)
#else
/* With debugging: generate checks: */
# define _PyObject_ASSERT_FROM(obj, expr, msg, filename, lineno, func) \
((expr) \
? (void)(0) \
: _PyObject_AssertFailed((obj), Py_STRINGIFY(expr), \
(msg), (filename), (lineno), (func)))
#endif
#define _PyObject_ASSERT_WITH_MSG(obj, expr, msg) \
_PyObject_ASSERT_FROM((obj), expr, (msg), __FILE__, __LINE__, __func__)
#define _PyObject_ASSERT(obj, expr) \
_PyObject_ASSERT_WITH_MSG((obj), expr, NULL)
#define _PyObject_ASSERT_FAILED_MSG(obj, msg) \
_PyObject_AssertFailed((obj), NULL, (msg), __FILE__, __LINE__, __func__)
/* Declare and define _PyObject_AssertFailed() even when NDEBUG is defined,
to avoid causing compiler/linker errors when building extensions without
NDEBUG against a Python built with NDEBUG defined.
msg, expr and function can be NULL. */
PyAPI_FUNC(void) _Py_NO_RETURN _PyObject_AssertFailed(
PyObject *obj,
const char *expr,
const char *msg,
const char *file,
int line,
const char *function);
/* Trashcan mechanism, thanks to Christian Tismer.
When deallocating a container object, it's possible to trigger an unbounded
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
next" object in the chain to 0. This can easily lead to stack overflows,
especially in threads (which typically have less stack space to work with).
A container object can avoid this by bracketing the body of its tp_dealloc
function with a pair of macros:
static void
mytype_dealloc(mytype *p)
{
... declarations go here ...
PyObject_GC_UnTrack(p); // must untrack first
Py_TRASHCAN_BEGIN(p, mytype_dealloc)
... The body of the deallocator goes here, including all calls ...
... to Py_DECREF on contained objects. ...
Py_TRASHCAN_END // there should be no code after this
}
CAUTION: Never return from the middle of the body! If the body needs to
"get out early", put a label immediately before the Py_TRASHCAN_END
call, and goto it. Else the call-depth counter (see below) will stay
above 0 forever, and the trashcan will never get emptied.
How it works: The BEGIN macro increments a call-depth counter. So long
as this counter is small, the body of the deallocator is run directly without
further ado. But if the counter gets large, it instead adds p to a list of
objects to be deallocated later, skips the body of the deallocator, and
resumes execution after the END macro. The tp_dealloc routine then returns
without deallocating anything (and so unbounded call-stack depth is avoided).
When the call stack finishes unwinding again, code generated by the END macro
notices this, and calls another routine to deallocate all the objects that
may have been added to the list of deferred deallocations. In effect, a
chain of N deallocations is broken into (N-1)/(Py_TRASHCAN_HEADROOM-1) pieces,
with the call stack never exceeding a depth of Py_TRASHCAN_HEADROOM.
Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
of the actual class being deallocated. Otherwise we might end up with a
partially-deallocated object. To check this, the tp_dealloc function must be
passed as second argument to Py_TRASHCAN_BEGIN().
*/
/* Python 3.9 private API, invoked by the macros below. */
PyAPI_FUNC(int) _PyTrash_begin(PyThreadState *tstate, PyObject *op);
PyAPI_FUNC(void) _PyTrash_end(PyThreadState *tstate);
PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op);
PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(PyThreadState *tstate);
/* Python 3.10 private API, invoked by Py_TRASHCAN_BEGIN(). */
/* To avoid raising recursion errors during dealloc, trigger the trashcan before
 * we reach the recursion limit. To avoid thrashing, we don't attempt to empty the
 * trashcan until we have headroom above the trigger limit */
#define Py_TRASHCAN_HEADROOM 50
#define Py_TRASHCAN_BEGIN(op, dealloc) \
do { \
PyThreadState *tstate = PyThreadState_Get(); \
if (tstate->c_recursion_remaining <= Py_TRASHCAN_HEADROOM && Py_TYPE(op)->tp_dealloc == (destructor)dealloc) { \
_PyTrash_thread_deposit_object(tstate, (PyObject *)op); \
break; \
} \
tstate->c_recursion_remaining--;
/* The body of the deallocator is here. */
#define Py_TRASHCAN_END \
tstate->c_recursion_remaining++; \
if (tstate->delete_later && tstate->c_recursion_remaining > (Py_TRASHCAN_HEADROOM*2)) { \
_PyTrash_thread_destroy_chain(tstate); \
} \
} while (0);
PyAPI_FUNC(void *) PyObject_GetItemData(PyObject *obj);
PyAPI_FUNC(int) PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg);
PyAPI_FUNC(int) _PyObject_SetManagedDict(PyObject *obj, PyObject *new_dict);
PyAPI_FUNC(void) PyObject_ClearManagedDict(PyObject *obj);
#define TYPE_MAX_WATCHERS 8
typedef int(*PyType_WatchCallback)(PyTypeObject *);
PyAPI_FUNC(int) PyType_AddWatcher(PyType_WatchCallback callback);
PyAPI_FUNC(int) PyType_ClearWatcher(int watcher_id);
PyAPI_FUNC(int) PyType_Watch(int watcher_id, PyObject *type);
PyAPI_FUNC(int) PyType_Unwatch(int watcher_id, PyObject *type);
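// Example (editor's sketch): a watcher that invalidates a hypothetical
// per-type cache whenever a watched type is modified.
//
//     static int
//     my_type_watcher(PyTypeObject *type)
//     {
//         invalidate_my_cache(type);  // hypothetical helper
//         return 0;
//     }
//
//     int watcher_id = PyType_AddWatcher(my_type_watcher);  // negative on error
//     if (watcher_id >= 0) {
//         PyType_Watch(watcher_id, (PyObject *)some_type);
//     }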
/* Attempt to assign a version tag to the given type.
*
* Returns 1 if the type already had a valid version tag or a new one was
* assigned, or 0 if a new tag could not be assigned.
*/
PyAPI_FUNC(int) PyUnstable_Type_AssignVersionTag(PyTypeObject *type);
typedef enum {
PyRefTracer_CREATE = 0,
PyRefTracer_DESTROY = 1,
} PyRefTracerEvent;
typedef int (*PyRefTracer)(PyObject *, PyRefTracerEvent event, void *);
PyAPI_FUNC(int) PyRefTracer_SetTracer(PyRefTracer tracer, void *data);
PyAPI_FUNC(PyRefTracer) PyRefTracer_GetTracer(void**);
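// Example (editor's sketch): a tracer that tallies object creations and
// destructions through the user-supplied 'data' pointer.
//
//     static int
//     count_refs(PyObject *op, PyRefTracerEvent event, void *data)
//     {
//         Py_ssize_t *counts = (Py_ssize_t *)data;  // [0]=created, [1]=destroyed
//         counts[event == PyRefTracer_CREATE ? 0 : 1]++;
//         return 0;
//     }
//
//     static Py_ssize_t counts[2];
//     PyRefTracer_SetTracer(count_refs, counts);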

View File

@@ -0,0 +1,104 @@
#ifndef Py_CPYTHON_OBJIMPL_H
# error "this header file must not be included directly"
#endif
static inline size_t _PyObject_SIZE(PyTypeObject *type) {
return _Py_STATIC_CAST(size_t, type->tp_basicsize);
}
/* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a
variable-size object with nitems items, exclusive of gc overhead (if any). The
value is rounded up to the closest multiple of sizeof(void *), in order to
ensure that pointer fields at the end of the object are correctly aligned
for the platform (this is of special importance for subclasses of, e.g.,
str or int, so that pointers can be stored after the embedded data).
Note that there's no memory wastage in doing this, as malloc has to
return (at worst) pointer-aligned memory anyway.
*/
#if ((SIZEOF_VOID_P - 1) & SIZEOF_VOID_P) != 0
# error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2"
#endif
static inline size_t _PyObject_VAR_SIZE(PyTypeObject *type, Py_ssize_t nitems) {
size_t size = _Py_STATIC_CAST(size_t, type->tp_basicsize);
size += _Py_STATIC_CAST(size_t, nitems) * _Py_STATIC_CAST(size_t, type->tp_itemsize);
return _Py_SIZE_ROUND_UP(size, SIZEOF_VOID_P);
}
/* This example code implements an object constructor with a custom
allocator, where PyObject_New is inlined, and shows the important
distinction between two steps (at least):
1) the actual allocation of the object storage;
2) the initialization of the Python specific fields
in this storage with PyObject_{Init, InitVar}.
PyObject *
YourObject_New(...)
{
PyObject *op;
op = (PyObject *) Your_Allocator(_PyObject_SIZE(YourTypeStruct));
if (op == NULL) {
return PyErr_NoMemory();
}
PyObject_Init(op, &YourTypeStruct);
op->ob_field = value;
...
return op;
}
Note that in C++, the use of the new operator usually implies that
the 1st step is performed automatically for you, so in a C++ class
constructor you would start directly with PyObject_Init/InitVar. */
typedef struct {
/* user context passed as the first argument to the 2 functions */
void *ctx;
/* allocate an arena of size bytes */
void* (*alloc) (void *ctx, size_t size);
/* free an arena */
void (*free) (void *ctx, void *ptr, size_t size);
} PyObjectArenaAllocator;
/* Get the arena allocator. */
PyAPI_FUNC(void) PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator);
/* Set the arena allocator. */
PyAPI_FUNC(void) PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator);
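// Example (editor's sketch): installing a malloc-based arena allocator that
// tracks the number of bytes currently reserved for arenas. Assumes
// <stdlib.h> is available.
//
//     static size_t arena_bytes = 0;
//
//     static void *
//     my_arena_alloc(void *ctx, size_t size)
//     {
//         (void)ctx;
//         arena_bytes += size;
//         return malloc(size);
//     }
//
//     static void
//     my_arena_free(void *ctx, void *ptr, size_t size)
//     {
//         (void)ctx;
//         arena_bytes -= size;
//         free(ptr);
//     }
//
//     PyObjectArenaAllocator a = { NULL, my_arena_alloc, my_arena_free };
//     PyObject_SetArenaAllocator(&a);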
/* Test if an object implements the garbage collector protocol */
PyAPI_FUNC(int) PyObject_IS_GC(PyObject *obj);
// Test if a type supports weak references
PyAPI_FUNC(int) PyType_SUPPORTS_WEAKREFS(PyTypeObject *type);
PyAPI_FUNC(PyObject **) PyObject_GET_WEAKREFS_LISTPTR(PyObject *op);
PyAPI_FUNC(PyObject *) PyUnstable_Object_GC_NewWithExtraData(PyTypeObject *,
size_t);
/* Visit all live GC-capable objects, similar to gc.get_objects(None). The
* supplied callback is called on every such object with the void* arg set
 * to the supplied arg. Returning 0 from the callback ends iteration; returning
 * 1 allows iteration to continue. Returning any other value may result in
* undefined behaviour.
*
* If new objects are (de)allocated by the callback it is undefined if they
* will be visited.
* Garbage collection is disabled during operation. Explicitly running a
* collection in the callback may lead to undefined behaviour e.g. visiting the
* same objects multiple times or not at all.
*/
typedef int (*gcvisitobjects_t)(PyObject*, void*);
PyAPI_FUNC(void) PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void* arg);
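// Example (editor's sketch): counting every live GC-tracked object.
//
//     static int
//     count_object(PyObject *op, void *arg)
//     {
//         (void)op;
//         (*(Py_ssize_t *)arg)++;
//         return 1;  // keep iterating
//     }
//
//     Py_ssize_t n = 0;
//     PyUnstable_GC_VisitObjects(count_object, &n);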

View File

@@ -0,0 +1,43 @@
#ifndef Py_ODICTOBJECT_H
#define Py_ODICTOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* OrderedDict */
/* This API is optional and mostly redundant. */
#ifndef Py_LIMITED_API
typedef struct _odictobject PyODictObject;
PyAPI_DATA(PyTypeObject) PyODict_Type;
PyAPI_DATA(PyTypeObject) PyODictIter_Type;
PyAPI_DATA(PyTypeObject) PyODictKeys_Type;
PyAPI_DATA(PyTypeObject) PyODictItems_Type;
PyAPI_DATA(PyTypeObject) PyODictValues_Type;
#define PyODict_Check(op) PyObject_TypeCheck((op), &PyODict_Type)
#define PyODict_CheckExact(op) Py_IS_TYPE((op), &PyODict_Type)
#define PyODict_SIZE(op) PyDict_GET_SIZE((op))
PyAPI_FUNC(PyObject *) PyODict_New(void);
PyAPI_FUNC(int) PyODict_SetItem(PyObject *od, PyObject *key, PyObject *item);
PyAPI_FUNC(int) PyODict_DelItem(PyObject *od, PyObject *key);
/* wrappers around PyDict* functions */
#define PyODict_GetItem(od, key) PyDict_GetItem(_PyObject_CAST(od), (key))
#define PyODict_GetItemWithError(od, key) \
PyDict_GetItemWithError(_PyObject_CAST(od), (key))
#define PyODict_Contains(od, key) PyDict_Contains(_PyObject_CAST(od), (key))
#define PyODict_Size(od) PyDict_Size(_PyObject_CAST(od))
#define PyODict_GetItemString(od, key) \
PyDict_GetItemString(_PyObject_CAST(od), (key))
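// Example (editor's sketch): building the ordered dict {"a": 1}. Error
// handling is elided; PyODict_SetItem does not steal references.
//
//     PyObject *od = PyODict_New();
//     PyObject *key = PyUnicode_FromString("a");
//     PyObject *val = PyLong_FromLong(1);
//     if (od && key && val) {
//         PyODict_SetItem(od, key, val);
//     }
//     Py_XDECREF(key);
//     Py_XDECREF(val);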
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_ODICTOBJECT_H */

View File

@@ -0,0 +1,31 @@
/* PickleBuffer object. This is built-in for ease of use from third-party
* C extensions.
*/
#ifndef Py_PICKLEBUFOBJECT_H
#define Py_PICKLEBUFOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_LIMITED_API
PyAPI_DATA(PyTypeObject) PyPickleBuffer_Type;
#define PyPickleBuffer_Check(op) Py_IS_TYPE((op), &PyPickleBuffer_Type)
/* Create a PickleBuffer redirecting to the given buffer-enabled object */
PyAPI_FUNC(PyObject *) PyPickleBuffer_FromObject(PyObject *);
/* Get the PickleBuffer's underlying view to the original object
* (NULL if released)
*/
PyAPI_FUNC(const Py_buffer *) PyPickleBuffer_GetBuffer(PyObject *);
/* Release the PickleBuffer. Returns 0 on success, -1 on error. */
PyAPI_FUNC(int) PyPickleBuffer_Release(PyObject *);
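// Example (editor's sketch): wrapping a buffer-enabled object 'obj' in a
// PickleBuffer, reading through its view, then releasing it.
//
//     PyObject *pb = PyPickleBuffer_FromObject(obj);
//     if (pb != NULL) {
//         const Py_buffer *view = PyPickleBuffer_GetBuffer(pb);
//         if (view != NULL) {
//             // ... use view->buf / view->len ...
//         }
//         PyPickleBuffer_Release(pb);
//         Py_DECREF(pb);
//     }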
#endif /* !Py_LIMITED_API */
#ifdef __cplusplus
}
#endif
#endif /* !Py_PICKLEBUFOBJECT_H */

View File

@@ -0,0 +1,105 @@
#ifndef Py_CPYTHON_PTHREAD_STUBS_H
#define Py_CPYTHON_PTHREAD_STUBS_H
#if !defined(HAVE_PTHREAD_STUBS)
# error "this header file requires stubbed pthreads."
#endif
#ifndef _POSIX_THREADS
# define _POSIX_THREADS 1
#endif
/* Minimal pthread stubs for CPython.
*
* The stubs implement the minimum pthread API for CPython.
* - pthread_create() fails.
* - pthread_exit() calls exit(0).
* - pthread_key_*() functions implement minimal TSS without destructor.
* - all other functions do nothing and return 0.
*/
#ifdef __wasi__
// WASI's bits/alltypes.h provides type definitions when __NEED_ is set.
// The header file can be included multiple times.
//
// <sys/types.h> may also define these macros.
# ifndef __NEED_pthread_cond_t
# define __NEED_pthread_cond_t 1
# endif
# ifndef __NEED_pthread_condattr_t
# define __NEED_pthread_condattr_t 1
# endif
# ifndef __NEED_pthread_mutex_t
# define __NEED_pthread_mutex_t 1
# endif
# ifndef __NEED_pthread_mutexattr_t
# define __NEED_pthread_mutexattr_t 1
# endif
# ifndef __NEED_pthread_key_t
# define __NEED_pthread_key_t 1
# endif
# ifndef __NEED_pthread_t
# define __NEED_pthread_t 1
# endif
# ifndef __NEED_pthread_attr_t
# define __NEED_pthread_attr_t 1
# endif
# include <bits/alltypes.h>
#else
typedef struct { void *__x; } pthread_cond_t;
typedef struct { unsigned __attr; } pthread_condattr_t;
typedef struct { void *__x; } pthread_mutex_t;
typedef struct { unsigned __attr; } pthread_mutexattr_t;
typedef unsigned pthread_key_t;
typedef unsigned pthread_t;
typedef struct { unsigned __attr; } pthread_attr_t;
#endif
// mutex
PyAPI_FUNC(int) pthread_mutex_init(pthread_mutex_t *restrict mutex,
const pthread_mutexattr_t *restrict attr);
PyAPI_FUNC(int) pthread_mutex_destroy(pthread_mutex_t *mutex);
PyAPI_FUNC(int) pthread_mutex_trylock(pthread_mutex_t *mutex);
PyAPI_FUNC(int) pthread_mutex_lock(pthread_mutex_t *mutex);
PyAPI_FUNC(int) pthread_mutex_unlock(pthread_mutex_t *mutex);
// condition
PyAPI_FUNC(int) pthread_cond_init(pthread_cond_t *restrict cond,
const pthread_condattr_t *restrict attr);
PyAPI_FUNC(int) pthread_cond_destroy(pthread_cond_t *cond);
PyAPI_FUNC(int) pthread_cond_wait(pthread_cond_t *restrict cond,
pthread_mutex_t *restrict mutex);
PyAPI_FUNC(int) pthread_cond_timedwait(pthread_cond_t *restrict cond,
pthread_mutex_t *restrict mutex,
const struct timespec *restrict abstime);
PyAPI_FUNC(int) pthread_cond_signal(pthread_cond_t *cond);
PyAPI_FUNC(int) pthread_condattr_init(pthread_condattr_t *attr);
PyAPI_FUNC(int) pthread_condattr_setclock(
pthread_condattr_t *attr, clockid_t clock_id);
// pthread
PyAPI_FUNC(int) pthread_create(pthread_t *restrict thread,
const pthread_attr_t *restrict attr,
void *(*start_routine)(void *),
void *restrict arg);
PyAPI_FUNC(int) pthread_detach(pthread_t thread);
PyAPI_FUNC(int) pthread_join(pthread_t thread, void** value_ptr);
PyAPI_FUNC(pthread_t) pthread_self(void);
PyAPI_FUNC(int) pthread_exit(void *retval) __attribute__ ((__noreturn__));
PyAPI_FUNC(int) pthread_attr_init(pthread_attr_t *attr);
PyAPI_FUNC(int) pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
PyAPI_FUNC(int) pthread_attr_destroy(pthread_attr_t *attr);
// pthread_key
#ifndef PTHREAD_KEYS_MAX
# define PTHREAD_KEYS_MAX 128
#endif
PyAPI_FUNC(int) pthread_key_create(pthread_key_t *key,
void (*destr_function)(void *));
PyAPI_FUNC(int) pthread_key_delete(pthread_key_t key);
PyAPI_FUNC(void *) pthread_getspecific(pthread_key_t key);
PyAPI_FUNC(int) pthread_setspecific(pthread_key_t key, const void *value);
#endif // Py_CPYTHON_PTHREAD_STUBS_H

View File

@@ -0,0 +1,569 @@
// This header provides cross-platform low-level atomic operations
// similar to C11 atomics.
//
// Operations are sequentially consistent unless they have a suffix indicating
// otherwise. If in doubt, prefer the sequentially consistent operations.
//
// The "_relaxed" suffix for load and store operations indicates the "relaxed"
// memory order. They don't provide synchronization, but (roughly speaking)
// guarantee somewhat sane behavior for races instead of undefined behavior.
// In practice, they correspond to "normal" hardware load and store
// instructions, so they are almost as inexpensive as plain loads and stores
// in C.
//
// Note that atomic read-modify-write operations like _Py_atomic_add_* return
// the previous value of the atomic variable, not the new value.
//
// See https://en.cppreference.com/w/c/atomic for more information on C11
// atomics.
// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
// "A Relaxed Guide to memory_order_relaxed" for discussion of and common usage
// or relaxed atomics.
//
// Functions with pseudo Python code:
//
// def _Py_atomic_load(obj):
// return obj # sequential consistency
//
// def _Py_atomic_load_relaxed(obj):
// return obj # relaxed consistency
//
// def _Py_atomic_store(obj, value):
// obj = value # sequential consistency
//
// def _Py_atomic_store_relaxed(obj, value):
// obj = value # relaxed consistency
//
// def _Py_atomic_exchange(obj, value):
// # sequential consistency
// old_obj = obj
// obj = value
// return old_obj
//
// def _Py_atomic_compare_exchange(obj, expected, desired):
// # sequential consistency
// if obj == expected:
// obj = desired
// return True
// else:
// expected = obj
// return False
//
// def _Py_atomic_add(obj, value):
// # sequential consistency
// old_obj = obj
// obj += value
// return old_obj
//
// def _Py_atomic_and(obj, value):
// # sequential consistency
// old_obj = obj
// obj &= value
// return old_obj
//
// def _Py_atomic_or(obj, value):
// # sequential consistency
// old_obj = obj
// obj |= value
// return old_obj
//
// Other functions:
//
// def _Py_atomic_load_ptr_acquire(obj):
// return obj # acquire
//
// def _Py_atomic_store_ptr_release(obj, value):
//     obj = value  # release
//
// def _Py_atomic_fence_seq_cst():
// # sequential consistency
// ...
//
// def _Py_atomic_fence_release():
// # release
// ...
#ifndef Py_CPYTHON_ATOMIC_H
# error "this header file must not be included directly"
#endif
// --- _Py_atomic_add --------------------------------------------------------
// Atomically adds `value` to `obj` and returns the previous value
static inline int
_Py_atomic_add_int(int *obj, int value);
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value);
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value);
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value);
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value);
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value);
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value);
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value);
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value);
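// Example (editor's note): the add operations return the *previous* value,
// so a fetch-and-increment that also observes the old count looks like:
//
//     static Py_ssize_t hits;
//     Py_ssize_t old = _Py_atomic_add_ssize(&hits, 1);  // 'hits' is now old + 1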
// --- _Py_atomic_compare_exchange -------------------------------------------
// Performs an atomic compare-and-exchange.
//
// - If `*obj` and `*expected` are equal, store `desired` into `*obj`
// and return 1 (success).
// - Otherwise, store the current value of `*obj` into `*expected`
// and return 0 (failure).
//
// These correspond to the C11 atomic_compare_exchange_strong() function.
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired);
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired);
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired);
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired);
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired);
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired);
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired);
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired);
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired);
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired);
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired);
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired);
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired);
// NOTE: `obj` and `expected` are logically `void**` types, but we use `void*`
// so that we can pass types like `PyObject**` without a cast.
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *value);
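// Example (editor's sketch): a saturating increment built as a
// compare-exchange retry loop. On failure, 'expected' has already been
// refreshed with the current value, so the loop simply retries.
//
//     static void
//     saturating_increment(uint32_t *counter)
//     {
//         uint32_t expected = _Py_atomic_load_uint32(counter);
//         while (expected != UINT32_MAX &&
//                !_Py_atomic_compare_exchange_uint32(counter, &expected,
//                                                    expected + 1)) {
//             // 'expected' now holds the latest value; try again
//         }
//     }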
// --- _Py_atomic_exchange ---------------------------------------------------
// Atomically replaces `*obj` with `value` and returns the previous value of `*obj`.
static inline int
_Py_atomic_exchange_int(int *obj, int value);
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value);
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value);
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value);
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value);
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value);
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value);
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value);
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value);
static inline void *
_Py_atomic_exchange_ptr(void *obj, void *value);
// --- _Py_atomic_and --------------------------------------------------------
// Performs `*obj &= value` atomically and returns the previous value of `*obj`.
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value);
// --- _Py_atomic_or ---------------------------------------------------------
// Performs `*obj |= value` atomically and returns the previous value of `*obj`.
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value);
// --- _Py_atomic_load -------------------------------------------------------
// Atomically loads `*obj` (sequential consistency)
static inline int
_Py_atomic_load_int(const int *obj);
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj);
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj);
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj);
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj);
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj);
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj);
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj);
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj);
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj);
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj);
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj);
static inline void *
_Py_atomic_load_ptr(const void *obj);
// --- _Py_atomic_load_relaxed -----------------------------------------------
// Loads `*obj` (relaxed consistency, i.e., no ordering)
static inline int
_Py_atomic_load_int_relaxed(const int *obj);
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj);
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj);
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj);
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj);
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj);
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj);
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj);
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj);
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj);
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj);
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj);
static inline void *
_Py_atomic_load_ptr_relaxed(const void *obj);
static inline unsigned long long
_Py_atomic_load_ullong_relaxed(const unsigned long long *obj);
// --- _Py_atomic_store ------------------------------------------------------
// Atomically performs `*obj = value` (sequential consistency)
static inline void
_Py_atomic_store_int(int *obj, int value);
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value);
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value);
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value);
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value);
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value);
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value);
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value);
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value);
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value);
static inline void
_Py_atomic_store_ptr(void *obj, void *value);
static inline void
_Py_atomic_store_ssize(Py_ssize_t* obj, Py_ssize_t value);
// --- _Py_atomic_store_relaxed ----------------------------------------------
// Stores `*obj = value` (relaxed consistency, i.e., no ordering)
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value);
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value);
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value);
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value);
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value);
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value);
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t* obj, uint8_t value);
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value);
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value);
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value);
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value);
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value);
static inline void
_Py_atomic_store_ullong_relaxed(unsigned long long *obj,
unsigned long long value);
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
// Loads `*obj` (acquire operation)
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj);
static inline uintptr_t
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj);
// Stores `*obj = value` (release operation)
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value);
static inline void
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value);
static inline void
_Py_atomic_store_int_release(int *obj, int value);
static inline int
_Py_atomic_load_int_acquire(const int *obj);
static inline void
_Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value);
static inline uint64_t
_Py_atomic_load_uint64_acquire(const uint64_t *obj);
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj);
// --- _Py_atomic_fence ------------------------------------------------------
// Sequential consistency fence. C11 fences have complex semantics. When
// possible, use the atomic operations on variables defined above, which
// generally do not require explicit use of a fence.
// See https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence
static inline void _Py_atomic_fence_seq_cst(void);
// Acquire fence
static inline void _Py_atomic_fence_acquire(void);
// Release fence
static inline void _Py_atomic_fence_release(void);
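// --- Illustrative sketch (editorial addition; not part of the vendored header).
// The acquire/release pair above supports the classic publication pattern: a
// writer fully initializes an object and publishes its pointer with a release
// store; a reader that observes the pointer through an acquire load also
// observes the initialized contents. `_example_obj` and both helpers are
// invented names:
typedef struct { int payload; } _example_obj;
static inline void
_example_publish(void **slot, _example_obj *obj)
{
    obj->payload = 42;                        // plain write, before publication
    _Py_atomic_store_ptr_release(slot, obj);  // release: init happens-before
}
static inline _example_obj *
_example_consume(void **slot)
{
    // Acquire: if the result is non-NULL, obj->payload is guaranteed visible.
    return (_example_obj *)_Py_atomic_load_ptr_acquire(slot);
}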
#ifndef _Py_USE_GCC_BUILTIN_ATOMICS
# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
# define _Py_USE_GCC_BUILTIN_ATOMICS 1
# elif defined(__clang__)
# if __has_builtin(__atomic_load)
# define _Py_USE_GCC_BUILTIN_ATOMICS 1
# endif
# endif
#endif
#if _Py_USE_GCC_BUILTIN_ATOMICS
# define Py_ATOMIC_GCC_H
# include "pyatomic_gcc.h"
# undef Py_ATOMIC_GCC_H
#elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
# define Py_ATOMIC_STD_H
# include "pyatomic_std.h"
# undef Py_ATOMIC_STD_H
#elif defined(_MSC_VER)
# define Py_ATOMIC_MSC_H
# include "pyatomic_msc.h"
# undef Py_ATOMIC_MSC_H
#else
# error "no available pyatomic implementation for this platform/compiler"
#endif
// --- aliases ---------------------------------------------------------------
#if SIZEOF_LONG == 8
# define _Py_atomic_load_ulong(p) \
_Py_atomic_load_uint64((uint64_t *)p)
# define _Py_atomic_load_ulong_relaxed(p) \
_Py_atomic_load_uint64_relaxed((uint64_t *)p)
# define _Py_atomic_store_ulong(p, v) \
_Py_atomic_store_uint64((uint64_t *)p, v)
# define _Py_atomic_store_ulong_relaxed(p, v) \
_Py_atomic_store_uint64_relaxed((uint64_t *)p, v)
#elif SIZEOF_LONG == 4
# define _Py_atomic_load_ulong(p) \
_Py_atomic_load_uint32((uint32_t *)p)
# define _Py_atomic_load_ulong_relaxed(p) \
_Py_atomic_load_uint32_relaxed((uint32_t *)p)
# define _Py_atomic_store_ulong(p, v) \
_Py_atomic_store_uint32((uint32_t *)p, v)
# define _Py_atomic_store_ulong_relaxed(p, v) \
_Py_atomic_store_uint32_relaxed((uint32_t *)p, v)
#else
# error "long must be 4 or 8 bytes in size"
#endif // SIZEOF_LONG
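// --- Illustrative sketch (editorial addition; not part of the vendored header).
// The aliases above let callers operate on a plain `unsigned long` without
// knowing its width; SIZEOF_LONG selects the 32- or 64-bit primitive. Note
// that a load followed by a store is two atomics, not one atomic RMW:
static inline unsigned long
_example_bump_ulong(unsigned long *counter)
{
    unsigned long v = _Py_atomic_load_ulong_relaxed(counter);
    _Py_atomic_store_ulong_relaxed(counter, v + 1);  // not atomic as a pair
    return v + 1;
}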


@@ -0,0 +1,551 @@
// This is the implementation of Python atomic operations using GCC's built-in
// functions that match the C11 memory model. This implementation is preferred
// for GCC-compatible compilers, such as Clang. These functions are available
// in GCC 4.8+ without needing to compile with --std=c11 or --std=gnu11.
#ifndef Py_ATOMIC_GCC_H
# error "this header file must not be included directly"
#endif
// --- _Py_atomic_add --------------------------------------------------------
static inline int
_Py_atomic_add_int(int *obj, int value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_compare_exchange -------------------------------------------
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired)
{ return __atomic_compare_exchange_n((void **)obj, (void **)expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_exchange ---------------------------------------------------
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void *
_Py_atomic_exchange_ptr(void *obj, void *value)
{ return __atomic_exchange_n((void **)obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_and --------------------------------------------------------
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_or ---------------------------------------------------------
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_load -------------------------------------------------------
static inline int
_Py_atomic_load_int(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline void *
_Py_atomic_load_ptr(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_load_relaxed -----------------------------------------------
static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline void *
_Py_atomic_load_ptr_relaxed(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_RELAXED); }
static inline unsigned long long
_Py_atomic_load_ullong_relaxed(const unsigned long long *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
// --- _Py_atomic_store ------------------------------------------------------
static inline void
_Py_atomic_store_int(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_store_relaxed ----------------------------------------------
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ullong_relaxed(unsigned long long *obj,
unsigned long long value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_ACQUIRE); }
static inline uintptr_t
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
{ return (uintptr_t)__atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_int_release(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline int
_Py_atomic_load_int_acquire(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline void
_Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline uint64_t
_Py_atomic_load_uint64_acquire(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline Py_ssize_t
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
// --- _Py_atomic_fence ------------------------------------------------------
static inline void
_Py_atomic_fence_seq_cst(void)
{ __atomic_thread_fence(__ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_fence_acquire(void)
{ __atomic_thread_fence(__ATOMIC_ACQUIRE); }
static inline void
_Py_atomic_fence_release(void)
{ __atomic_thread_fence(__ATOMIC_RELEASE); }

File diff suppressed because it is too large


@@ -0,0 +1,976 @@
// This is the implementation of Python atomic operations using C++11 or C11
// atomics. Note that the pyatomic_gcc.h implementation is preferred for
// GCC-compatible compilers, even if they support C++11 atomics.
#ifndef Py_ATOMIC_STD_H
# error "this header file must not be included directly"
#endif
#ifdef __cplusplus
extern "C++" {
# include <atomic>
}
# define _Py_USING_STD using namespace std
# define _Atomic(tp) atomic<tp>
#else
# define _Py_USING_STD
# include <stdatomic.h>
#endif
// --- _Py_atomic_add --------------------------------------------------------
static inline int
_Py_atomic_add_int(int *obj, int value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int)*)obj, value);
}
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int8_t)*)obj, value);
}
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int16_t)*)obj, value);
}
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int32_t)*)obj, value);
}
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int64_t)*)obj, value);
}
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(intptr_t)*)obj, value);
}
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(unsigned int)*)obj, value);
}
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uintptr_t)*)obj, value);
}
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(Py_ssize_t)*)obj, value);
}
// --- _Py_atomic_compare_exchange -------------------------------------------
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int8_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int16_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int32_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int64_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(intptr_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(unsigned int)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint8_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint16_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint32_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint64_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uintptr_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(Py_ssize_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(void *)*)obj,
(void **)expected, desired);
}
// --- _Py_atomic_exchange ---------------------------------------------------
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int)*)obj, value);
}
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int8_t)*)obj, value);
}
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int16_t)*)obj, value);
}
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int32_t)*)obj, value);
}
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int64_t)*)obj, value);
}
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(intptr_t)*)obj, value);
}
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(unsigned int)*)obj, value);
}
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uintptr_t)*)obj, value);
}
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(Py_ssize_t)*)obj, value);
}
static inline void*
_Py_atomic_exchange_ptr(void *obj, void *value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(void *)*)obj, value);
}
// --- _Py_atomic_and --------------------------------------------------------
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uintptr_t)*)obj, value);
}
// --- _Py_atomic_or ---------------------------------------------------------
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uintptr_t)*)obj, value);
}
// --- _Py_atomic_load -------------------------------------------------------
static inline int
_Py_atomic_load_int(const int *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int)*)obj);
}
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int8_t)*)obj);
}
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int16_t)*)obj);
}
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int32_t)*)obj);
}
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int64_t)*)obj);
}
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(intptr_t)*)obj);
}
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint8_t)*)obj);
}
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint16_t)*)obj);
}
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint32_t)*)obj);
}
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint64_t)*)obj);
}
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uintptr_t)*)obj);
}
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(unsigned int)*)obj);
}
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(Py_ssize_t)*)obj);
}
static inline void*
_Py_atomic_load_ptr(const void *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(void*)*)obj);
}
// --- _Py_atomic_load_relaxed -----------------------------------------------
static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int)*)obj,
memory_order_relaxed);
}
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int8_t)*)obj,
memory_order_relaxed);
}
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int16_t)*)obj,
memory_order_relaxed);
}
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int32_t)*)obj,
memory_order_relaxed);
}
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int64_t)*)obj,
memory_order_relaxed);
}
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(intptr_t)*)obj,
memory_order_relaxed);
}
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint8_t)*)obj,
memory_order_relaxed);
}
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint16_t)*)obj,
memory_order_relaxed);
}
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint32_t)*)obj,
memory_order_relaxed);
}
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint64_t)*)obj,
memory_order_relaxed);
}
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
memory_order_relaxed);
}
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(unsigned int)*)obj,
memory_order_relaxed);
}
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj,
memory_order_relaxed);
}
static inline void*
_Py_atomic_load_ptr_relaxed(const void *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(void*)*)obj,
memory_order_relaxed);
}
static inline unsigned long long
_Py_atomic_load_ullong_relaxed(const unsigned long long *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(unsigned long long)*)obj,
memory_order_relaxed);
}
// --- _Py_atomic_store ------------------------------------------------------
static inline void
_Py_atomic_store_int(int *obj, int value)
{
_Py_USING_STD;
atomic_store((_Atomic(int)*)obj, value);
}
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int8_t)*)obj, value);
}
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int16_t)*)obj, value);
}
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int32_t)*)obj, value);
}
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int64_t)*)obj, value);
}
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(intptr_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint8_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint16_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint32_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint64_t)*)obj, value);
}
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uintptr_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
atomic_store((_Atomic(unsigned int)*)obj, value);
}
static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{
_Py_USING_STD;
atomic_store((_Atomic(void*)*)obj, value);
}
static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(Py_ssize_t)*)obj, value);
}
// --- _Py_atomic_store_relaxed ----------------------------------------------
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int8_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int16_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int32_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int64_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(intptr_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint8_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint16_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint32_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint64_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uintptr_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(unsigned int)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(void*)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_ullong_relaxed(unsigned long long *obj,
unsigned long long value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(unsigned long long)*)obj, value,
memory_order_relaxed);
}
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(void*)*)obj,
memory_order_acquire);
}
static inline uintptr_t
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
memory_order_acquire);
}
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(void*)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uintptr_t)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_int_release(int *obj, int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value,
memory_order_release);
}
static inline int
_Py_atomic_load_int_acquire(const int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int)*)obj,
memory_order_acquire);
}
static inline void
_Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint32_t)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint64_t)*)obj, value,
memory_order_release);
}
static inline uint64_t
_Py_atomic_load_uint64_acquire(const uint64_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint64_t)*)obj,
memory_order_acquire);
}
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint32_t)*)obj,
memory_order_acquire);
}
static inline Py_ssize_t
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj,
memory_order_acquire);
}
// --- _Py_atomic_fence ------------------------------------------------------
static inline void
_Py_atomic_fence_seq_cst(void)
{
_Py_USING_STD;
atomic_thread_fence(memory_order_seq_cst);
}
static inline void
_Py_atomic_fence_acquire(void)
{
_Py_USING_STD;
atomic_thread_fence(memory_order_acquire);
}
static inline void
_Py_atomic_fence_release(void)
{
_Py_USING_STD;
atomic_thread_fence(memory_order_release);
}


@@ -0,0 +1,39 @@
#ifndef Py_LIMITED_API
#ifndef PYCTYPE_H
#define PYCTYPE_H
#ifdef __cplusplus
extern "C" {
#endif
#define PY_CTF_LOWER 0x01
#define PY_CTF_UPPER 0x02
#define PY_CTF_ALPHA (PY_CTF_LOWER|PY_CTF_UPPER)
#define PY_CTF_DIGIT 0x04
#define PY_CTF_ALNUM (PY_CTF_ALPHA|PY_CTF_DIGIT)
#define PY_CTF_SPACE 0x08
#define PY_CTF_XDIGIT 0x10
PyAPI_DATA(const unsigned int) _Py_ctype_table[256];
/* Unlike their C counterparts, the following macros are not meant to
* handle an int with any of the values [EOF, 0-UCHAR_MAX]. The argument
* must be a signed/unsigned char. */
#define Py_ISLOWER(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_LOWER)
#define Py_ISUPPER(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_UPPER)
#define Py_ISALPHA(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_ALPHA)
#define Py_ISDIGIT(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_DIGIT)
#define Py_ISXDIGIT(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_XDIGIT)
#define Py_ISALNUM(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_ALNUM)
#define Py_ISSPACE(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_SPACE)
PyAPI_DATA(const unsigned char) _Py_ctype_tolower[256];
PyAPI_DATA(const unsigned char) _Py_ctype_toupper[256];
#define Py_TOLOWER(c) (_Py_ctype_tolower[Py_CHARMASK(c)])
#define Py_TOUPPER(c) (_Py_ctype_toupper[Py_CHARMASK(c)])
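// --- Illustrative sketch (editorial addition; not part of the vendored header).
// Unlike <ctype.h>, these macros must be given a char, never EOF; Py_CHARMASK
// (from pyport.h) reduces it to an unsigned table index. A minimal sketch
// lowercasing an ASCII string in place (`_example_ascii_lower` is invented):
static inline void
_example_ascii_lower(char *s)
{
    for (; *s != '\0'; s++) {
        if (Py_ISUPPER(*s)) {
            *s = (char)Py_TOLOWER(*s);
        }
    }
}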
#ifdef __cplusplus
}
#endif
#endif /* !PYCTYPE_H */
#endif /* !Py_LIMITED_API */


@@ -0,0 +1,38 @@
#ifndef Py_LIMITED_API
#ifndef Py_PYDEBUG_H
#define Py_PYDEBUG_H
#ifdef __cplusplus
extern "C" {
#endif
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_DebugFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_VerboseFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_QuietFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_InteractiveFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_InspectFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_OptimizeFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_NoSiteFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_BytesWarningFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_FrozenFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_IgnoreEnvironmentFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_DontWriteBytecodeFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_NoUserSiteDirectory;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_UnbufferedStdioFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_HashRandomizationFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_IsolatedFlag;
#ifdef MS_WINDOWS
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_LegacyWindowsFSEncodingFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_LegacyWindowsStdioFlag;
#endif
/* This is a wrapper around getenv() that pays attention to
Py_IgnoreEnvironmentFlag. It should be used for getting variables like
PYTHONPATH and PYTHONHOME from the environment */
PyAPI_FUNC(char*) Py_GETENV(const char *name);
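/* Illustrative sketch (editorial addition; not part of the vendored header).
   Because Py_GETENV() honors Py_IgnoreEnvironmentFlag (set by -E), callers do
   not need to check the flag themselves. "PYTHONEXAMPLE" and the helper are
   invented names: */
static inline int
_example_env_flag_set(void)
{
    const char *v = Py_GETENV("PYTHONEXAMPLE");
    return v != NULL && v[0] != '\0';  /* NULL when unset or env is ignored */
}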
#ifdef __cplusplus
}
#endif
#endif /* !Py_PYDEBUG_H */
#endif /* Py_LIMITED_API */


@@ -0,0 +1,131 @@
#ifndef Py_CPYTHON_ERRORS_H
# error "this header file must not be included directly"
#endif
/* Error objects */
/* PyException_HEAD defines the initial segment of every exception class. */
#define PyException_HEAD PyObject_HEAD PyObject *dict;\
PyObject *args; PyObject *notes; PyObject *traceback;\
PyObject *context; PyObject *cause;\
char suppress_context;
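/* Illustrative sketch (editorial addition; not part of the vendored header).
   Placing PyException_HEAD first gives a struct the layout shared by all
   exception instances, so it can be used wherever a PyBaseExceptionObject is
   expected; the CPython structs below follow exactly this pattern. A
   hypothetical extension: */
typedef struct {
    PyException_HEAD
    PyObject *detail;   /* invented extra field carried by the exception */
} _example_ExceptionObject;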
typedef struct {
PyException_HEAD
} PyBaseExceptionObject;
typedef struct {
PyException_HEAD
PyObject *msg;
PyObject *excs;
} PyBaseExceptionGroupObject;
typedef struct {
PyException_HEAD
PyObject *msg;
PyObject *filename;
PyObject *lineno;
PyObject *offset;
PyObject *end_lineno;
PyObject *end_offset;
PyObject *text;
PyObject *print_file_and_line;
} PySyntaxErrorObject;
typedef struct {
PyException_HEAD
PyObject *msg;
PyObject *name;
PyObject *path;
PyObject *name_from;
} PyImportErrorObject;
typedef struct {
PyException_HEAD
PyObject *encoding;
PyObject *object;
Py_ssize_t start;
Py_ssize_t end;
PyObject *reason;
} PyUnicodeErrorObject;
typedef struct {
PyException_HEAD
PyObject *code;
} PySystemExitObject;
typedef struct {
PyException_HEAD
PyObject *myerrno;
PyObject *strerror;
PyObject *filename;
PyObject *filename2;
#ifdef MS_WINDOWS
PyObject *winerror;
#endif
Py_ssize_t written; /* only for BlockingIOError, -1 otherwise */
} PyOSErrorObject;
typedef struct {
PyException_HEAD
PyObject *value;
} PyStopIterationObject;
typedef struct {
PyException_HEAD
PyObject *name;
} PyNameErrorObject;
typedef struct {
PyException_HEAD
PyObject *obj;
PyObject *name;
} PyAttributeErrorObject;
/* Compatibility typedefs */
typedef PyOSErrorObject PyEnvironmentErrorObject;
#ifdef MS_WINDOWS
typedef PyOSErrorObject PyWindowsErrorObject;
#endif
/* Context manipulation (PEP 3134) */
PyAPI_FUNC(void) _PyErr_ChainExceptions1(PyObject *);
/* In exceptions.c */
PyAPI_FUNC(PyObject*) PyUnstable_Exc_PrepReraiseStar(
PyObject *orig,
PyObject *excs);
/* In signalmodule.c */
PyAPI_FUNC(int) PySignal_SetWakeupFd(int fd);
/* Support for adding program text to SyntaxErrors */
PyAPI_FUNC(void) PyErr_SyntaxLocationObject(
PyObject *filename,
int lineno,
int col_offset);
PyAPI_FUNC(void) PyErr_RangedSyntaxLocationObject(
PyObject *filename,
int lineno,
int col_offset,
int end_lineno,
int end_col_offset);
PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject(
PyObject *filename,
int lineno);
PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFunc(
const char *func,
const char *message);
PyAPI_FUNC(void) PyErr_FormatUnraisable(const char *, ...);
PyAPI_DATA(PyObject *) PyExc_PythonFinalizationError;
#define Py_FatalError(message) _Py_FatalErrorFunc(__func__, (message))
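/* Illustrative sketch (editorial addition; not part of the vendored header).
   Py_FatalError() records the calling function's name via __func__ and then
   aborts the process through _Py_FatalErrorFunc(), which never returns.
   `_example_require` is an invented helper: */
static inline void
_example_require(void *ptr)
{
    if (ptr == NULL) {
        Py_FatalError("invariant violated: unexpected NULL");
    }
}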


@@ -0,0 +1,15 @@
#ifndef Py_PYFPE_H
#define Py_PYFPE_H
/* Header excluded from the stable API */
#ifndef Py_LIMITED_API
/* These macros used to do something when Python was built with --with-fpectl,
* but support for that was dropped in 3.7. We continue to define them though,
* to avoid breaking API users.
*/
#define PyFPE_START_PROTECT(err_string, leave_stmt)
#define PyFPE_END_PROTECT(v)
#endif /* !defined(Py_LIMITED_API) */
#endif /* !Py_PYFPE_H */


@@ -0,0 +1,45 @@
#ifndef Py_CPYTHON_PYFRAME_H
# error "this header file must not be included directly"
#endif
PyAPI_DATA(PyTypeObject) PyFrame_Type;
PyAPI_DATA(PyTypeObject) PyFrameLocalsProxy_Type;
#define PyFrame_Check(op) Py_IS_TYPE((op), &PyFrame_Type)
#define PyFrameLocalsProxy_Check(op) Py_IS_TYPE((op), &PyFrameLocalsProxy_Type)
PyAPI_FUNC(PyFrameObject *) PyFrame_GetBack(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetLocals(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetGlobals(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetBuiltins(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetGenerator(PyFrameObject *frame);
PyAPI_FUNC(int) PyFrame_GetLasti(PyFrameObject *frame);
PyAPI_FUNC(PyObject*) PyFrame_GetVar(PyFrameObject *frame, PyObject *name);
PyAPI_FUNC(PyObject*) PyFrame_GetVarString(PyFrameObject *frame, const char *name);
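/* Illustrative sketch (editorial addition; not part of the vendored header).
   PyFrame_GetVarString() returns a strong reference, or NULL with NameError
   set when the variable is missing. A sketch reading a variable from the
   currently executing frame; PyEval_GetFrame() (declared via Python.h)
   returns a borrowed reference. `_example_get_local` is an invented helper: */
static inline PyObject *
_example_get_local(const char *name)
{
    PyFrameObject *frame = PyEval_GetFrame();  /* borrowed; NULL if no frame */
    if (frame == NULL) {
        return NULL;
    }
    return PyFrame_GetVarString(frame, name);  /* strong ref, or NULL + exc */
}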
/* The following functions are for use by debuggers and other tools
* implementing custom frame evaluators with PEP 523. */
struct _PyInterpreterFrame;
/* Returns the code object of the frame (strong reference).
* Does not raise an exception. */
PyAPI_FUNC(PyObject *) PyUnstable_InterpreterFrame_GetCode(struct _PyInterpreterFrame *frame);
/* Returns a byte offset into the last executed instruction.
* Does not raise an exception. */
PyAPI_FUNC(int) PyUnstable_InterpreterFrame_GetLasti(struct _PyInterpreterFrame *frame);
/* Returns the currently executing line number, or -1 if there is no line number.
* Does not raise an exception. */
PyAPI_FUNC(int) PyUnstable_InterpreterFrame_GetLine(struct _PyInterpreterFrame *frame);
#define PyUnstable_EXECUTABLE_KIND_SKIP 0
#define PyUnstable_EXECUTABLE_KIND_PY_FUNCTION 1
#define PyUnstable_EXECUTABLE_KIND_BUILTIN_FUNCTION 3
#define PyUnstable_EXECUTABLE_KIND_METHOD_DESCRIPTOR 4
#define PyUnstable_EXECUTABLE_KINDS 5
PyAPI_DATA(const PyTypeObject *) const PyUnstable_ExecutableKinds[PyUnstable_EXECUTABLE_KINDS+1];


@@ -0,0 +1,47 @@
#ifndef Py_CPYTHON_HASH_H
# error "this header file must not be included directly"
#endif
/* Prime multiplier used in string and various other hashes. */
#define PyHASH_MULTIPLIER 1000003UL /* 0xf4243 */
/* Parameters used for the numeric hash implementation. See notes for
_Py_HashDouble in Python/pyhash.c. Numeric hashes are based on
reduction modulo the prime 2**_PyHASH_BITS - 1. */
#if SIZEOF_VOID_P >= 8
# define PyHASH_BITS 61
#else
# define PyHASH_BITS 31
#endif
#define PyHASH_MODULUS (((size_t)1 << _PyHASH_BITS) - 1)
#define PyHASH_INF 314159
#define PyHASH_IMAG PyHASH_MULTIPLIER
/* Aliases kept for backward compatibility with Python 3.12 */
#define _PyHASH_MULTIPLIER PyHASH_MULTIPLIER
#define _PyHASH_BITS PyHASH_BITS
#define _PyHASH_MODULUS PyHASH_MODULUS
#define _PyHASH_INF PyHASH_INF
#define _PyHASH_IMAG PyHASH_IMAG
/* Helpers for hash functions */
PyAPI_FUNC(Py_hash_t) _Py_HashDouble(PyObject *, double);
// Kept for backward compatibility
#define _Py_HashPointer Py_HashPointer
/* hash function definition */
typedef struct {
Py_hash_t (*const hash)(const void *, Py_ssize_t);
const char *name;
const int hash_bits;
const int seed_bits;
} PyHash_FuncDef;
PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void);
PyAPI_FUNC(Py_hash_t) Py_HashPointer(const void *ptr);
PyAPI_FUNC(Py_hash_t) PyObject_GenericHash(PyObject *);
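/* Illustrative sketch (editorial addition; not part of the vendored header).
   PyHash_GetFuncDef() reports which bytes-hash algorithm the interpreter was
   built with (e.g. "siphash13"). A sketch printing it; <stdio.h> is assumed
   to be available (Python.h pulls it in for the non-limited API): */
static inline void
_example_report_hash_algo(void)
{
    PyHash_FuncDef *def = PyHash_GetFuncDef();
    printf("hash=%s hash_bits=%d seed_bits=%d\n",
           def->name, def->hash_bits, def->seed_bits);
}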


@@ -0,0 +1,92 @@
#ifndef Py_CPYTHON_PYLIFECYCLE_H
# error "this header file must not be included directly"
#endif
/* Py_FrozenMain is kept out of the Limited API until documented and present
in all builds of Python */
PyAPI_FUNC(int) Py_FrozenMain(int argc, char **argv);
/* PEP 432 Multi-phase initialization API (Private while provisional!) */
PyAPI_FUNC(PyStatus) Py_PreInitialize(
const PyPreConfig *src_config);
PyAPI_FUNC(PyStatus) Py_PreInitializeFromBytesArgs(
const PyPreConfig *src_config,
Py_ssize_t argc,
char **argv);
PyAPI_FUNC(PyStatus) Py_PreInitializeFromArgs(
const PyPreConfig *src_config,
Py_ssize_t argc,
wchar_t **argv);
/* Initialization and finalization */
PyAPI_FUNC(PyStatus) Py_InitializeFromConfig(
const PyConfig *config);
// Python 3.8 provisional API (PEP 587)
PyAPI_FUNC(PyStatus) _Py_InitializeMain(void);
PyAPI_FUNC(int) Py_RunMain(void);
PyAPI_FUNC(void) _Py_NO_RETURN Py_ExitStatusException(PyStatus err);
PyAPI_FUNC(int) Py_FdIsInteractive(FILE *, const char *);
/* --- PyInterpreterConfig ------------------------------------ */
#define PyInterpreterConfig_DEFAULT_GIL (0)
#define PyInterpreterConfig_SHARED_GIL (1)
#define PyInterpreterConfig_OWN_GIL (2)
typedef struct {
// XXX "allow_object_sharing"? "own_objects"?
int use_main_obmalloc;
int allow_fork;
int allow_exec;
int allow_threads;
int allow_daemon_threads;
int check_multi_interp_extensions;
int gil;
} PyInterpreterConfig;
#define _PyInterpreterConfig_INIT \
{ \
.use_main_obmalloc = 0, \
.allow_fork = 0, \
.allow_exec = 0, \
.allow_threads = 1, \
.allow_daemon_threads = 0, \
.check_multi_interp_extensions = 1, \
.gil = PyInterpreterConfig_OWN_GIL, \
}
// gh-117649: The free-threaded build does not currently support single-phase
// init extensions in subinterpreters. For now, we ensure that
// `check_multi_interp_extensions` is always `1`, even in the legacy config.
#ifdef Py_GIL_DISABLED
# define _PyInterpreterConfig_LEGACY_CHECK_MULTI_INTERP_EXTENSIONS 1
#else
# define _PyInterpreterConfig_LEGACY_CHECK_MULTI_INTERP_EXTENSIONS 0
#endif
#define _PyInterpreterConfig_LEGACY_INIT \
{ \
.use_main_obmalloc = 1, \
.allow_fork = 1, \
.allow_exec = 1, \
.allow_threads = 1, \
.allow_daemon_threads = 1, \
.check_multi_interp_extensions = _PyInterpreterConfig_LEGACY_CHECK_MULTI_INTERP_EXTENSIONS, \
.gil = PyInterpreterConfig_SHARED_GIL, \
}
PyAPI_FUNC(PyStatus) Py_NewInterpreterFromConfig(
PyThreadState **tstate_p,
const PyInterpreterConfig *config);
typedef void (*atexit_datacallbackfunc)(void *);
PyAPI_FUNC(int) PyUnstable_AtExit(
PyInterpreterState *, atexit_datacallbackfunc, void *);
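A minimal sketch of the config API above: create an isolated subinterpreter with its own GIL, then tear it down. It assumes a running runtime with a current thread state, and error handling is reduced to Py_ExitStatusException().
PyInterpreterConfig config = {
    .use_main_obmalloc = 0,
    .allow_fork = 0,
    .allow_exec = 0,
    .allow_threads = 1,
    .allow_daemon_threads = 0,
    .check_multi_interp_extensions = 1,
    .gil = PyInterpreterConfig_OWN_GIL,
};
PyThreadState *tstate = NULL;
PyStatus status = Py_NewInterpreterFromConfig(&tstate, &config);
if (PyStatus_Exception(status)) {
    Py_ExitStatusException(status);   /* terminates the process */
}
/* tstate is now the current thread state; run code here ... */
Py_EndInterpreter(tstate);   /* destroys the subinterpreter */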

84
Dependencies/Python/include/cpython/pymem.h vendored Normal file
View File

@@ -0,0 +1,84 @@
#ifndef Py_CPYTHON_PYMEM_H
# error "this header file must not be included directly"
#endif
typedef enum {
/* PyMem_RawMalloc(), PyMem_RawRealloc() and PyMem_RawFree() */
PYMEM_DOMAIN_RAW,
/* PyMem_Malloc(), PyMem_Realloc() and PyMem_Free() */
PYMEM_DOMAIN_MEM,
/* PyObject_Malloc(), PyObject_Realloc() and PyObject_Free() */
PYMEM_DOMAIN_OBJ
} PyMemAllocatorDomain;
typedef enum {
PYMEM_ALLOCATOR_NOT_SET = 0,
PYMEM_ALLOCATOR_DEFAULT = 1,
PYMEM_ALLOCATOR_DEBUG = 2,
PYMEM_ALLOCATOR_MALLOC = 3,
PYMEM_ALLOCATOR_MALLOC_DEBUG = 4,
#ifdef WITH_PYMALLOC
PYMEM_ALLOCATOR_PYMALLOC = 5,
PYMEM_ALLOCATOR_PYMALLOC_DEBUG = 6,
#endif
#ifdef WITH_MIMALLOC
PYMEM_ALLOCATOR_MIMALLOC = 7,
PYMEM_ALLOCATOR_MIMALLOC_DEBUG = 8,
#endif
} PyMemAllocatorName;
typedef struct {
/* user context passed as the first argument to the 4 functions */
void *ctx;
/* allocate a memory block */
void* (*malloc) (void *ctx, size_t size);
/* allocate a memory block initialized by zeros */
void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
/* allocate or resize a memory block */
void* (*realloc) (void *ctx, void *ptr, size_t new_size);
/* release a memory block */
void (*free) (void *ctx, void *ptr);
} PyMemAllocatorEx;
/* Get the memory block allocator of the specified domain. */
PyAPI_FUNC(void) PyMem_GetAllocator(PyMemAllocatorDomain domain,
PyMemAllocatorEx *allocator);
/* Set the memory block allocator of the specified domain.
The new allocator must return a distinct non-NULL pointer when requesting
zero bytes.
For the PYMEM_DOMAIN_RAW domain, the allocator must be thread-safe: the GIL
is not held when the allocator is called.
If the new allocator is not a hook (does not call the previous allocator),
the PyMem_SetupDebugHooks() function must be called to reinstall the debug
hooks on top of the new allocator. */
PyAPI_FUNC(void) PyMem_SetAllocator(PyMemAllocatorDomain domain,
PyMemAllocatorEx *allocator);
/* Setup hooks to detect bugs in the following Python memory allocator
functions:
- PyMem_RawMalloc(), PyMem_RawRealloc(), PyMem_RawFree()
- PyMem_Malloc(), PyMem_Realloc(), PyMem_Free()
- PyObject_Malloc(), PyObject_Realloc() and PyObject_Free()
Newly allocated memory is filled with the byte 0xCB, freed memory is filled
with the byte 0xDB. Additional checks:
- detect API violations, ex: PyObject_Free() called on a buffer allocated
by PyMem_Malloc()
- detect write before the start of the buffer (buffer underflow)
- detect write after the end of the buffer (buffer overflow)
The function does nothing if Python is not compiled in debug mode. */
PyAPI_FUNC(void) PyMem_SetupDebugHooks(void);
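A sketch of the hook pattern described above: wrap the raw domain with a counting layer that delegates to the previous allocator (because it is a hook, PyMem_SetupDebugHooks() need not be re-called). install_counting_hook() is a hypothetical helper meant to run before Py_Initialize().
static PyMemAllocatorEx old_raw;
static size_t raw_allocs = 0;
static void *
hook_malloc(void *Py_UNUSED(ctx), size_t size)
{
    raw_allocs++;
    return old_raw.malloc(old_raw.ctx, size);
}
static void *
hook_calloc(void *Py_UNUSED(ctx), size_t nelem, size_t elsize)
{
    raw_allocs++;
    return old_raw.calloc(old_raw.ctx, nelem, elsize);
}
static void *
hook_realloc(void *Py_UNUSED(ctx), void *ptr, size_t new_size)
{
    return old_raw.realloc(old_raw.ctx, ptr, new_size);
}
static void
hook_free(void *Py_UNUSED(ctx), void *ptr)
{
    old_raw.free(old_raw.ctx, ptr);
}
static void
install_counting_hook(void)
{
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &old_raw);
    PyMemAllocatorEx hook = {NULL, hook_malloc, hook_calloc,
                             hook_realloc, hook_free};
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &hook);
}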

277
Dependencies/Python/include/cpython/pystate.h vendored Normal file
View File

@@ -0,0 +1,277 @@
#ifndef Py_CPYTHON_PYSTATE_H
# error "this header file must not be included directly"
#endif
/* private interpreter helpers */
PyAPI_FUNC(int) _PyInterpreterState_RequiresIDRef(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_RequireIDRef(PyInterpreterState *, int);
PyAPI_FUNC(PyObject *) PyUnstable_InterpreterState_GetMainModule(PyInterpreterState *);
/* State unique per thread */
/* Py_tracefunc return -1 when raising an exception, or 0 for success. */
typedef int (*Py_tracefunc)(PyObject *, PyFrameObject *, int, PyObject *);
/* The following values are used for 'what' for tracefunc functions
*
* To add a new kind of trace event, also update "trace_init" in
* Python/sysmodule.c to define the Python level event name
*/
#define PyTrace_CALL 0
#define PyTrace_EXCEPTION 1
#define PyTrace_LINE 2
#define PyTrace_RETURN 3
#define PyTrace_C_CALL 4
#define PyTrace_C_EXCEPTION 5
#define PyTrace_C_RETURN 6
#define PyTrace_OPCODE 7
typedef struct _err_stackitem {
/* This struct represents a single execution context where we might
* be currently handling an exception. It is a per-coroutine state
* (coroutine in the computer science sense, including the thread
* and generators).
*
* This is used as an entry on the exception stack, where each
* entry indicates if it is currently handling an exception.
* This ensures that the exception state is not impacted
* by "yields" from an except handler. The thread
* always has an entry (the bottom-most one).
*/
/* The exception currently being handled in this context, if any. */
PyObject *exc_value;
struct _err_stackitem *previous_item;
} _PyErr_StackItem;
typedef struct _stack_chunk {
struct _stack_chunk *previous;
size_t size;
size_t top;
PyObject * data[1]; /* Variable sized */
} _PyStackChunk;
struct _ts {
/* See Python/ceval.c for comments explaining most fields */
PyThreadState *prev;
PyThreadState *next;
PyInterpreterState *interp;
/* The global instrumentation version in high bits, plus flags indicating
when to break out of the interpreter loop in lower bits. See details in
pycore_ceval.h. */
uintptr_t eval_breaker;
struct {
/* Has been initialized to a safe state.
In order to be effective, this must be set to 0 during or right
after allocation. */
unsigned int initialized:1;
/* Has been bound to an OS thread. */
unsigned int bound:1;
/* Has been unbound from its OS thread. */
unsigned int unbound:1;
/* Has been bound as current for the GILState API. */
unsigned int bound_gilstate:1;
/* Currently in use (maybe holds the GIL). */
unsigned int active:1;
/* Currently holds the GIL. */
unsigned int holds_gil:1;
/* various stages of finalization */
unsigned int finalizing:1;
unsigned int cleared:1;
unsigned int finalized:1;
/* padding to align to 4 bytes */
unsigned int :23;
} _status;
#ifdef Py_BUILD_CORE
# define _PyThreadState_WHENCE_NOTSET -1
# define _PyThreadState_WHENCE_UNKNOWN 0
# define _PyThreadState_WHENCE_INIT 1
# define _PyThreadState_WHENCE_FINI 2
# define _PyThreadState_WHENCE_THREADING 3
# define _PyThreadState_WHENCE_GILSTATE 4
# define _PyThreadState_WHENCE_EXEC 5
#endif
int _whence;
/* Thread state (_Py_THREAD_ATTACHED, _Py_THREAD_DETACHED, _Py_THREAD_SUSPENDED).
See Include/internal/pycore_pystate.h for more details. */
int state;
int py_recursion_remaining;
int py_recursion_limit;
int c_recursion_remaining;
int recursion_headroom; /* Allow 50 more calls to handle any errors. */
/* 'tracing' keeps track of the execution depth when tracing/profiling.
This is to prevent the actual trace/profile code from being recorded in
the trace/profile. */
int tracing;
int what_event; /* The event currently being monitored, if any. */
/* Pointer to currently executing frame. */
struct _PyInterpreterFrame *current_frame;
Py_tracefunc c_profilefunc;
Py_tracefunc c_tracefunc;
PyObject *c_profileobj;
PyObject *c_traceobj;
/* The exception currently being raised */
PyObject *current_exception;
/* Pointer to the top of the exception stack for the exceptions
* we may be currently handling. (See _PyErr_StackItem above.)
* This is never NULL. */
_PyErr_StackItem *exc_info;
PyObject *dict; /* Stores per-thread state */
int gilstate_counter;
PyObject *async_exc; /* Asynchronous exception to raise */
unsigned long thread_id; /* Thread id where this tstate was created */
/* Native thread id where this tstate was created. This will be 0 except on
* those platforms that have the notion of native thread id, for which the
* macro PY_HAVE_THREAD_NATIVE_ID is then defined.
*/
unsigned long native_thread_id;
PyObject *delete_later;
/* Tagged pointer to top-most critical section, or zero if there is no
* active critical section. Critical sections are only used in
* `--disable-gil` builds (i.e., when Py_GIL_DISABLED is defined to 1). In the
* default build, this field is always zero.
*/
uintptr_t critical_section;
int coroutine_origin_tracking_depth;
PyObject *async_gen_firstiter;
PyObject *async_gen_finalizer;
PyObject *context;
uint64_t context_ver;
/* Unique thread state id. */
uint64_t id;
_PyStackChunk *datastack_chunk;
PyObject **datastack_top;
PyObject **datastack_limit;
/* XXX signal handlers should also be here */
/* The following fields are here to avoid allocation during init.
The data is exposed through PyThreadState pointer fields.
These fields should not be accessed directly outside of init.
This is indicated by an underscore prefix on the field names.
All other PyThreadState pointer fields are populated when
needed and default to NULL.
*/
// Note some fields do not have a leading underscore for backward
// compatibility. See https://bugs.python.org/issue45953#msg412046.
/* The thread's exception stack entry. (Always the last entry.) */
_PyErr_StackItem exc_state;
PyObject *previous_executor;
uint64_t dict_global_version;
/* Used to store/retrieve `threading.local` keys/values for this thread */
PyObject *threading_local_key;
/* Used by `threading.local`s to remove keys/values for dying threads.
The PyThreadObject must hold the only reference to this value.
*/
PyObject *threading_local_sentinel;
};
#ifdef Py_DEBUG
// A debug build is likely built with low optimization level which implies
// higher stack memory usage than a release build: use a lower limit.
# define Py_C_RECURSION_LIMIT 500
#elif defined(__s390x__)
# define Py_C_RECURSION_LIMIT 800
#elif defined(_WIN32) && defined(_M_ARM64)
# define Py_C_RECURSION_LIMIT 1000
#elif defined(_WIN32)
# define Py_C_RECURSION_LIMIT 3000
#elif defined(__ANDROID__)
// On an ARM64 emulator, API level 34 was OK with 10000, but API level 21
// crashed in test_compiler_recursion_limit.
# define Py_C_RECURSION_LIMIT 3000
#elif defined(_Py_ADDRESS_SANITIZER)
# define Py_C_RECURSION_LIMIT 4000
#elif defined(__wasi__)
// Based on wasmtime 16.
# define Py_C_RECURSION_LIMIT 5000
#else
// This value is duplicated in Lib/test/support/__init__.py
# define Py_C_RECURSION_LIMIT 10000
#endif
/* other API */
/* Similar to PyThreadState_Get(), but don't issue a fatal error
* if it is NULL. */
PyAPI_FUNC(PyThreadState *) PyThreadState_GetUnchecked(void);
// Alias kept for backward compatibility
#define _PyThreadState_UncheckedGet PyThreadState_GetUnchecked
// Disable tracing and profiling.
PyAPI_FUNC(void) PyThreadState_EnterTracing(PyThreadState *tstate);
// Reset tracing and profiling: enable them if a trace function or a profile
// function is set, otherwise disable them.
PyAPI_FUNC(void) PyThreadState_LeaveTracing(PyThreadState *tstate);
/* PyGILState */
/* Helper/diagnostic function - return 1 if the current thread
currently holds the GIL, 0 otherwise.
The function returns 1 if _PyGILState_check_enabled is non-zero. */
PyAPI_FUNC(int) PyGILState_Check(void);
/* The implementation of sys._current_frames(). Returns a dict mapping
thread id to that thread's current frame.
*/
PyAPI_FUNC(PyObject*) _PyThread_CurrentFrames(void);
/* Routines for advanced debuggers, requested by David Beazley.
Don't use unless you know what you are doing! */
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Main(void);
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Head(void);
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Next(PyInterpreterState *);
PyAPI_FUNC(PyThreadState *) PyInterpreterState_ThreadHead(PyInterpreterState *);
PyAPI_FUNC(PyThreadState *) PyThreadState_Next(PyThreadState *);
PyAPI_FUNC(void) PyThreadState_DeleteCurrent(void);
/* Frame evaluation API */
typedef PyObject* (*_PyFrameEvalFunction)(PyThreadState *tstate, struct _PyInterpreterFrame *, int);
PyAPI_FUNC(_PyFrameEvalFunction) _PyInterpreterState_GetEvalFrameFunc(
PyInterpreterState *interp);
PyAPI_FUNC(void) _PyInterpreterState_SetEvalFrameFunc(
PyInterpreterState *interp,
_PyFrameEvalFunction eval_frame);
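As a sketch of the tracing hooks above (names here are illustrative): a Py_tracefunc that counts PyTrace_LINE events, installed for the current thread with PyEval_SetTrace().
static unsigned long line_events = 0;
static int
count_lines(PyObject *Py_UNUSED(obj), PyFrameObject *Py_UNUSED(frame),
            int what, PyObject *Py_UNUSED(arg))
{
    if (what == PyTrace_LINE) {
        line_events++;
    }
    return 0;   /* -1 would signal that an exception was raised */
}
static void
trace_demo(void)
{
    PyEval_SetTrace(count_lines, NULL);
    PyRun_SimpleString("total = sum(range(10))");
    PyEval_SetTrace(NULL, NULL);   /* disable tracing again */
}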

175
Dependencies/Python/include/cpython/pystats.h vendored Normal file
View File

@@ -0,0 +1,175 @@
// Statistics on Python performance.
//
// API:
//
// - _Py_INCREF_STAT_INC() and _Py_DECREF_STAT_INC() used by Py_INCREF()
// and Py_DECREF().
// - _Py_stats variable
//
// Functions of the sys module:
//
// - sys._stats_on()
// - sys._stats_off()
// - sys._stats_clear()
// - sys._stats_dump()
//
// Python must be built with ./configure --enable-pystats to define the
// Py_STATS macro.
//
// Define _PY_INTERPRETER macro to increment interpreter_increfs and
// interpreter_decrefs. Otherwise, increment increfs and decrefs.
//
// The number of incref operations counted by `incref` and
// `interpreter_incref` is the number of increment operations, which is
// not equal to the total of all reference counts. A single increment
// operation may increase the reference count of an object by more than
// one. For example, see `_Py_RefcntAdd`.
#ifndef Py_CPYTHON_PYSTATS_H
# error "this header file must not be included directly"
#endif
#define PYSTATS_MAX_UOP_ID 512
#define SPECIALIZATION_FAILURE_KINDS 36
/* Stats for determining who is calling PyEval_EvalFrame */
#define EVAL_CALL_TOTAL 0
#define EVAL_CALL_VECTOR 1
#define EVAL_CALL_GENERATOR 2
#define EVAL_CALL_LEGACY 3
#define EVAL_CALL_FUNCTION_VECTORCALL 4
#define EVAL_CALL_BUILD_CLASS 5
#define EVAL_CALL_SLOT 6
#define EVAL_CALL_FUNCTION_EX 7
#define EVAL_CALL_API 8
#define EVAL_CALL_METHOD 9
#define EVAL_CALL_KINDS 10
typedef struct _specialization_stats {
uint64_t success;
uint64_t failure;
uint64_t hit;
uint64_t deferred;
uint64_t miss;
uint64_t deopt;
uint64_t failure_kinds[SPECIALIZATION_FAILURE_KINDS];
} SpecializationStats;
typedef struct _opcode_stats {
SpecializationStats specialization;
uint64_t execution_count;
uint64_t pair_count[256];
} OpcodeStats;
typedef struct _call_stats {
uint64_t inlined_py_calls;
uint64_t pyeval_calls;
uint64_t frames_pushed;
uint64_t frame_objects_created;
uint64_t eval_calls[EVAL_CALL_KINDS];
} CallStats;
typedef struct _object_stats {
uint64_t increfs;
uint64_t decrefs;
uint64_t interpreter_increfs;
uint64_t interpreter_decrefs;
uint64_t allocations;
uint64_t allocations512;
uint64_t allocations4k;
uint64_t allocations_big;
uint64_t frees;
uint64_t to_freelist;
uint64_t from_freelist;
uint64_t inline_values;
uint64_t dict_materialized_on_request;
uint64_t dict_materialized_new_key;
uint64_t dict_materialized_too_big;
uint64_t dict_materialized_str_subclass;
uint64_t type_cache_hits;
uint64_t type_cache_misses;
uint64_t type_cache_dunder_hits;
uint64_t type_cache_dunder_misses;
uint64_t type_cache_collisions;
/* Temporary value used during GC */
uint64_t object_visits;
} ObjectStats;
typedef struct _gc_stats {
uint64_t collections;
uint64_t object_visits;
uint64_t objects_collected;
} GCStats;
typedef struct _uop_stats {
uint64_t execution_count;
uint64_t miss;
uint64_t pair_count[PYSTATS_MAX_UOP_ID + 1];
} UOpStats;
#define _Py_UOP_HIST_SIZE 32
typedef struct _optimization_stats {
uint64_t attempts;
uint64_t traces_created;
uint64_t traces_executed;
uint64_t uops_executed;
uint64_t trace_stack_overflow;
uint64_t trace_stack_underflow;
uint64_t trace_too_long;
uint64_t trace_too_short;
uint64_t inner_loop;
uint64_t recursive_call;
uint64_t low_confidence;
uint64_t executors_invalidated;
UOpStats opcode[PYSTATS_MAX_UOP_ID + 1];
uint64_t unsupported_opcode[256];
uint64_t trace_length_hist[_Py_UOP_HIST_SIZE];
uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE];
uint64_t optimized_trace_length_hist[_Py_UOP_HIST_SIZE];
uint64_t optimizer_attempts;
uint64_t optimizer_successes;
uint64_t optimizer_failure_reason_no_memory;
uint64_t remove_globals_builtins_changed;
uint64_t remove_globals_incorrect_keys;
uint64_t error_in_opcode[PYSTATS_MAX_UOP_ID + 1];
} OptimizationStats;
typedef struct _rare_event_stats {
/* Setting an object's class, obj.__class__ = ... */
uint64_t set_class;
/* Setting the bases of a class, cls.__bases__ = ... */
uint64_t set_bases;
/* Setting the PEP 523 frame eval function, _PyInterpreterState_SetFrameEvalFunc() */
uint64_t set_eval_frame_func;
/* Modifying the builtins, __builtins__.__dict__[var] = ... */
uint64_t builtin_dict;
/* Modifying a function, e.g. func.__defaults__ = ..., etc. */
uint64_t func_modification;
/* Modifying a dict that is being watched */
uint64_t watched_dict_modification;
uint64_t watched_globals_modification;
} RareEventStats;
typedef struct _stats {
OpcodeStats opcode_stats[256];
CallStats call_stats;
ObjectStats object_stats;
OptimizationStats optimization_stats;
RareEventStats rare_event_stats;
GCStats *gc_stats;
} PyStats;
// Export for shared extensions like 'math'
PyAPI_DATA(PyStats*) _Py_stats;
#ifdef _PY_INTERPRETER
# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_increfs++; } while (0)
# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_decrefs++; } while (0)
#else
# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.increfs++; } while (0)
# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.decrefs++; } while (0)
#endif

96
Dependencies/Python/include/cpython/pythonrun.h vendored Normal file
View File

@@ -0,0 +1,96 @@
#ifndef Py_CPYTHON_PYTHONRUN_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(int) PyRun_SimpleStringFlags(const char *, PyCompilerFlags *);
PyAPI_FUNC(int) PyRun_AnyFileExFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
int closeit,
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_SimpleFileExFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
int closeit,
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_InteractiveOneFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_InteractiveOneObject(
FILE *fp,
PyObject *filename,
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_InteractiveLoopFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
PyCompilerFlags *flags);
PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *,
PyObject *, PyCompilerFlags *);
PyAPI_FUNC(PyObject *) PyRun_FileExFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
int start,
PyObject *globals,
PyObject *locals,
int closeit,
PyCompilerFlags *flags);
PyAPI_FUNC(PyObject *) Py_CompileStringExFlags(
const char *str,
const char *filename, /* decoded from the filesystem encoding */
int start,
PyCompilerFlags *flags,
int optimize);
PyAPI_FUNC(PyObject *) Py_CompileStringObject(
const char *str,
PyObject *filename, int start,
PyCompilerFlags *flags,
int optimize);
#define Py_CompileString(str, p, s) Py_CompileStringExFlags((str), (p), (s), NULL, -1)
#define Py_CompileStringFlags(str, p, s, f) Py_CompileStringExFlags((str), (p), (s), (f), -1)
/* A function flavor is also exported by libpython. It is required when
libpython is accessed directly rather than through the header files, which
define the macros below. On Windows, for example, PyAPI_FUNC() uses
dllexport to export functions in pythonXX.dll. */
PyAPI_FUNC(PyObject *) PyRun_String(const char *str, int s, PyObject *g, PyObject *l);
PyAPI_FUNC(int) PyRun_AnyFile(FILE *fp, const char *name);
PyAPI_FUNC(int) PyRun_AnyFileEx(FILE *fp, const char *name, int closeit);
PyAPI_FUNC(int) PyRun_AnyFileFlags(FILE *, const char *, PyCompilerFlags *);
PyAPI_FUNC(int) PyRun_SimpleString(const char *s);
PyAPI_FUNC(int) PyRun_SimpleFile(FILE *f, const char *p);
PyAPI_FUNC(int) PyRun_SimpleFileEx(FILE *f, const char *p, int c);
PyAPI_FUNC(int) PyRun_InteractiveOne(FILE *f, const char *p);
PyAPI_FUNC(int) PyRun_InteractiveLoop(FILE *f, const char *p);
PyAPI_FUNC(PyObject *) PyRun_File(FILE *fp, const char *p, int s, PyObject *g, PyObject *l);
PyAPI_FUNC(PyObject *) PyRun_FileEx(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, int c);
PyAPI_FUNC(PyObject *) PyRun_FileFlags(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, PyCompilerFlags *flags);
/* Use macros for a bunch of old variants */
#define PyRun_String(str, s, g, l) PyRun_StringFlags((str), (s), (g), (l), NULL)
#define PyRun_AnyFile(fp, name) PyRun_AnyFileExFlags((fp), (name), 0, NULL)
#define PyRun_AnyFileEx(fp, name, closeit) \
PyRun_AnyFileExFlags((fp), (name), (closeit), NULL)
#define PyRun_AnyFileFlags(fp, name, flags) \
PyRun_AnyFileExFlags((fp), (name), 0, (flags))
#define PyRun_SimpleString(s) PyRun_SimpleStringFlags((s), NULL)
#define PyRun_SimpleFile(f, p) PyRun_SimpleFileExFlags((f), (p), 0, NULL)
#define PyRun_SimpleFileEx(f, p, c) PyRun_SimpleFileExFlags((f), (p), (c), NULL)
#define PyRun_InteractiveOne(f, p) PyRun_InteractiveOneFlags((f), (p), NULL)
#define PyRun_InteractiveLoop(f, p) PyRun_InteractiveLoopFlags((f), (p), NULL)
#define PyRun_File(fp, p, s, g, l) \
PyRun_FileExFlags((fp), (p), (s), (g), (l), 0, NULL)
#define PyRun_FileEx(fp, p, s, g, l, c) \
PyRun_FileExFlags((fp), (p), (s), (g), (l), (c), NULL)
#define PyRun_FileFlags(fp, p, s, g, l, flags) \
PyRun_FileExFlags((fp), (p), (s), (g), (l), 0, (flags))
/* Stuff with no proper home (yet) */
PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, const char *);
PyAPI_DATA(char *(*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, const char *));
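A minimal embedding sketch using the convenience macros above (PyRun_SimpleString expands to PyRun_SimpleStringFlags(..., NULL)):
#include <Python.h>
#include <stdio.h>
int main(void)
{
    Py_Initialize();
    if (PyRun_SimpleString("print('hello from embedded Python')") != 0) {
        fprintf(stderr, "script raised an unhandled exception\n");
    }
    Py_Finalize();
    return 0;
}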

43
Dependencies/Python/include/cpython/pythread.h vendored Normal file
View File

@@ -0,0 +1,43 @@
#ifndef Py_CPYTHON_PYTHREAD_H
# error "this header file must not be included directly"
#endif
// PY_TIMEOUT_MAX is the highest usable value (in microseconds) of PY_TIMEOUT_T
// type, and depends on the system threading API.
//
// NOTE: this isn't the same value as `_thread.TIMEOUT_MAX`. The _thread module
// exposes a higher-level API, with timeouts expressed in seconds and
// floating-point numbers allowed.
PyAPI_DATA(const long long) PY_TIMEOUT_MAX;
#define PYTHREAD_INVALID_THREAD_ID ((unsigned long)-1)
#ifdef HAVE_PTHREAD_H
/* Darwin needs pthread.h to know the type name pthread_key_t. */
# include <pthread.h>
# define NATIVE_TSS_KEY_T pthread_key_t
#elif defined(NT_THREADS)
/* On Windows, the native TSS key type is DWORD, but unsigned long is
hardcoded here to avoid requiring an extra include directive.
*/
# define NATIVE_TSS_KEY_T unsigned long
#elif defined(HAVE_PTHREAD_STUBS)
# include "pthread_stubs.h"
# define NATIVE_TSS_KEY_T pthread_key_t
#else
# error "Require native threads. See https://bugs.python.org/issue31370"
#endif
/* When Py_LIMITED_API is not defined, the type layout of Py_tss_t is
exposed to allow static allocation by API clients. Even in this case,
TSS keys must be handled through the API functions for compatibility.
*/
struct _Py_tss_t {
int _is_initialized;
NATIVE_TSS_KEY_T _key;
};
#undef NATIVE_TSS_KEY_T
/* A statically allocated key must be initialized with Py_tss_NEEDS_INIT. */
#define Py_tss_NEEDS_INIT {0}
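For instance (sketch; my_key and stash_pointer are illustrative names), a statically allocated key is initialized with Py_tss_NEEDS_INIT and then only touched through the PyThread_tss_*() functions:
static Py_tss_t my_key = Py_tss_NEEDS_INIT;
static int
stash_pointer(void *value)
{
    if (PyThread_tss_create(&my_key) != 0) {   /* no-op if already created */
        return -1;
    }
    return PyThread_tss_set(&my_key, value);   /* 0 on success */
}
/* Later, in any thread: void *v = PyThread_tss_get(&my_key); */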

27
Dependencies/Python/include/cpython/pytime.h vendored Normal file
View File

@@ -0,0 +1,27 @@
// PyTime_t C API: see Doc/c-api/time.rst for the documentation.
#ifndef Py_LIMITED_API
#ifndef Py_PYTIME_H
#define Py_PYTIME_H
#ifdef __cplusplus
extern "C" {
#endif
typedef int64_t PyTime_t;
#define PyTime_MIN INT64_MIN
#define PyTime_MAX INT64_MAX
PyAPI_FUNC(double) PyTime_AsSecondsDouble(PyTime_t t);
PyAPI_FUNC(int) PyTime_Monotonic(PyTime_t *result);
PyAPI_FUNC(int) PyTime_PerfCounter(PyTime_t *result);
PyAPI_FUNC(int) PyTime_Time(PyTime_t *result);
PyAPI_FUNC(int) PyTime_MonotonicRaw(PyTime_t *result);
PyAPI_FUNC(int) PyTime_PerfCounterRaw(PyTime_t *result);
PyAPI_FUNC(int) PyTime_TimeRaw(PyTime_t *result);
#ifdef __cplusplus
}
#endif
#endif /* Py_PYTIME_H */
#endif /* Py_LIMITED_API */
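For example (sketch, inside a C function that reports errors by returning NULL; do_work() is hypothetical), timing a section of code:
PyTime_t t0, t1;
if (PyTime_PerfCounter(&t0) < 0) {
    return NULL;   /* exception already set */
}
do_work();
if (PyTime_PerfCounter(&t1) < 0) {
    return NULL;
}
double elapsed = PyTime_AsSecondsDouble(t1 - t0);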

71
Dependencies/Python/include/cpython/setobject.h vendored Normal file
View File

@@ -0,0 +1,71 @@
#ifndef Py_CPYTHON_SETOBJECT_H
# error "this header file must not be included directly"
#endif
/* There are three kinds of entries in the table:
1. Unused: key == NULL and hash == 0
2. Dummy: key == dummy and hash == -1
3. Active: key != NULL and key != dummy and hash != -1
The hash field of Unused slots is always zero.
The hash field of Dummy slots is set to -1,
meaning that dummy entries can be detected by
either entry->key==dummy or by entry->hash==-1.
*/
#define PySet_MINSIZE 8
typedef struct {
PyObject *key;
Py_hash_t hash; /* Cached hash code of the key */
} setentry;
/* The SetObject data structure is shared by set and frozenset objects.
Invariant for sets:
- hash is -1
Invariants for frozensets:
- data is immutable.
- hash is the hash of the frozenset or -1 if not computed yet.
*/
typedef struct {
PyObject_HEAD
Py_ssize_t fill; /* Number of active and dummy entries */
Py_ssize_t used; /* Number of active entries */
/* The table contains mask + 1 slots, and that's a power of 2.
* We store the mask instead of the size because the mask is more
* frequently needed.
*/
Py_ssize_t mask;
/* The table points to a fixed-size smalltable for small tables
* or to additional malloc'ed memory for bigger tables.
* The table pointer is never NULL which saves us from repeated
* runtime null-tests.
*/
setentry *table;
Py_hash_t hash; /* Only used by frozenset objects */
Py_ssize_t finger; /* Search finger for pop() */
setentry smalltable[PySet_MINSIZE];
PyObject *weakreflist; /* List of weak references */
} PySetObject;
#define _PySet_CAST(so) \
(assert(PyAnySet_Check(so)), _Py_CAST(PySetObject*, so))
static inline Py_ssize_t PySet_GET_SIZE(PyObject *so) {
#ifdef Py_GIL_DISABLED
return _Py_atomic_load_ssize_relaxed(&(_PySet_CAST(so)->used));
#else
return _PySet_CAST(so)->used;
#endif
}
#define PySet_GET_SIZE(so) PySet_GET_SIZE(_PyObject_CAST(so))
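A sketch of the unchecked size macro in use; PySet_GET_SIZE reads the used field directly (atomically on free-threaded builds) instead of calling PySet_Size():
PyObject *s = PySet_New(NULL);
PyObject *item = PyLong_FromLong(42);
if (s != NULL && item != NULL && PySet_Add(s, item) == 0) {
    Py_ssize_t n = PySet_GET_SIZE(s);   /* 1; no type or error checks */
    (void)n;
}
Py_XDECREF(item);   /* PySet_Add took its own reference */
Py_XDECREF(s);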

22
Dependencies/Python/include/cpython/sysmodule.h vendored Normal file
View File

@@ -0,0 +1,22 @@
#ifndef Py_CPYTHON_SYSMODULE_H
# error "this header file must not be included directly"
#endif
typedef int(*Py_AuditHookFunction)(const char *, PyObject *, void *);
PyAPI_FUNC(int) PySys_AddAuditHook(Py_AuditHookFunction, void*);
typedef struct {
FILE* perf_map;
PyThread_type_lock map_lock;
} PerfMapState;
PyAPI_FUNC(int) PyUnstable_PerfMapState_Init(void);
PyAPI_FUNC(int) PyUnstable_WritePerfMapEntry(
const void *code_addr,
unsigned int code_size,
const char *entry_name);
PyAPI_FUNC(void) PyUnstable_PerfMapState_Fini(void);
PyAPI_FUNC(int) PyUnstable_CopyPerfMapFile(const char* parent_filename);
PyAPI_FUNC(int) PyUnstable_PerfTrampoline_CompileCode(PyCodeObject *);
PyAPI_FUNC(int) PyUnstable_PerfTrampoline_SetPersistAfterFork(int enable);
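A sketch of the audit-hook API (log_audit_event is an illustrative name): the callback receives the event name and an argument tuple; returning 0 allows the operation, while a hook that blocks an event should set an exception and return -1. Hooks may be added before Py_Initialize().
static int
log_audit_event(const char *event, PyObject *Py_UNUSED(args),
                void *Py_UNUSED(userData))
{
    fprintf(stderr, "audit: %s\n", event);   /* needs <stdio.h> */
    return 0;
}
/* PySys_AddAuditHook(log_audit_event, NULL); */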

13
Dependencies/Python/include/cpython/traceback.h vendored Normal file
View File

@@ -0,0 +1,13 @@
#ifndef Py_CPYTHON_TRACEBACK_H
# error "this header file must not be included directly"
#endif
typedef struct _traceback PyTracebackObject;
struct _traceback {
PyObject_HEAD
PyTracebackObject *tb_next;
PyFrameObject *tb_frame;
int tb_lasti;
int tb_lineno;
};
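Because the layout is exposed, C code can walk a traceback chain directly (sketch); following tb_next moves toward the frame where the exception was raised:
static void
dump_traceback_lines(PyTracebackObject *tb)
{
    for (; tb != NULL; tb = tb->tb_next) {
        printf("  line %d (lasti=%d)\n", tb->tb_lineno, tb->tb_lasti);
    }
}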

32
Dependencies/Python/include/cpython/tracemalloc.h vendored Normal file
View File

@@ -0,0 +1,32 @@
#ifndef Py_LIMITED_API
#ifndef Py_TRACEMALLOC_H
#define Py_TRACEMALLOC_H
#ifdef __cplusplus
extern "C" {
#endif
/* Track an allocated memory block in the tracemalloc module.
Return 0 on success, return -1 on error (failed to allocate memory to store
the trace).
Return -2 if tracemalloc is disabled.
If memory block is already tracked, update the existing trace. */
PyAPI_FUNC(int) PyTraceMalloc_Track(
unsigned int domain,
uintptr_t ptr,
size_t size);
/* Untrack an allocated memory block in the tracemalloc module.
Do nothing if the block was not tracked.
Return -2 if tracemalloc is disabled, otherwise return 0. */
PyAPI_FUNC(int) PyTraceMalloc_Untrack(
unsigned int domain,
uintptr_t ptr);
#ifdef __cplusplus
}
#endif
#endif // !Py_TRACEMALLOC_H
#endif // !Py_LIMITED_API
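A sketch of reporting a block from a custom allocator (the domain id MY_DOMAIN is an arbitrary, hypothetical caller-chosen value; assumes <stdlib.h> and runs inside some function):
#define MY_DOMAIN 0x4D59
void *block = malloc(100);
if (block != NULL) {
    (void)PyTraceMalloc_Track(MY_DOMAIN, (uintptr_t)block, 100);
    /* ... use the block ... */
    (void)PyTraceMalloc_Untrack(MY_DOMAIN, (uintptr_t)block);
    free(block);
}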

38
Dependencies/Python/include/cpython/tupleobject.h vendored Normal file
View File

@@ -0,0 +1,38 @@
#ifndef Py_CPYTHON_TUPLEOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_VAR_HEAD
/* ob_item contains space for 'ob_size' elements.
Items must normally not be NULL, except during construction when
the tuple is not yet visible outside the function that builds it. */
PyObject *ob_item[1];
} PyTupleObject;
PyAPI_FUNC(int) _PyTuple_Resize(PyObject **, Py_ssize_t);
/* Cast argument to PyTupleObject* type. */
#define _PyTuple_CAST(op) \
(assert(PyTuple_Check(op)), _Py_CAST(PyTupleObject*, (op)))
// Macros and static inline functions, trading safety for speed
static inline Py_ssize_t PyTuple_GET_SIZE(PyObject *op) {
PyTupleObject *tuple = _PyTuple_CAST(op);
return Py_SIZE(tuple);
}
#define PyTuple_GET_SIZE(op) PyTuple_GET_SIZE(_PyObject_CAST(op))
#define PyTuple_GET_ITEM(op, index) (_PyTuple_CAST(op)->ob_item[(index)])
/* Function *only* to be used to fill in brand new tuples */
static inline void
PyTuple_SET_ITEM(PyObject *op, Py_ssize_t index, PyObject *value) {
PyTupleObject *tuple = _PyTuple_CAST(op);
assert(0 <= index);
assert(index < Py_SIZE(tuple));
tuple->ob_item[index] = value;
}
#define PyTuple_SET_ITEM(op, index, value) \
PyTuple_SET_ITEM(_PyObject_CAST(op), (index), _PyObject_CAST(value))
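A sketch of the brand-new-tuple rule: PyTuple_SET_ITEM steals a reference and performs no checks, so it is only safe between PyTuple_New() and the point where the tuple becomes visible to other code (error handling omitted):
PyObject *t = PyTuple_New(2);
if (t != NULL) {
    PyTuple_SET_ITEM(t, 0, PyLong_FromLong(1));         /* reference stolen */
    PyTuple_SET_ITEM(t, 1, PyUnicode_FromString("x"));  /* reference stolen */
}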

703
Dependencies/Python/include/cpython/unicodeobject.h vendored Normal file
View File

@@ -0,0 +1,703 @@
#ifndef Py_CPYTHON_UNICODEOBJECT_H
# error "this header file must not be included directly"
#endif
/* Py_UNICODE was the native Unicode storage format (code unit) used by
Python and represents a single Unicode element in the Unicode type.
With PEP 393, Py_UNICODE is deprecated and replaced with a
typedef to wchar_t. */
Py_DEPRECATED(3.13) typedef wchar_t PY_UNICODE_TYPE;
Py_DEPRECATED(3.13) typedef wchar_t Py_UNICODE;
/* --- Internal Unicode Operations ---------------------------------------- */
// Static inline functions to work with surrogates
static inline int Py_UNICODE_IS_SURROGATE(Py_UCS4 ch) {
return (0xD800 <= ch && ch <= 0xDFFF);
}
static inline int Py_UNICODE_IS_HIGH_SURROGATE(Py_UCS4 ch) {
return (0xD800 <= ch && ch <= 0xDBFF);
}
static inline int Py_UNICODE_IS_LOW_SURROGATE(Py_UCS4 ch) {
return (0xDC00 <= ch && ch <= 0xDFFF);
}
// Join two surrogate characters and return a single Py_UCS4 value.
static inline Py_UCS4 Py_UNICODE_JOIN_SURROGATES(Py_UCS4 high, Py_UCS4 low) {
assert(Py_UNICODE_IS_HIGH_SURROGATE(high));
assert(Py_UNICODE_IS_LOW_SURROGATE(low));
return 0x10000 + (((high & 0x03FF) << 10) | (low & 0x03FF));
}
// High surrogate = top 10 bits added to 0xD800.
// The character must be in the range [U+10000; U+10ffff].
static inline Py_UCS4 Py_UNICODE_HIGH_SURROGATE(Py_UCS4 ch) {
assert(0x10000 <= ch && ch <= 0x10ffff);
return (0xD800 - (0x10000 >> 10) + (ch >> 10));
}
// Low surrogate = bottom 10 bits added to 0xDC00.
// The character must be in the range [U+10000; U+10ffff].
static inline Py_UCS4 Py_UNICODE_LOW_SURROGATE(Py_UCS4 ch) {
assert(0x10000 <= ch && ch <= 0x10ffff);
return (0xDC00 + (ch & 0x3FF));
}
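For example, U+1F600 splits into the surrogate pair 0xD83D/0xDE00 and joins back losslessly:
Py_UCS4 ch = 0x1F600;
Py_UCS4 hi = Py_UNICODE_HIGH_SURROGATE(ch);   /* 0xD83D */
Py_UCS4 lo = Py_UNICODE_LOW_SURROGATE(ch);    /* 0xDE00 */
assert(Py_UNICODE_JOIN_SURROGATES(hi, lo) == ch);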
/* --- Unicode Type ------------------------------------------------------- */
/* ASCII-only strings created through PyUnicode_New use the PyASCIIObject
structure. state.ascii and state.compact are set, and the data
immediately follow the structure. utf8_length can be found
in the length field; the utf8 pointer is equal to the data pointer. */
typedef struct {
/* There are 4 forms of Unicode strings:
- compact ascii:
* structure = PyASCIIObject
* test: PyUnicode_IS_COMPACT_ASCII(op)
* kind = PyUnicode_1BYTE_KIND
* compact = 1
* ascii = 1
* (length is the length of the utf8)
* (data starts just after the structure)
* (since ASCII is decoded from UTF-8, the utf8 string is the data)
- compact:
* structure = PyCompactUnicodeObject
* test: PyUnicode_IS_COMPACT(op) && !PyUnicode_IS_ASCII(op)
* kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND or
PyUnicode_4BYTE_KIND
* compact = 1
* ascii = 0
* utf8 is not shared with data
* utf8_length = 0 if utf8 is NULL
* (data starts just after the structure)
- legacy string:
* structure = PyUnicodeObject structure
* test: !PyUnicode_IS_COMPACT(op)
* kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND or
PyUnicode_4BYTE_KIND
* compact = 0
* data.any is not NULL
* utf8 is shared and utf8_length = length with data.any if ascii = 1
* utf8_length = 0 if utf8 is NULL
Compact strings use only one memory block (structure + characters),
whereas legacy strings use one block for the structure and one block
for characters.
Legacy strings are created by subclasses of Unicode.
See also _PyUnicode_CheckConsistency().
*/
PyObject_HEAD
Py_ssize_t length; /* Number of code points in the string */
Py_hash_t hash; /* Hash value; -1 if not set */
struct {
/* If interned is non-zero, the two references from the
dictionary to this object are *not* counted in ob_refcnt.
The possible values here are:
0: Not Interned
1: Interned
2: Interned and Immortal
3: Interned, Immortal, and Static
This categorization allows the runtime to determine the right
cleanup mechanism at runtime shutdown. */
unsigned int interned:2;
/* Character size:
- PyUnicode_1BYTE_KIND (1):
* character type = Py_UCS1 (8 bits, unsigned)
* all characters are in the range U+0000-U+00FF (latin1)
* if ascii is set, all characters are in the range U+0000-U+007F
(ASCII), otherwise at least one character is in the range
U+0080-U+00FF
- PyUnicode_2BYTE_KIND (2):
* character type = Py_UCS2 (16 bits, unsigned)
* all characters are in the range U+0000-U+FFFF (BMP)
* at least one character is in the range U+0100-U+FFFF
- PyUnicode_4BYTE_KIND (4):
* character type = Py_UCS4 (32 bits, unsigned)
* all characters are in the range U+0000-U+10FFFF
* at least one character is in the range U+10000-U+10FFFF
*/
unsigned int kind:3;
/* Compact is with respect to the allocation scheme. Compact unicode
objects only require one memory block while non-compact objects use
one block for the PyUnicodeObject struct and another for its data
buffer. */
unsigned int compact:1;
/* The string only contains characters in the range U+0000-U+007F (ASCII)
and the kind is PyUnicode_1BYTE_KIND. If ascii is set and compact is
set, use the PyASCIIObject structure. */
unsigned int ascii:1;
/* The object is statically allocated. */
unsigned int statically_allocated:1;
/* Padding to ensure that PyUnicode_DATA() is always aligned to
4 bytes (see issue #19537 on m68k). */
unsigned int :24;
} state;
} PyASCIIObject;
/* Non-ASCII strings allocated through PyUnicode_New use the
PyCompactUnicodeObject structure. state.compact is set, and the data
immediately follow the structure. */
typedef struct {
PyASCIIObject _base;
Py_ssize_t utf8_length; /* Number of bytes in utf8, excluding the
* terminating \0. */
char *utf8; /* UTF-8 representation (null-terminated) */
} PyCompactUnicodeObject;
/* Object format for Unicode subclasses. */
typedef struct {
PyCompactUnicodeObject _base;
union {
void *any;
Py_UCS1 *latin1;
Py_UCS2 *ucs2;
Py_UCS4 *ucs4;
} data; /* Canonical, smallest-form Unicode buffer */
} PyUnicodeObject;
#define _PyASCIIObject_CAST(op) \
(assert(PyUnicode_Check(op)), \
_Py_CAST(PyASCIIObject*, (op)))
#define _PyCompactUnicodeObject_CAST(op) \
(assert(PyUnicode_Check(op)), \
_Py_CAST(PyCompactUnicodeObject*, (op)))
#define _PyUnicodeObject_CAST(op) \
(assert(PyUnicode_Check(op)), \
_Py_CAST(PyUnicodeObject*, (op)))
/* --- Flexible String Representation Helper Macros (PEP 393) -------------- */
/* Values for PyASCIIObject.state: */
/* Interning state. */
#define SSTATE_NOT_INTERNED 0
#define SSTATE_INTERNED_MORTAL 1
#define SSTATE_INTERNED_IMMORTAL 2
#define SSTATE_INTERNED_IMMORTAL_STATIC 3
/* Use only if you know it's a string */
static inline unsigned int PyUnicode_CHECK_INTERNED(PyObject *op) {
return _PyASCIIObject_CAST(op)->state.interned;
}
#define PyUnicode_CHECK_INTERNED(op) PyUnicode_CHECK_INTERNED(_PyObject_CAST(op))
/* For backward compatibility */
static inline unsigned int PyUnicode_IS_READY(PyObject* Py_UNUSED(op)) {
return 1;
}
#define PyUnicode_IS_READY(op) PyUnicode_IS_READY(_PyObject_CAST(op))
/* Return true if the string contains only ASCII characters, or 0 if not. The
string may be compact (PyUnicode_IS_COMPACT_ASCII) or not, but must be
ready. */
static inline unsigned int PyUnicode_IS_ASCII(PyObject *op) {
return _PyASCIIObject_CAST(op)->state.ascii;
}
#define PyUnicode_IS_ASCII(op) PyUnicode_IS_ASCII(_PyObject_CAST(op))
/* Return true if the string is compact or 0 if not.
No type checks or Ready calls are performed. */
static inline unsigned int PyUnicode_IS_COMPACT(PyObject *op) {
return _PyASCIIObject_CAST(op)->state.compact;
}
#define PyUnicode_IS_COMPACT(op) PyUnicode_IS_COMPACT(_PyObject_CAST(op))
/* Return true if the string is a compact ASCII string (use PyASCIIObject
structure), or 0 if not. No type checks or Ready calls are performed. */
static inline int PyUnicode_IS_COMPACT_ASCII(PyObject *op) {
return (_PyASCIIObject_CAST(op)->state.ascii && PyUnicode_IS_COMPACT(op));
}
#define PyUnicode_IS_COMPACT_ASCII(op) PyUnicode_IS_COMPACT_ASCII(_PyObject_CAST(op))
enum PyUnicode_Kind {
/* Return values of the PyUnicode_KIND() function: */
PyUnicode_1BYTE_KIND = 1,
PyUnicode_2BYTE_KIND = 2,
PyUnicode_4BYTE_KIND = 4
};
// PyUnicode_KIND(): Return one of the PyUnicode_*_KIND values defined above.
//
// gh-89653: Converting this macro to a static inline function would introduce
// new compiler warnings on "kind < PyUnicode_KIND(str)" (compare signed and
// unsigned numbers) where kind type is an int or on
// "unsigned int kind = PyUnicode_KIND(str)" (cast signed to unsigned).
#define PyUnicode_KIND(op) _Py_RVALUE(_PyASCIIObject_CAST(op)->state.kind)
/* Return a void pointer to the raw unicode buffer. */
static inline void* _PyUnicode_COMPACT_DATA(PyObject *op) {
if (PyUnicode_IS_ASCII(op)) {
return _Py_STATIC_CAST(void*, (_PyASCIIObject_CAST(op) + 1));
}
return _Py_STATIC_CAST(void*, (_PyCompactUnicodeObject_CAST(op) + 1));
}
static inline void* _PyUnicode_NONCOMPACT_DATA(PyObject *op) {
void *data;
assert(!PyUnicode_IS_COMPACT(op));
data = _PyUnicodeObject_CAST(op)->data.any;
assert(data != NULL);
return data;
}
static inline void* PyUnicode_DATA(PyObject *op) {
if (PyUnicode_IS_COMPACT(op)) {
return _PyUnicode_COMPACT_DATA(op);
}
return _PyUnicode_NONCOMPACT_DATA(op);
}
#define PyUnicode_DATA(op) PyUnicode_DATA(_PyObject_CAST(op))
/* Return pointers to the canonical representation cast to unsigned char,
Py_UCS2, or Py_UCS4 for direct character access.
No checks are performed, use PyUnicode_KIND() before to ensure
these will work correctly. */
#define PyUnicode_1BYTE_DATA(op) _Py_STATIC_CAST(Py_UCS1*, PyUnicode_DATA(op))
#define PyUnicode_2BYTE_DATA(op) _Py_STATIC_CAST(Py_UCS2*, PyUnicode_DATA(op))
#define PyUnicode_4BYTE_DATA(op) _Py_STATIC_CAST(Py_UCS4*, PyUnicode_DATA(op))
/* Returns the length of the unicode string. */
static inline Py_ssize_t PyUnicode_GET_LENGTH(PyObject *op) {
return _PyASCIIObject_CAST(op)->length;
}
#define PyUnicode_GET_LENGTH(op) PyUnicode_GET_LENGTH(_PyObject_CAST(op))
/* Write into the canonical representation; this function does not do any sanity
checks and is intended for usage in loops. The caller should cache the
kind and data pointers obtained from other function calls.
index is the index in the string (starts at 0) and value is the new
code point value which should be written to that location. */
static inline void PyUnicode_WRITE(int kind, void *data,
Py_ssize_t index, Py_UCS4 value)
{
assert(index >= 0);
if (kind == PyUnicode_1BYTE_KIND) {
assert(value <= 0xffU);
_Py_STATIC_CAST(Py_UCS1*, data)[index] = _Py_STATIC_CAST(Py_UCS1, value);
}
else if (kind == PyUnicode_2BYTE_KIND) {
assert(value <= 0xffffU);
_Py_STATIC_CAST(Py_UCS2*, data)[index] = _Py_STATIC_CAST(Py_UCS2, value);
}
else {
assert(kind == PyUnicode_4BYTE_KIND);
assert(value <= 0x10ffffU);
_Py_STATIC_CAST(Py_UCS4*, data)[index] = value;
}
}
#define PyUnicode_WRITE(kind, data, index, value) \
PyUnicode_WRITE(_Py_STATIC_CAST(int, kind), _Py_CAST(void*, data), \
(index), _Py_STATIC_CAST(Py_UCS4, value))
/* Read a code point from the string's canonical representation. No checks
or ready calls are performed. */
static inline Py_UCS4 PyUnicode_READ(int kind,
const void *data, Py_ssize_t index)
{
assert(index >= 0);
if (kind == PyUnicode_1BYTE_KIND) {
return _Py_STATIC_CAST(const Py_UCS1*, data)[index];
}
if (kind == PyUnicode_2BYTE_KIND) {
return _Py_STATIC_CAST(const Py_UCS2*, data)[index];
}
assert(kind == PyUnicode_4BYTE_KIND);
return _Py_STATIC_CAST(const Py_UCS4*, data)[index];
}
#define PyUnicode_READ(kind, data, index) \
PyUnicode_READ(_Py_STATIC_CAST(int, kind), \
_Py_STATIC_CAST(const void*, data), \
(index))
/* PyUnicode_READ_CHAR() is less efficient than PyUnicode_READ() because it
calls PyUnicode_KIND() and might call it twice. For single reads, use
PyUnicode_READ_CHAR; for multiple consecutive reads, callers should
cache the kind and use PyUnicode_READ instead. */
static inline Py_UCS4 PyUnicode_READ_CHAR(PyObject *unicode, Py_ssize_t index)
{
int kind;
assert(index >= 0);
// Tolerate reading the NUL character at str[len(str)]
assert(index <= PyUnicode_GET_LENGTH(unicode));
kind = PyUnicode_KIND(unicode);
if (kind == PyUnicode_1BYTE_KIND) {
return PyUnicode_1BYTE_DATA(unicode)[index];
}
if (kind == PyUnicode_2BYTE_KIND) {
return PyUnicode_2BYTE_DATA(unicode)[index];
}
assert(kind == PyUnicode_4BYTE_KIND);
return PyUnicode_4BYTE_DATA(unicode)[index];
}
#define PyUnicode_READ_CHAR(unicode, index) \
PyUnicode_READ_CHAR(_PyObject_CAST(unicode), (index))
/* Return a maximum character value which is suitable for creating another
string based on op. This is always an approximation but more efficient
than iterating over the string. */
static inline Py_UCS4 PyUnicode_MAX_CHAR_VALUE(PyObject *op)
{
int kind;
if (PyUnicode_IS_ASCII(op)) {
return 0x7fU;
}
kind = PyUnicode_KIND(op);
if (kind == PyUnicode_1BYTE_KIND) {
return 0xffU;
}
if (kind == PyUnicode_2BYTE_KIND) {
return 0xffffU;
}
assert(kind == PyUnicode_4BYTE_KIND);
return 0x10ffffU;
}
#define PyUnicode_MAX_CHAR_VALUE(op) \
PyUnicode_MAX_CHAR_VALUE(_PyObject_CAST(op))
/* === Public API ========================================================= */
/* With PEP 393, this is the recommended way to allocate a new unicode object.
This function will allocate the object and its buffer in a single memory
block. Objects created using this function are not resizable. */
PyAPI_FUNC(PyObject*) PyUnicode_New(
Py_ssize_t size, /* Number of code points in the new string */
Py_UCS4 maxchar /* maximum code point value in the string */
);
/* For backward compatibility */
static inline int PyUnicode_READY(PyObject* Py_UNUSED(op))
{
return 0;
}
#define PyUnicode_READY(op) PyUnicode_READY(_PyObject_CAST(op))
/* Copy character from one unicode object into another, this function performs
character conversion when necessary and falls back to memcpy() if possible.
Fail if to is too small (smaller than *how_many* or smaller than
len(from)-from_start), or if kind(from[from_start:from_start+how_many]) >
kind(to), or if *to* has more than 1 reference.
Return the number of written characters, or return -1 and raise an exception
on error.
Pseudo-code:
how_many = min(how_many, len(from) - from_start)
to[to_start:to_start+how_many] = from[from_start:from_start+how_many]
return how_many
Note: The function doesn't write a terminating null character.
*/
PyAPI_FUNC(Py_ssize_t) PyUnicode_CopyCharacters(
PyObject *to,
Py_ssize_t to_start,
PyObject *from,
Py_ssize_t from_start,
Py_ssize_t how_many
);
/* Fill a string with a character: write fill_char into
unicode[start:start+length].
Fail if fill_char is bigger than the string maximum character, or if the
string has more than 1 reference.
Return the number of written characters, or return -1 and raise an exception
on error. */
PyAPI_FUNC(Py_ssize_t) PyUnicode_Fill(
PyObject *unicode,
Py_ssize_t start,
Py_ssize_t length,
Py_UCS4 fill_char
);
/* Create a new string from a buffer of Py_UCS1, Py_UCS2 or Py_UCS4 characters.
Scan the string to find the maximum character. */
PyAPI_FUNC(PyObject*) PyUnicode_FromKindAndData(
int kind,
const void *buffer,
Py_ssize_t size);
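A sketch tying the above together (make_hello is an illustrative name): allocate with PyUnicode_New(), then fill it with PyUnicode_WRITE(), caching the kind and data pointer outside the loop as recommended:
static PyObject *
make_hello(void)
{
    PyObject *s = PyUnicode_New(5, 0xE9);   /* "héllo": max code point U+00E9 */
    if (s == NULL) {
        return NULL;
    }
    int kind = PyUnicode_KIND(s);
    void *data = PyUnicode_DATA(s);
    const Py_UCS4 text[5] = {'h', 0xE9, 'l', 'l', 'o'};
    for (Py_ssize_t i = 0; i < 5; i++) {
        PyUnicode_WRITE(kind, data, i, text[i]);
    }
    return s;
}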
/* --- _PyUnicodeWriter API ----------------------------------------------- */
typedef struct {
PyObject *buffer;
void *data;
int kind;
Py_UCS4 maxchar;
Py_ssize_t size;
Py_ssize_t pos;
/* minimum number of allocated characters (default: 0) */
Py_ssize_t min_length;
/* minimum character (default: 127, ASCII) */
Py_UCS4 min_char;
/* If non-zero, overallocate the buffer (default: 0). */
unsigned char overallocate;
/* If readonly is 1, buffer is a shared string (cannot be modified)
and size is set to 0. */
unsigned char readonly;
} _PyUnicodeWriter;
// Initialize a Unicode writer.
//
// By default, the minimum buffer size is 0 characters and overallocation is
// disabled. Set min_length, min_char and overallocate attributes to control
// the allocation of the buffer.
PyAPI_FUNC(void)
_PyUnicodeWriter_Init(_PyUnicodeWriter *writer);
/* Prepare the buffer to write 'length' characters
with the specified maximum character.
Return 0 on success, raise an exception and return -1 on error. */
#define _PyUnicodeWriter_Prepare(WRITER, LENGTH, MAXCHAR) \
(((MAXCHAR) <= (WRITER)->maxchar \
&& (LENGTH) <= (WRITER)->size - (WRITER)->pos) \
? 0 \
: (((LENGTH) == 0) \
? 0 \
: _PyUnicodeWriter_PrepareInternal((WRITER), (LENGTH), (MAXCHAR))))
/* Don't call this function directly, use the _PyUnicodeWriter_Prepare() macro
instead. */
PyAPI_FUNC(int)
_PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer,
Py_ssize_t length, Py_UCS4 maxchar);
/* Prepare the buffer to have at least the kind KIND.
For example, kind=PyUnicode_2BYTE_KIND ensures that the writer will
support characters in the range U+0000-U+FFFF.
Return 0 on success, raise an exception and return -1 on error. */
#define _PyUnicodeWriter_PrepareKind(WRITER, KIND) \
((KIND) <= (WRITER)->kind \
? 0 \
: _PyUnicodeWriter_PrepareKindInternal((WRITER), (KIND)))
/* Don't call this function directly, use the _PyUnicodeWriter_PrepareKind()
macro instead. */
PyAPI_FUNC(int)
_PyUnicodeWriter_PrepareKindInternal(_PyUnicodeWriter *writer,
int kind);
/* Append a Unicode character.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteChar(_PyUnicodeWriter *writer,
Py_UCS4 ch
);
/* Append a Unicode string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteStr(_PyUnicodeWriter *writer,
PyObject *str /* Unicode string */
);
/* Append a substring of a Unicode string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteSubstring(_PyUnicodeWriter *writer,
PyObject *str, /* Unicode string */
Py_ssize_t start,
Py_ssize_t end
);
/* Append an ASCII-encoded byte string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteASCIIString(_PyUnicodeWriter *writer,
const char *str, /* ASCII-encoded byte string */
Py_ssize_t len /* number of bytes, or -1 if unknown */
);
/* Append a latin1-encoded byte string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteLatin1String(_PyUnicodeWriter *writer,
const char *str, /* latin1-encoded byte string */
Py_ssize_t len /* length in bytes */
);
/* Get the value of the writer as a Unicode string. Clear the
buffer of the writer. Raise an exception and return NULL
on error. */
PyAPI_FUNC(PyObject *)
_PyUnicodeWriter_Finish(_PyUnicodeWriter *writer);
/* Deallocate memory of a writer (clear its internal buffer). */
PyAPI_FUNC(void)
_PyUnicodeWriter_Dealloc(_PyUnicodeWriter *writer);
/* --- Manage the default encoding ---------------------------------------- */
/* Returns a pointer to the default encoding (UTF-8) of the
Unicode object unicode.
Like PyUnicode_AsUTF8AndSize(), this also caches the UTF-8 representation
in the unicodeobject.
_PyUnicode_AsString is a #define for PyUnicode_AsUTF8 to
support the previous internal function with the same behaviour.
Use of this API is DEPRECATED since no size information can be
extracted from the returned data.
*/
PyAPI_FUNC(const char *) PyUnicode_AsUTF8(PyObject *unicode);
// Alias kept for backward compatibility
#define _PyUnicode_AsString PyUnicode_AsUTF8
/* === Characters Type APIs =============================================== */
/* These should not be used directly. Use the Py_UNICODE_IS* and
Py_UNICODE_TO* macros instead.
These APIs are implemented in Objects/unicodectype.c.
*/
PyAPI_FUNC(int) _PyUnicode_IsLowercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsUppercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsTitlecase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsWhitespace(
const Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsLinebreak(
const Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(Py_UCS4) _PyUnicode_ToLowercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(Py_UCS4) _PyUnicode_ToUppercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(Py_UCS4) _PyUnicode_ToTitlecase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_ToDecimalDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_ToDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(double) _PyUnicode_ToNumeric(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsDecimalDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsNumeric(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsPrintable(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsAlpha(
Py_UCS4 ch /* Unicode character */
);
// Helper array used by Py_UNICODE_ISSPACE().
PyAPI_DATA(const unsigned char) _Py_ascii_whitespace[];
// Since splitting on whitespace is an important use case, and
// whitespace in most situations is solely ASCII whitespace, we
// optimize for the common case by using a quick look-up table
// _Py_ascii_whitespace (see below) with an inlined check.
static inline int Py_UNICODE_ISSPACE(Py_UCS4 ch) {
if (ch < 128) {
return _Py_ascii_whitespace[ch];
}
return _PyUnicode_IsWhitespace(ch);
}
#define Py_UNICODE_ISLOWER(ch) _PyUnicode_IsLowercase(ch)
#define Py_UNICODE_ISUPPER(ch) _PyUnicode_IsUppercase(ch)
#define Py_UNICODE_ISTITLE(ch) _PyUnicode_IsTitlecase(ch)
#define Py_UNICODE_ISLINEBREAK(ch) _PyUnicode_IsLinebreak(ch)
#define Py_UNICODE_TOLOWER(ch) _PyUnicode_ToLowercase(ch)
#define Py_UNICODE_TOUPPER(ch) _PyUnicode_ToUppercase(ch)
#define Py_UNICODE_TOTITLE(ch) _PyUnicode_ToTitlecase(ch)
#define Py_UNICODE_ISDECIMAL(ch) _PyUnicode_IsDecimalDigit(ch)
#define Py_UNICODE_ISDIGIT(ch) _PyUnicode_IsDigit(ch)
#define Py_UNICODE_ISNUMERIC(ch) _PyUnicode_IsNumeric(ch)
#define Py_UNICODE_ISPRINTABLE(ch) _PyUnicode_IsPrintable(ch)
#define Py_UNICODE_TODECIMAL(ch) _PyUnicode_ToDecimalDigit(ch)
#define Py_UNICODE_TODIGIT(ch) _PyUnicode_ToDigit(ch)
#define Py_UNICODE_TONUMERIC(ch) _PyUnicode_ToNumeric(ch)
#define Py_UNICODE_ISALPHA(ch) _PyUnicode_IsAlpha(ch)
static inline int Py_UNICODE_ISALNUM(Py_UCS4 ch) {
return (Py_UNICODE_ISALPHA(ch)
|| Py_UNICODE_ISDECIMAL(ch)
|| Py_UNICODE_ISDIGIT(ch)
|| Py_UNICODE_ISNUMERIC(ch));
}
/* === Misc functions ===================================================== */
// Return an interned Unicode object for an Identifier; may fail if there is no
// memory.
PyAPI_FUNC(PyObject*) _PyUnicode_FromId(_Py_Identifier*);

20
Dependencies/Python/include/cpython/warnings.h vendored Normal file
View File

@@ -0,0 +1,20 @@
#ifndef Py_CPYTHON_WARNINGS_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(int) PyErr_WarnExplicitObject(
PyObject *category,
PyObject *message,
PyObject *filename,
int lineno,
PyObject *module,
PyObject *registry);
PyAPI_FUNC(int) PyErr_WarnExplicitFormat(
PyObject *category,
const char *filename, int lineno,
const char *module, PyObject *registry,
const char *format, ...);
// DEPRECATED: Use PyErr_WarnEx() instead.
#define PyErr_Warn(category, msg) PyErr_WarnEx((category), (msg), 1)
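The explicit variants above target a specific file and line; ordinary extension code usually goes through PyErr_WarnEx() instead, as the deprecation note suggests (sketch; old_api and new_api are placeholder names):
static PyObject *
old_api(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args))
{
    if (PyErr_WarnEx(PyExc_DeprecationWarning,
                     "old_api() is deprecated; use new_api()", 1) < 0) {
        return NULL;   /* the warning was escalated to an exception */
    }
    Py_RETURN_NONE;
}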

63
Dependencies/Python/include/cpython/weakrefobject.h vendored Normal file
View File

@@ -0,0 +1,63 @@
#ifndef Py_CPYTHON_WEAKREFOBJECT_H
# error "this header file must not be included directly"
#endif
/* PyWeakReference is the base struct for the Python ReferenceType, ProxyType,
* and CallableProxyType.
*/
struct _PyWeakReference {
PyObject_HEAD
/* The object to which this is a weak reference, or Py_None if none.
* Note that this is a stealth reference: wr_object's refcount is
* not incremented to reflect this pointer.
*/
PyObject *wr_object;
/* A callable to invoke when wr_object dies, or NULL if none. */
PyObject *wr_callback;
/* A cache for wr_object's hash code. As usual for hashes, this is -1
* if the hash code isn't known yet.
*/
Py_hash_t hash;
/* If wr_object is weakly referenced, wr_object has a doubly-linked NULL-
* terminated list of weak references to it. These are the list pointers.
* If wr_object goes away, wr_object is set to Py_None, and these pointers
* have no meaning then.
*/
PyWeakReference *wr_prev;
PyWeakReference *wr_next;
vectorcallfunc vectorcall;
#ifdef Py_GIL_DISABLED
/* Pointer to the lock used when clearing in free-threaded builds.
* Normally this can be derived from wr_object, but in some cases we need
* to lock after wr_object has been set to Py_None.
*/
PyMutex *weakrefs_lock;
#endif
};
PyAPI_FUNC(void) _PyWeakref_ClearRef(PyWeakReference *self);
Py_DEPRECATED(3.13) static inline PyObject* PyWeakref_GET_OBJECT(PyObject *ref_obj)
{
PyWeakReference *ref;
PyObject *obj;
assert(PyWeakref_Check(ref_obj));
ref = _Py_CAST(PyWeakReference*, ref_obj);
obj = ref->wr_object;
// Explanation for the Py_REFCNT() check: when a weakref's target is part
// of a long chain of deallocations which triggers the trashcan mechanism,
// clearing the weakrefs can be delayed long after the target's refcount
// has dropped to zero. In the meantime, code accessing the weakref will
// be able to "see" the target object even though it is supposed to be
// unreachable. See issue gh-60806.
if (Py_REFCNT(obj) > 0) {
return obj;
}
return Py_None;
}
#define PyWeakref_GET_OBJECT(ref) PyWeakref_GET_OBJECT(_PyObject_CAST(ref))

16
Dependencies/Python/include/critical_section.h vendored Normal file
View File

@@ -0,0 +1,16 @@
#ifndef Py_CRITICAL_SECTION_H
#define Py_CRITICAL_SECTION_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_LIMITED_API
# define Py_CPYTHON_CRITICAL_SECTION_H
# include "cpython/critical_section.h"
# undef Py_CPYTHON_CRITICAL_SECTION_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_CRITICAL_SECTION_H */

267
Dependencies/Python/include/datetime.h vendored Normal file
View File

@@ -0,0 +1,267 @@
/* datetime.h
*/
#ifndef Py_LIMITED_API
#ifndef DATETIME_H
#define DATETIME_H
#ifdef __cplusplus
extern "C" {
#endif
/* Fields are packed into successive bytes, each viewed as unsigned and
* big-endian, unless otherwise noted:
*
* byte offset
*  0           year     2 bytes, 1-9999
*  2           month    1 byte,  1-12
*  3           day      1 byte,  1-31
*  4           hour     1 byte,  0-23
*  5           minute   1 byte,  0-59
*  6           second   1 byte,  0-59
*  7           usecond  3 bytes, 0-999999
* 10
*/
/* # of bytes for year, month, and day. */
#define _PyDateTime_DATE_DATASIZE 4
/* # of bytes for hour, minute, second, and usecond. */
#define _PyDateTime_TIME_DATASIZE 6
/* # of bytes for year, month, day, hour, minute, second, and usecond. */
#define _PyDateTime_DATETIME_DATASIZE 10
typedef struct
{
PyObject_HEAD
Py_hash_t hashcode; /* -1 when unknown */
int days; /* -MAX_DELTA_DAYS <= days <= MAX_DELTA_DAYS */
int seconds; /* 0 <= seconds < 24*3600 is invariant */
int microseconds; /* 0 <= microseconds < 1000000 is invariant */
} PyDateTime_Delta;
typedef struct
{
PyObject_HEAD /* a pure abstract base class */
} PyDateTime_TZInfo;
/* The datetime and time types have hashcodes, and an optional tzinfo member,
* present if and only if hastzinfo is true.
*/
#define _PyTZINFO_HEAD \
PyObject_HEAD \
Py_hash_t hashcode; \
char hastzinfo; /* boolean flag */
/* No _PyDateTime_BaseTZInfo is allocated; it's just to have something
* convenient to cast to, when getting at the hastzinfo member of objects
* starting with _PyTZINFO_HEAD.
*/
typedef struct
{
_PyTZINFO_HEAD
} _PyDateTime_BaseTZInfo;
/* All time objects are of PyDateTime_TimeType, but that can be allocated
* in two ways, with or without a tzinfo member. Without is the same as
* tzinfo == None, but consumes less memory. _PyDateTime_BaseTime is an
* internal struct used to allocate the right amount of space for the
* "without" case.
*/
#define _PyDateTime_TIMEHEAD \
_PyTZINFO_HEAD \
unsigned char data[_PyDateTime_TIME_DATASIZE];
typedef struct
{
_PyDateTime_TIMEHEAD
} _PyDateTime_BaseTime; /* hastzinfo false */
typedef struct
{
_PyDateTime_TIMEHEAD
unsigned char fold;
PyObject *tzinfo;
} PyDateTime_Time; /* hastzinfo true */
/* All datetime objects are of PyDateTime_DateTimeType, but that can be
* allocated in two ways too, just like for time objects above. In addition,
* the plain date type is a base class for datetime, so it must also have
* a hastzinfo member (although it's unused there).
*/
typedef struct
{
_PyTZINFO_HEAD
unsigned char data[_PyDateTime_DATE_DATASIZE];
} PyDateTime_Date;
#define _PyDateTime_DATETIMEHEAD \
_PyTZINFO_HEAD \
unsigned char data[_PyDateTime_DATETIME_DATASIZE];
typedef struct
{
_PyDateTime_DATETIMEHEAD
} _PyDateTime_BaseDateTime; /* hastzinfo false */
typedef struct
{
_PyDateTime_DATETIMEHEAD
unsigned char fold;
PyObject *tzinfo;
} PyDateTime_DateTime; /* hastzinfo true */
/* Apply for date and datetime instances. */
// o is a pointer to a time or a datetime object.
#define _PyDateTime_HAS_TZINFO(o) (((_PyDateTime_BaseTZInfo *)(o))->hastzinfo)
#define PyDateTime_GET_YEAR(o) ((((PyDateTime_Date*)(o))->data[0] << 8) | \
((PyDateTime_Date*)(o))->data[1])
#define PyDateTime_GET_MONTH(o) (((PyDateTime_Date*)(o))->data[2])
#define PyDateTime_GET_DAY(o) (((PyDateTime_Date*)(o))->data[3])
#define PyDateTime_DATE_GET_HOUR(o) (((PyDateTime_DateTime*)(o))->data[4])
#define PyDateTime_DATE_GET_MINUTE(o) (((PyDateTime_DateTime*)(o))->data[5])
#define PyDateTime_DATE_GET_SECOND(o) (((PyDateTime_DateTime*)(o))->data[6])
#define PyDateTime_DATE_GET_MICROSECOND(o) \
((((PyDateTime_DateTime*)(o))->data[7] << 16) | \
(((PyDateTime_DateTime*)(o))->data[8] << 8) | \
((PyDateTime_DateTime*)(o))->data[9])
#define PyDateTime_DATE_GET_FOLD(o) (((PyDateTime_DateTime*)(o))->fold)
#define PyDateTime_DATE_GET_TZINFO(o) (_PyDateTime_HAS_TZINFO((o)) ? \
((PyDateTime_DateTime *)(o))->tzinfo : Py_None)
/* Apply for time instances. */
#define PyDateTime_TIME_GET_HOUR(o) (((PyDateTime_Time*)(o))->data[0])
#define PyDateTime_TIME_GET_MINUTE(o) (((PyDateTime_Time*)(o))->data[1])
#define PyDateTime_TIME_GET_SECOND(o) (((PyDateTime_Time*)(o))->data[2])
#define PyDateTime_TIME_GET_MICROSECOND(o) \
((((PyDateTime_Time*)(o))->data[3] << 16) | \
(((PyDateTime_Time*)(o))->data[4] << 8) | \
((PyDateTime_Time*)(o))->data[5])
#define PyDateTime_TIME_GET_FOLD(o) (((PyDateTime_Time*)(o))->fold)
#define PyDateTime_TIME_GET_TZINFO(o) (_PyDateTime_HAS_TZINFO(o) ? \
((PyDateTime_Time *)(o))->tzinfo : Py_None)
/* Apply for time delta instances */
#define PyDateTime_DELTA_GET_DAYS(o) (((PyDateTime_Delta*)(o))->days)
#define PyDateTime_DELTA_GET_SECONDS(o) (((PyDateTime_Delta*)(o))->seconds)
#define PyDateTime_DELTA_GET_MICROSECONDS(o) \
(((PyDateTime_Delta*)(o))->microseconds)
/* Define structure for C API. */
typedef struct {
/* type objects */
PyTypeObject *DateType;
PyTypeObject *DateTimeType;
PyTypeObject *TimeType;
PyTypeObject *DeltaType;
PyTypeObject *TZInfoType;
/* singletons */
PyObject *TimeZone_UTC;
/* constructors */
PyObject *(*Date_FromDate)(int, int, int, PyTypeObject*);
PyObject *(*DateTime_FromDateAndTime)(int, int, int, int, int, int, int,
PyObject*, PyTypeObject*);
PyObject *(*Time_FromTime)(int, int, int, int, PyObject*, PyTypeObject*);
PyObject *(*Delta_FromDelta)(int, int, int, int, PyTypeObject*);
PyObject *(*TimeZone_FromTimeZone)(PyObject *offset, PyObject *name);
/* constructors for the DB API */
PyObject *(*DateTime_FromTimestamp)(PyObject*, PyObject*, PyObject*);
PyObject *(*Date_FromTimestamp)(PyObject*, PyObject*);
/* PEP 495 constructors */
PyObject *(*DateTime_FromDateAndTimeAndFold)(int, int, int, int, int, int, int,
PyObject*, int, PyTypeObject*);
PyObject *(*Time_FromTimeAndFold)(int, int, int, int, PyObject*, int, PyTypeObject*);
} PyDateTime_CAPI;
#define PyDateTime_CAPSULE_NAME "datetime.datetime_CAPI"
/* This block is only used as part of the public API and should not be
* included in _datetimemodule.c, which does not use the C API capsule.
* See bpo-35081 for more details.
*/
#ifndef _PY_DATETIME_IMPL
/* Define global variable for the C API and a macro for setting it. */
static PyDateTime_CAPI *PyDateTimeAPI = NULL;
#define PyDateTime_IMPORT \
PyDateTimeAPI = (PyDateTime_CAPI *)PyCapsule_Import(PyDateTime_CAPSULE_NAME, 0)
/* Macro for access to the UTC singleton */
#define PyDateTime_TimeZone_UTC PyDateTimeAPI->TimeZone_UTC
/* Macros for type checking when not building the Python core. */
#define PyDate_Check(op) PyObject_TypeCheck((op), PyDateTimeAPI->DateType)
#define PyDate_CheckExact(op) Py_IS_TYPE((op), PyDateTimeAPI->DateType)
#define PyDateTime_Check(op) PyObject_TypeCheck((op), PyDateTimeAPI->DateTimeType)
#define PyDateTime_CheckExact(op) Py_IS_TYPE((op), PyDateTimeAPI->DateTimeType)
#define PyTime_Check(op) PyObject_TypeCheck((op), PyDateTimeAPI->TimeType)
#define PyTime_CheckExact(op) Py_IS_TYPE((op), PyDateTimeAPI->TimeType)
#define PyDelta_Check(op) PyObject_TypeCheck((op), PyDateTimeAPI->DeltaType)
#define PyDelta_CheckExact(op) Py_IS_TYPE((op), PyDateTimeAPI->DeltaType)
#define PyTZInfo_Check(op) PyObject_TypeCheck((op), PyDateTimeAPI->TZInfoType)
#define PyTZInfo_CheckExact(op) Py_IS_TYPE((op), PyDateTimeAPI->TZInfoType)
/* Macros for accessing constructors in a simplified fashion. */
#define PyDate_FromDate(year, month, day) \
PyDateTimeAPI->Date_FromDate((year), (month), (day), PyDateTimeAPI->DateType)
#define PyDateTime_FromDateAndTime(year, month, day, hour, min, sec, usec) \
PyDateTimeAPI->DateTime_FromDateAndTime((year), (month), (day), (hour), \
(min), (sec), (usec), Py_None, PyDateTimeAPI->DateTimeType)
#define PyDateTime_FromDateAndTimeAndFold(year, month, day, hour, min, sec, usec, fold) \
PyDateTimeAPI->DateTime_FromDateAndTimeAndFold((year), (month), (day), (hour), \
(min), (sec), (usec), Py_None, (fold), PyDateTimeAPI->DateTimeType)
#define PyTime_FromTime(hour, minute, second, usecond) \
PyDateTimeAPI->Time_FromTime((hour), (minute), (second), (usecond), \
Py_None, PyDateTimeAPI->TimeType)
#define PyTime_FromTimeAndFold(hour, minute, second, usecond, fold) \
PyDateTimeAPI->Time_FromTimeAndFold((hour), (minute), (second), (usecond), \
Py_None, (fold), PyDateTimeAPI->TimeType)
#define PyDelta_FromDSU(days, seconds, useconds) \
PyDateTimeAPI->Delta_FromDelta((days), (seconds), (useconds), 1, \
PyDateTimeAPI->DeltaType)
#define PyTimeZone_FromOffset(offset) \
PyDateTimeAPI->TimeZone_FromTimeZone((offset), NULL)
#define PyTimeZone_FromOffsetAndName(offset, name) \
PyDateTimeAPI->TimeZone_FromTimeZone((offset), (name))
/* Macros supporting the DB API. */
#define PyDateTime_FromTimestamp(args) \
PyDateTimeAPI->DateTime_FromTimestamp( \
(PyObject*) (PyDateTimeAPI->DateTimeType), (args), NULL)
#define PyDate_FromTimestamp(args) \
PyDateTimeAPI->Date_FromTimestamp( \
(PyObject*) (PyDateTimeAPI->DateType), (args))
#endif /* !defined(_PY_DATETIME_IMPL) */
#ifdef __cplusplus
}
#endif
#endif
#endif /* !Py_LIMITED_API */

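Because PyDateTimeAPI starts out NULL in each extension, every consumer of the constructors and accessor macros above must run PyDateTime_IMPORT first, typically in its module init. A minimal sketch (the module name "datedemo" is hypothetical):

#include <Python.h>
#include <datetime.h>

static PyObject *
release_date(PyObject *self, PyObject *unused)
{
    // Expands to a call through the capsule: Date_FromDate(2025, 4, 24, ...)
    return PyDate_FromDate(2025, 4, 24);
}

static PyMethodDef methods[] = {
    {"release_date", release_date, METH_NOARGS, "Return a fixed date."},
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT, "datedemo", NULL, -1, methods,
    NULL, NULL, NULL, NULL
};

PyMODINIT_FUNC
PyInit_datedemo(void)
{
    PyDateTime_IMPORT;       // loads the "datetime.datetime_CAPI" capsule
    if (PyDateTimeAPI == NULL) {
        return NULL;         // capsule import failed; exception already set
    }
    return PyModule_Create(&moduledef);
}
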
100
Dependencies/Python/include/descrobject.h vendored Normal file

@@ -0,0 +1,100 @@
/* Descriptors */
#ifndef Py_DESCROBJECT_H
#define Py_DESCROBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef PyObject *(*getter)(PyObject *, void *);
typedef int (*setter)(PyObject *, PyObject *, void *);
struct PyGetSetDef {
const char *name;
getter get;
setter set;
const char *doc;
void *closure;
};
PyAPI_DATA(PyTypeObject) PyClassMethodDescr_Type;
PyAPI_DATA(PyTypeObject) PyGetSetDescr_Type;
PyAPI_DATA(PyTypeObject) PyMemberDescr_Type;
PyAPI_DATA(PyTypeObject) PyMethodDescr_Type;
PyAPI_DATA(PyTypeObject) PyWrapperDescr_Type;
PyAPI_DATA(PyTypeObject) PyDictProxy_Type;
PyAPI_DATA(PyTypeObject) PyProperty_Type;
PyAPI_FUNC(PyObject *) PyDescr_NewMethod(PyTypeObject *, PyMethodDef *);
PyAPI_FUNC(PyObject *) PyDescr_NewClassMethod(PyTypeObject *, PyMethodDef *);
PyAPI_FUNC(PyObject *) PyDescr_NewMember(PyTypeObject *, PyMemberDef *);
PyAPI_FUNC(PyObject *) PyDescr_NewGetSet(PyTypeObject *, PyGetSetDef *);
PyAPI_FUNC(PyObject *) PyDictProxy_New(PyObject *);
PyAPI_FUNC(PyObject *) PyWrapper_New(PyObject *, PyObject *);
/* An array of PyMemberDef structures defines the name, type and offset
of selected members of a C structure. These can be read by
PyMember_GetOne() and set by PyMember_SetOne() (except if their READONLY
flag is set). The array must be terminated with an entry whose name
pointer is NULL. */
struct PyMemberDef {
const char *name;
int type;
Py_ssize_t offset;
int flags;
const char *doc;
};
// These constants used to be in structmember.h, not prefixed by Py_.
// (structmember.h now has aliases to the new names.)
/* Types */
#define Py_T_SHORT 0
#define Py_T_INT 1
#define Py_T_LONG 2
#define Py_T_FLOAT 3
#define Py_T_DOUBLE 4
#define Py_T_STRING 5
#define _Py_T_OBJECT 6 // Deprecated, use Py_T_OBJECT_EX instead
/* the ordering here is weird for binary compatibility */
#define Py_T_CHAR 7 /* 1-character string */
#define Py_T_BYTE 8 /* 8-bit signed int */
/* unsigned variants: */
#define Py_T_UBYTE 9
#define Py_T_USHORT 10
#define Py_T_UINT 11
#define Py_T_ULONG 12
/* Added by Jack: strings contained in the structure */
#define Py_T_STRING_INPLACE 13
/* Added by Lillo: bools contained in the structure (assumed char) */
#define Py_T_BOOL 14
#define Py_T_OBJECT_EX 16
#define Py_T_LONGLONG 17
#define Py_T_ULONGLONG 18
#define Py_T_PYSSIZET 19 /* Py_ssize_t */
#define _Py_T_NONE 20 // Deprecated. Value is always None.
/* Flags */
#define Py_READONLY 1
#define Py_AUDIT_READ 2 // Added in 3.10, harmless no-op before that
#define _Py_WRITE_RESTRICTED 4 // Deprecated, no-op. Do not reuse the value.
#define Py_RELATIVE_OFFSET 8
PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, PyMemberDef *);
PyAPI_FUNC(int) PyMember_SetOne(char *, PyMemberDef *, PyObject *);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_DESCROBJECT_H
# include "cpython/descrobject.h"
# undef Py_CPYTHON_DESCROBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_DESCROBJECT_H */

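A sketch of the PyMemberDef array described above, wired into a static extension type (all names are illustrative). The offsets let PyMember_GetOne()/PyMember_SetOne() read and write the C fields directly:

#include <Python.h>
#include <stddef.h>   // offsetof()

typedef struct {
    PyObject_HEAD
    int hits;            // exposed read-only via Py_READONLY
    PyObject *label;     // Py_T_OBJECT_EX raises AttributeError when NULL
} CounterObject;

static PyMemberDef counter_members[] = {
    {"hits",  Py_T_INT,       offsetof(CounterObject, hits),  Py_READONLY,
     "number of hits (read-only)"},
    {"label", Py_T_OBJECT_EX, offsetof(CounterObject, label), 0,
     "user-visible label"},
    {NULL, 0, 0, 0, NULL}    // name == NULL terminates the array
};

static PyTypeObject Counter_Type = {
    PyVarObject_HEAD_INIT(NULL, 0)
    .tp_name = "demo.Counter",
    .tp_basicsize = sizeof(CounterObject),
    .tp_flags = Py_TPFLAGS_DEFAULT,
    .tp_members = counter_members,
};
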
108
Dependencies/Python/include/dictobject.h vendored Normal file

@@ -0,0 +1,108 @@
#ifndef Py_DICTOBJECT_H
#define Py_DICTOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* Dictionary object type -- mapping from hashable object to object */
/* The distribution includes a separate file, Objects/dictnotes.txt,
describing explorations into dictionary design and optimization.
It covers typical dictionary use patterns, the parameters for
tuning dictionaries, and several ideas for possible optimizations.
*/
PyAPI_DATA(PyTypeObject) PyDict_Type;
#define PyDict_Check(op) \
PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_DICT_SUBCLASS)
#define PyDict_CheckExact(op) Py_IS_TYPE((op), &PyDict_Type)
PyAPI_FUNC(PyObject *) PyDict_New(void);
PyAPI_FUNC(PyObject *) PyDict_GetItem(PyObject *mp, PyObject *key);
PyAPI_FUNC(PyObject *) PyDict_GetItemWithError(PyObject *mp, PyObject *key);
PyAPI_FUNC(int) PyDict_SetItem(PyObject *mp, PyObject *key, PyObject *item);
PyAPI_FUNC(int) PyDict_DelItem(PyObject *mp, PyObject *key);
PyAPI_FUNC(void) PyDict_Clear(PyObject *mp);
PyAPI_FUNC(int) PyDict_Next(
PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value);
PyAPI_FUNC(PyObject *) PyDict_Keys(PyObject *mp);
PyAPI_FUNC(PyObject *) PyDict_Values(PyObject *mp);
PyAPI_FUNC(PyObject *) PyDict_Items(PyObject *mp);
PyAPI_FUNC(Py_ssize_t) PyDict_Size(PyObject *mp);
PyAPI_FUNC(PyObject *) PyDict_Copy(PyObject *mp);
PyAPI_FUNC(int) PyDict_Contains(PyObject *mp, PyObject *key);
/* PyDict_Update(mp, other) is equivalent to PyDict_Merge(mp, other, 1). */
PyAPI_FUNC(int) PyDict_Update(PyObject *mp, PyObject *other);
/* PyDict_Merge updates/merges from a mapping object (an object that
supports PyMapping_Keys() and PyObject_GetItem()). If override is true,
the last occurrence of a key wins, else the first. The Python
dict.update(other) is equivalent to PyDict_Merge(dict, other, 1).
*/
PyAPI_FUNC(int) PyDict_Merge(PyObject *mp,
PyObject *other,
int override);
/* PyDict_MergeFromSeq2 updates/merges from an iterable object producing
iterable objects of length 2. If override is true, the last occurrence
of a key wins, else the first. The Python dict constructor dict(seq2)
is equivalent to dict={}; PyDict_MergeFromSeq2(dict, seq2, 1).
*/
PyAPI_FUNC(int) PyDict_MergeFromSeq2(PyObject *d,
PyObject *seq2,
int override);
PyAPI_FUNC(PyObject *) PyDict_GetItemString(PyObject *dp, const char *key);
PyAPI_FUNC(int) PyDict_SetItemString(PyObject *dp, const char *key, PyObject *item);
PyAPI_FUNC(int) PyDict_DelItemString(PyObject *dp, const char *key);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030D0000
// Return the object from dictionary *mp* which has the key *key*.
// - If the key is present, set *result to a new strong reference to the value
//   and return 1.
// - If the key is missing, set *result to NULL and return 0.
// - On error, raise an exception and return -1.
PyAPI_FUNC(int) PyDict_GetItemRef(PyObject *mp, PyObject *key, PyObject **result);
PyAPI_FUNC(int) PyDict_GetItemStringRef(PyObject *mp, const char *key, PyObject **result);
#endif
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030A0000
PyAPI_FUNC(PyObject *) PyObject_GenericGetDict(PyObject *, void *);
#endif
/* Dictionary (keys, values, items) views */
PyAPI_DATA(PyTypeObject) PyDictKeys_Type;
PyAPI_DATA(PyTypeObject) PyDictValues_Type;
PyAPI_DATA(PyTypeObject) PyDictItems_Type;
#define PyDictKeys_Check(op) PyObject_TypeCheck((op), &PyDictKeys_Type)
#define PyDictValues_Check(op) PyObject_TypeCheck((op), &PyDictValues_Type)
#define PyDictItems_Check(op) PyObject_TypeCheck((op), &PyDictItems_Type)
/* This excludes Values, since they are not sets. */
# define PyDictViewSet_Check(op) \
(PyDictKeys_Check(op) || PyDictItems_Check(op))
/* Dictionary (key, value, items) iterators */
PyAPI_DATA(PyTypeObject) PyDictIterKey_Type;
PyAPI_DATA(PyTypeObject) PyDictIterValue_Type;
PyAPI_DATA(PyTypeObject) PyDictIterItem_Type;
PyAPI_DATA(PyTypeObject) PyDictRevIterKey_Type;
PyAPI_DATA(PyTypeObject) PyDictRevIterItem_Type;
PyAPI_DATA(PyTypeObject) PyDictRevIterValue_Type;
#ifndef Py_LIMITED_API
# define Py_CPYTHON_DICTOBJECT_H
# include "cpython/dictobject.h"
# undef Py_CPYTHON_DICTOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_DICTOBJECT_H */

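The three-way contract of PyDict_GetItemRef() documented above maps naturally onto one error branch and two success branches. A small sketch (names are illustrative):

#include <Python.h>

static int
get_count(PyObject *mapping, PyObject *key, long *out)
{
    PyObject *value;
    int rc = PyDict_GetItemRef(mapping, key, &value);
    if (rc < 0) {
        return -1;                 // error: exception already raised
    }
    if (rc == 0) {
        *out = 0;                  // key missing: value was set to NULL
        return 0;
    }
    *out = PyLong_AsLong(value);   // key present: we own a strong reference
    Py_DECREF(value);
    return (*out == -1 && PyErr_Occurred()) ? -1 : 0;
}
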
499
Dependencies/Python/include/dynamic_annotations.h vendored Normal file

@@ -0,0 +1,499 @@
/* Copyright (c) 2008-2009, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
* Author: Kostya Serebryany
* Copied to CPython by Jeffrey Yasskin, with all macros renamed to
* start with _Py_ to avoid colliding with users embedding Python, and
* with deprecated macros removed.
*/
/* This file defines dynamic annotations for use with dynamic analysis
tool such as valgrind, PIN, etc.
Dynamic annotation is a source code annotation that affects
the generated code (that is, the annotation is not a comment).
Each such annotation is attached to a particular
instruction and/or to a particular object (address) in the program.
The annotations that should be used by users are macros in all upper-case
(e.g., _Py_ANNOTATE_NEW_MEMORY).
Actual implementation of these macros may differ depending on the
dynamic analysis tool being used.
See https://code.google.com/p/data-race-test/ for more information.
This file supports the following dynamic analysis tools:
- None (DYNAMIC_ANNOTATIONS_ENABLED is not defined or zero).
Macros are defined empty.
- ThreadSanitizer, Helgrind, DRD (DYNAMIC_ANNOTATIONS_ENABLED is 1).
Macros are defined as calls to non-inlinable empty functions
that are intercepted by Valgrind. */
#ifndef __DYNAMIC_ANNOTATIONS_H__
#define __DYNAMIC_ANNOTATIONS_H__
#ifndef DYNAMIC_ANNOTATIONS_ENABLED
# define DYNAMIC_ANNOTATIONS_ENABLED 0
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0
/* -------------------------------------------------------------
Annotations useful when implementing condition variables such as CondVar,
using conditional critical sections (Await/LockWhen) and when constructing
user-defined synchronization mechanisms.
The annotations _Py_ANNOTATE_HAPPENS_BEFORE() and
_Py_ANNOTATE_HAPPENS_AFTER() can be used to define happens-before arcs in
user-defined synchronization mechanisms: the race detector will infer an
arc from the former to the latter when they share the same argument
pointer.
Example 1 (reference counting):
void Unref() {
_Py_ANNOTATE_HAPPENS_BEFORE(&refcount_);
if (AtomicDecrementByOne(&refcount_) == 0) {
_Py_ANNOTATE_HAPPENS_AFTER(&refcount_);
delete this;
}
}
Example 2 (message queue):
void MyQueue::Put(Type *e) {
MutexLock lock(&mu_);
_Py_ANNOTATE_HAPPENS_BEFORE(e);
PutElementIntoMyQueue(e);
}
Type *MyQueue::Get() {
MutexLock lock(&mu_);
Type *e = GetElementFromMyQueue();
_Py_ANNOTATE_HAPPENS_AFTER(e);
return e;
}
Note: when possible, please use the existing reference counting and message
queue implementations instead of inventing new ones. */
/* Report that wait on the condition variable at address "cv" has succeeded
and the lock at address "lock" is held. */
#define _Py_ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, lock)
/* Report that wait on the condition variable at "cv" has succeeded. Variant
w/o lock. */
#define _Py_ANNOTATE_CONDVAR_WAIT(cv) \
AnnotateCondVarWait(__FILE__, __LINE__, cv, NULL)
/* Report that we are about to signal on the condition variable at address
"cv". */
#define _Py_ANNOTATE_CONDVAR_SIGNAL(cv) \
AnnotateCondVarSignal(__FILE__, __LINE__, cv)
/* Report that we are about to signal_all on the condition variable at "cv". */
#define _Py_ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
AnnotateCondVarSignalAll(__FILE__, __LINE__, cv)
/* Annotations for user-defined synchronization mechanisms. */
#define _Py_ANNOTATE_HAPPENS_BEFORE(obj) _Py_ANNOTATE_CONDVAR_SIGNAL(obj)
#define _Py_ANNOTATE_HAPPENS_AFTER(obj) _Py_ANNOTATE_CONDVAR_WAIT(obj)
/* Report that the bytes in the range [pointer, pointer+size) are about
to be published safely. The race checker will create a happens-before
arc from the call _Py_ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
subsequent accesses to this memory.
Note: this annotation may not work properly if the race detector uses
sampling, i.e. does not observe all memory accesses.
*/
#define _Py_ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
AnnotatePublishMemoryRange(__FILE__, __LINE__, pointer, size)
/* Instruct the tool to create a happens-before arc between mu->Unlock() and
mu->Lock(). This annotation may slow down the race detector and hide real
races. Normally it is used only when it would be difficult to annotate each
of the mutex's critical sections individually using the annotations above.
This annotation makes sense only for hybrid race detectors. For pure
happens-before detectors this is a no-op. For more details see
https://code.google.com/p/data-race-test/wiki/PureHappensBeforeVsHybrid . */
#define _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
AnnotateMutexIsUsedAsCondVar(__FILE__, __LINE__, mu)
/* -------------------------------------------------------------
Annotations useful when defining memory allocators, or when memory that
was protected in one way starts to be protected in another. */
/* Report that a new memory at "address" of size "size" has been allocated.
This might be used when the memory has been retrieved from a free list and
is about to be reused, or when the locking discipline for a variable
changes. */
#define _Py_ANNOTATE_NEW_MEMORY(address, size) \
AnnotateNewMemory(__FILE__, __LINE__, address, size)
/* -------------------------------------------------------------
Annotations useful when defining FIFO queues that transfer data between
threads. */
/* Report that the producer-consumer queue (such as ProducerConsumerQueue) at
address "pcq" has been created. The _Py_ANNOTATE_PCQ_* annotations should
be used only for FIFO queues. For non-FIFO queues use
_Py_ANNOTATE_HAPPENS_BEFORE (for put) and _Py_ANNOTATE_HAPPENS_AFTER (for
get). */
#define _Py_ANNOTATE_PCQ_CREATE(pcq) \
AnnotatePCQCreate(__FILE__, __LINE__, pcq)
/* Report that the queue at address "pcq" is about to be destroyed. */
#define _Py_ANNOTATE_PCQ_DESTROY(pcq) \
AnnotatePCQDestroy(__FILE__, __LINE__, pcq)
/* Report that we are about to put an element into a FIFO queue at address
"pcq". */
#define _Py_ANNOTATE_PCQ_PUT(pcq) \
AnnotatePCQPut(__FILE__, __LINE__, pcq)
/* Report that we've just got an element from a FIFO queue at address "pcq". */
#define _Py_ANNOTATE_PCQ_GET(pcq) \
AnnotatePCQGet(__FILE__, __LINE__, pcq)
/* -------------------------------------------------------------
Annotations that suppress errors. It is usually better to express the
program's synchronization using the other annotations, but these can
be used when all else fails. */
/* Report that we may have a benign race at "pointer", with size
"sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the
point where "pointer" has been allocated, preferably close to the point
where the race happens. See also _Py_ANNOTATE_BENIGN_RACE_STATIC. */
#define _Py_ANNOTATE_BENIGN_RACE(pointer, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, pointer, \
sizeof(*(pointer)), description)
/* Same as _Py_ANNOTATE_BENIGN_RACE(address, description), but applies to
the memory range [address, address+size). */
#define _Py_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
AnnotateBenignRaceSized(__FILE__, __LINE__, address, size, description)
/* Request the analysis tool to ignore all reads in the current thread
until _Py_ANNOTATE_IGNORE_READS_END is called.
Useful to ignore intentional racey reads, while still checking
other reads and all writes.
See also _Py_ANNOTATE_UNPROTECTED_READ. */
#define _Py_ANNOTATE_IGNORE_READS_BEGIN() \
AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
/* Stop ignoring reads. */
#define _Py_ANNOTATE_IGNORE_READS_END() \
AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
/* Similar to _Py_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define _Py_ANNOTATE_IGNORE_WRITES_BEGIN() \
AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
/* Stop ignoring writes. */
#define _Py_ANNOTATE_IGNORE_WRITES_END() \
AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
/* Start ignoring all memory accesses (reads and writes). */
#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
do {\
_Py_ANNOTATE_IGNORE_READS_BEGIN();\
_Py_ANNOTATE_IGNORE_WRITES_BEGIN();\
} while (0)
/* Stop ignoring all memory accesses. */
#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_END() \
do {\
_Py_ANNOTATE_IGNORE_WRITES_END();\
_Py_ANNOTATE_IGNORE_READS_END();\
} while (0)
/* Similar to _Py_ANNOTATE_IGNORE_READS_BEGIN, but ignore synchronization events:
RWLOCK* and CONDVAR*. */
#define _Py_ANNOTATE_IGNORE_SYNC_BEGIN() \
AnnotateIgnoreSyncBegin(__FILE__, __LINE__)
/* Stop ignoring sync events. */
#define _Py_ANNOTATE_IGNORE_SYNC_END() \
AnnotateIgnoreSyncEnd(__FILE__, __LINE__)
/* Enable (enable!=0) or disable (enable==0) race detection for all threads.
This annotation could be useful if you want to skip expensive race analysis
during some period of program execution, e.g. during initialization. */
#define _Py_ANNOTATE_ENABLE_RACE_DETECTION(enable) \
AnnotateEnableRaceDetection(__FILE__, __LINE__, enable)
/* -------------------------------------------------------------
Annotations useful for debugging. */
/* Request to trace every access to "address". */
#define _Py_ANNOTATE_TRACE_MEMORY(address) \
AnnotateTraceMemory(__FILE__, __LINE__, address)
/* Report the current thread name to a race detector. */
#define _Py_ANNOTATE_THREAD_NAME(name) \
AnnotateThreadName(__FILE__, __LINE__, name)
/* -------------------------------------------------------------
Annotations useful when implementing locks. They are not
normally needed by modules that merely use locks.
The "lock" argument is a pointer to the lock object. */
/* Report that a lock has been created at address "lock". */
#define _Py_ANNOTATE_RWLOCK_CREATE(lock) \
AnnotateRWLockCreate(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" is about to be destroyed. */
#define _Py_ANNOTATE_RWLOCK_DESTROY(lock) \
AnnotateRWLockDestroy(__FILE__, __LINE__, lock)
/* Report that the lock at address "lock" has been acquired.
is_w=1 for writer lock, is_w=0 for reader lock. */
#define _Py_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w)
/* Report that the lock at address "lock" is about to be released. */
#define _Py_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w)
/* -------------------------------------------------------------
Annotations useful when implementing barriers. They are not
normally needed by modules that merely use barriers.
The "barrier" argument is a pointer to the barrier object. */
/* Report that the "barrier" has been initialized with initial "count".
If 'reinitialization_allowed' is true, initialization is allowed to happen
multiple times w/o calling barrier_destroy() */
#define _Py_ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
AnnotateBarrierInit(__FILE__, __LINE__, barrier, count, \
reinitialization_allowed)
/* Report that we are about to enter barrier_wait("barrier"). */
#define _Py_ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
AnnotateBarrierWaitBefore(__FILE__, __LINE__, barrier)
/* Report that we just exited barrier_wait("barrier"). */
#define _Py_ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
AnnotateBarrierWaitAfter(__FILE__, __LINE__, barrier)
/* Report that the "barrier" has been destroyed. */
#define _Py_ANNOTATE_BARRIER_DESTROY(barrier) \
AnnotateBarrierDestroy(__FILE__, __LINE__, barrier)
/* -------------------------------------------------------------
Annotations useful for testing race detectors. */
/* Report that we expect a race on the variable at "address".
Use only in unit tests for a race detector. */
#define _Py_ANNOTATE_EXPECT_RACE(address, description) \
AnnotateExpectRace(__FILE__, __LINE__, address, description)
/* A no-op. Insert where you like to test the interceptors. */
#define _Py_ANNOTATE_NO_OP(arg) \
AnnotateNoOp(__FILE__, __LINE__, arg)
/* Force the race detector to flush its state. The actual effect depends on
* the implementation of the detector. */
#define _Py_ANNOTATE_FLUSH_STATE() \
AnnotateFlushState(__FILE__, __LINE__)
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define _Py_ANNOTATE_RWLOCK_CREATE(lock) /* empty */
#define _Py_ANNOTATE_RWLOCK_DESTROY(lock) /* empty */
#define _Py_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) /* empty */
#define _Py_ANNOTATE_RWLOCK_RELEASED(lock, is_w) /* empty */
#define _Py_ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) /* */
#define _Py_ANNOTATE_BARRIER_WAIT_BEFORE(barrier) /* empty */
#define _Py_ANNOTATE_BARRIER_WAIT_AFTER(barrier) /* empty */
#define _Py_ANNOTATE_BARRIER_DESTROY(barrier) /* empty */
#define _Py_ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) /* empty */
#define _Py_ANNOTATE_CONDVAR_WAIT(cv) /* empty */
#define _Py_ANNOTATE_CONDVAR_SIGNAL(cv) /* empty */
#define _Py_ANNOTATE_CONDVAR_SIGNAL_ALL(cv) /* empty */
#define _Py_ANNOTATE_HAPPENS_BEFORE(obj) /* empty */
#define _Py_ANNOTATE_HAPPENS_AFTER(obj) /* empty */
#define _Py_ANNOTATE_PUBLISH_MEMORY_RANGE(address, size) /* empty */
#define _Py_ANNOTATE_UNPUBLISH_MEMORY_RANGE(address, size) /* empty */
#define _Py_ANNOTATE_SWAP_MEMORY_RANGE(address, size) /* empty */
#define _Py_ANNOTATE_PCQ_CREATE(pcq) /* empty */
#define _Py_ANNOTATE_PCQ_DESTROY(pcq) /* empty */
#define _Py_ANNOTATE_PCQ_PUT(pcq) /* empty */
#define _Py_ANNOTATE_PCQ_GET(pcq) /* empty */
#define _Py_ANNOTATE_NEW_MEMORY(address, size) /* empty */
#define _Py_ANNOTATE_EXPECT_RACE(address, description) /* empty */
#define _Py_ANNOTATE_BENIGN_RACE(address, description) /* empty */
#define _Py_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) /* empty */
#define _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) /* empty */
#define _Py_ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) /* empty */
#define _Py_ANNOTATE_TRACE_MEMORY(arg) /* empty */
#define _Py_ANNOTATE_THREAD_NAME(name) /* empty */
#define _Py_ANNOTATE_IGNORE_READS_BEGIN() /* empty */
#define _Py_ANNOTATE_IGNORE_READS_END() /* empty */
#define _Py_ANNOTATE_IGNORE_WRITES_BEGIN() /* empty */
#define _Py_ANNOTATE_IGNORE_WRITES_END() /* empty */
#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() /* empty */
#define _Py_ANNOTATE_IGNORE_READS_AND_WRITES_END() /* empty */
#define _Py_ANNOTATE_IGNORE_SYNC_BEGIN() /* empty */
#define _Py_ANNOTATE_IGNORE_SYNC_END() /* empty */
#define _Py_ANNOTATE_ENABLE_RACE_DETECTION(enable) /* empty */
#define _Py_ANNOTATE_NO_OP(arg) /* empty */
#define _Py_ANNOTATE_FLUSH_STATE() /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
/* Use the macros above rather than using these functions directly. */
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateRWLockCreate(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockDestroy(const char *file, int line,
const volatile void *lock);
void AnnotateRWLockAcquired(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateRWLockReleased(const char *file, int line,
const volatile void *lock, long is_w);
void AnnotateBarrierInit(const char *file, int line,
const volatile void *barrier, long count,
long reinitialization_allowed);
void AnnotateBarrierWaitBefore(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierWaitAfter(const char *file, int line,
const volatile void *barrier);
void AnnotateBarrierDestroy(const char *file, int line,
const volatile void *barrier);
void AnnotateCondVarWait(const char *file, int line,
const volatile void *cv,
const volatile void *lock);
void AnnotateCondVarSignal(const char *file, int line,
const volatile void *cv);
void AnnotateCondVarSignalAll(const char *file, int line,
const volatile void *cv);
void AnnotatePublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotateUnpublishMemoryRange(const char *file, int line,
const volatile void *address,
long size);
void AnnotatePCQCreate(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQDestroy(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQPut(const char *file, int line,
const volatile void *pcq);
void AnnotatePCQGet(const char *file, int line,
const volatile void *pcq);
void AnnotateNewMemory(const char *file, int line,
const volatile void *address,
long size);
void AnnotateExpectRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRace(const char *file, int line,
const volatile void *address,
const char *description);
void AnnotateBenignRaceSized(const char *file, int line,
const volatile void *address,
long size,
const char *description);
void AnnotateMutexIsUsedAsCondVar(const char *file, int line,
const volatile void *mu);
void AnnotateTraceMemory(const char *file, int line,
const volatile void *arg);
void AnnotateThreadName(const char *file, int line,
const char *name);
void AnnotateIgnoreReadsBegin(const char *file, int line);
void AnnotateIgnoreReadsEnd(const char *file, int line);
void AnnotateIgnoreWritesBegin(const char *file, int line);
void AnnotateIgnoreWritesEnd(const char *file, int line);
void AnnotateEnableRaceDetection(const char *file, int line, int enable);
void AnnotateNoOp(const char *file, int line,
const volatile void *arg);
void AnnotateFlushState(const char *file, int line);
/* Return non-zero value if running under valgrind.
If "valgrind.h" is included into dynamic_annotations.c,
the regular valgrind mechanism will be used.
See http://valgrind.org/docs/manual/manual-core-adv.html about
RUNNING_ON_VALGRIND and other valgrind "client requests".
The file "valgrind.h" may be obtained by doing
svn co svn://svn.valgrind.org/valgrind/trunk/include
If for some reason you can't use "valgrind.h" or want to fake valgrind,
there are two ways to make this function return non-zero:
- Use environment variable: export RUNNING_ON_VALGRIND=1
- Make your tool intercept the function RunningOnValgrind() and
change its return value.
*/
int RunningOnValgrind(void);
#ifdef __cplusplus
}
#endif
#if DYNAMIC_ANNOTATIONS_ENABLED != 0 && defined(__cplusplus)
/* _Py_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
Instead of doing
_Py_ANNOTATE_IGNORE_READS_BEGIN();
... = x;
_Py_ANNOTATE_IGNORE_READS_END();
one can use
... = _Py_ANNOTATE_UNPROTECTED_READ(x); */
template <class T>
inline T _Py_ANNOTATE_UNPROTECTED_READ(const volatile T &x) {
_Py_ANNOTATE_IGNORE_READS_BEGIN();
T res = x;
_Py_ANNOTATE_IGNORE_READS_END();
return res;
}
/* Apply _Py_ANNOTATE_BENIGN_RACE_SIZED to a static variable. */
#define _Py_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
namespace { \
class static_var ## _annotator { \
public: \
static_var ## _annotator() { \
_Py_ANNOTATE_BENIGN_RACE_SIZED(&static_var, \
sizeof(static_var), \
# static_var ": " description); \
} \
}; \
static static_var ## _annotator the ## static_var ## _annotator;\
}
#else /* DYNAMIC_ANNOTATIONS_ENABLED == 0 */
#define _Py_ANNOTATE_UNPROTECTED_READ(x) (x)
#define _Py_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) /* empty */
#endif /* DYNAMIC_ANNOTATIONS_ENABLED */
#endif /* __DYNAMIC_ANNOTATIONS_H__ */

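A small sketch of the suppression macros in practice, assuming an intentionally unsynchronized statistics counter (all names besides the _Py_ANNOTATE_* macros are illustrative). With DYNAMIC_ANNOTATIONS_ENABLED left at 0 the call compiles away entirely:

#include "dynamic_annotations.h"

static long request_count;   // incremented by workers, read by a stats thread

static void
init_stats(void)
{
    // Tell the race detector that torn or stale reads of this counter
    // are acceptable, so it stops reporting them.
    _Py_ANNOTATE_BENIGN_RACE(&request_count,
                             "approximate counter; unsynchronized reads OK");
}
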
17
Dependencies/Python/include/enumobject.h vendored Normal file

@@ -0,0 +1,17 @@
#ifndef Py_ENUMOBJECT_H
#define Py_ENUMOBJECT_H
/* Enumerate Object */
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_DATA(PyTypeObject) PyEnum_Type;
PyAPI_DATA(PyTypeObject) PyReversed_Type;
#ifdef __cplusplus
}
#endif
#endif /* !Py_ENUMOBJECT_H */

45
Dependencies/Python/include/errcode.h vendored Normal file

@@ -0,0 +1,45 @@
// Error codes passed around between file input, tokenizer, parser and
// interpreter. This is necessary so we can turn them into Python
// exceptions at a higher level. Note that some errors have a
// slightly different meaning when passed from the tokenizer to the
// parser than when passed from the parser to the interpreter; e.g.
// the parser only returns E_EOF when it hits EOF immediately, and it
// never returns E_OK.
//
// The public PyRun_InteractiveOneObjectEx() function can return E_EOF,
// same as its variants:
//
// * PyRun_InteractiveOneObject()
// * PyRun_InteractiveOneFlags()
// * PyRun_InteractiveOne()
#ifndef Py_ERRCODE_H
#define Py_ERRCODE_H
#ifdef __cplusplus
extern "C" {
#endif
#define E_OK 10 /* No error */
#define E_EOF 11 /* End Of File */
#define E_INTR 12 /* Interrupted */
#define E_TOKEN 13 /* Bad token */
#define E_SYNTAX 14 /* Syntax error */
#define E_NOMEM 15 /* Ran out of memory */
#define E_DONE 16 /* Parsing complete */
#define E_ERROR 17 /* Execution error */
#define E_TABSPACE 18 /* Inconsistent mixing of tabs and spaces */
#define E_OVERFLOW 19 /* Node had too many children */
#define E_TOODEEP 20 /* Too many indentation levels */
#define E_DEDENT 21 /* No matching outer block for dedent */
#define E_DECODE 22 /* Error in decoding into Unicode */
#define E_EOFS 23 /* EOF in triple-quoted string */
#define E_EOLS 24 /* EOL in single-quoted string */
#define E_LINECONT 25 /* Unexpected characters after a line continuation */
#define E_BADSINGLE 27 /* Ill-formed single statement input */
#define E_INTERACT_STOP 28 /* Interactive mode stopped tokenization */
#define E_COLUMNOVERFLOW 29 /* Column offset overflow */
#ifdef __cplusplus
}
#endif
#endif /* !Py_ERRCODE_H */

108
Dependencies/Python/include/exports.h vendored Normal file

@@ -0,0 +1,108 @@
#ifndef Py_EXPORTS_H
#define Py_EXPORTS_H
/* Declarations for symbol visibility.
PyAPI_FUNC(type): Declares a public Python API function and return type
PyAPI_DATA(type): Declares public Python data and its type
PyMODINIT_FUNC: A Python module init function. If these functions are
inside the Python core, they are private to the core.
If in an extension module, it may be declared with
external linkage depending on the platform.
As a number of platforms support/require "__declspec(dllimport/dllexport)",
we support a HAVE_DECLSPEC_DLL macro to save duplication.
*/
/*
All Windows ports, except Cygwin, are handled in PC/pyconfig.h.
Cygwin is the only other autoconf platform requiring special
linkage handling and it uses __declspec().
*/
#if defined(__CYGWIN__)
# define HAVE_DECLSPEC_DLL
#endif
#if defined(_WIN32) || defined(__CYGWIN__)
#if defined(Py_ENABLE_SHARED)
#define Py_IMPORTED_SYMBOL __declspec(dllimport)
#define Py_EXPORTED_SYMBOL __declspec(dllexport)
#define Py_LOCAL_SYMBOL
#else
#define Py_IMPORTED_SYMBOL
#define Py_EXPORTED_SYMBOL
#define Py_LOCAL_SYMBOL
#endif
#else
/*
* If we only ever used gcc >= 5, we could use __has_attribute(visibility)
* as a cross-platform way to determine if visibility is supported. However,
* we may still need to support gcc >= 4, as some Ubuntu LTS and CentOS versions
* have 4 < gcc < 5.
*/
#ifndef __has_attribute
#define __has_attribute(x) 0 // Compatibility with non-clang compilers.
#endif
#if (defined(__GNUC__) && (__GNUC__ >= 4)) ||\
(defined(__clang__) && __has_attribute(visibility))
#define Py_IMPORTED_SYMBOL __attribute__ ((visibility ("default")))
#define Py_EXPORTED_SYMBOL __attribute__ ((visibility ("default")))
#define Py_LOCAL_SYMBOL __attribute__ ((visibility ("hidden")))
#else
#define Py_IMPORTED_SYMBOL
#define Py_EXPORTED_SYMBOL
#define Py_LOCAL_SYMBOL
#endif
#endif
/* only get special linkage if built as shared or platform is Cygwin */
#if defined(Py_ENABLE_SHARED) || defined(__CYGWIN__)
# if defined(HAVE_DECLSPEC_DLL)
# if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE)
# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
/* module init functions inside the core need no external linkage */
/* except for Cygwin to handle embedding */
# if defined(__CYGWIN__)
# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
# else /* __CYGWIN__ */
# define PyMODINIT_FUNC PyObject*
# endif /* __CYGWIN__ */
# else /* Py_BUILD_CORE */
/* Building an extension module, or an embedded situation */
/* public Python functions and data are imported */
/* Under Cygwin, auto-import functions to prevent compilation */
/* failures similar to those described at the bottom of 4.1: */
/* http://docs.python.org/extending/windows.html#a-cookbook-approach */
# if !defined(__CYGWIN__)
# define PyAPI_FUNC(RTYPE) Py_IMPORTED_SYMBOL RTYPE
# endif /* !__CYGWIN__ */
# define PyAPI_DATA(RTYPE) extern Py_IMPORTED_SYMBOL RTYPE
/* module init functions outside the core must be exported */
# if defined(__cplusplus)
# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
# else /* __cplusplus */
# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
# endif /* __cplusplus */
# endif /* Py_BUILD_CORE */
# endif /* HAVE_DECLSPEC_DLL */
#endif /* Py_ENABLE_SHARED */
/* If no external linkage macros defined by now, create defaults */
#ifndef PyAPI_FUNC
# define PyAPI_FUNC(RTYPE) Py_EXPORTED_SYMBOL RTYPE
#endif
#ifndef PyAPI_DATA
# define PyAPI_DATA(RTYPE) extern Py_EXPORTED_SYMBOL RTYPE
#endif
#ifndef PyMODINIT_FUNC
# if defined(__cplusplus)
# define PyMODINIT_FUNC extern "C" Py_EXPORTED_SYMBOL PyObject*
# else /* __cplusplus */
# define PyMODINIT_FUNC Py_EXPORTED_SYMBOL PyObject*
# endif /* __cplusplus */
#endif
#endif /* Py_EXPORTS_H */

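In practice the only one of these macros most extension authors spell out is PyMODINIT_FUNC; the PyAPI_* pair appears on core declarations, as throughout these headers. A minimal sketch (the module name "hello" is hypothetical):

#include <Python.h>

static struct PyModuleDef hello_def = {
    PyModuleDef_HEAD_INIT, "hello", "toy module", -1, NULL,
    NULL, NULL, NULL, NULL
};

// Per exports.h this expands to the right storage class for the platform,
// e.g. extern "C" __declspec(dllexport) PyObject * under MSVC C++.
PyMODINIT_FUNC
PyInit_hello(void)
{
    return PyModule_Create(&hello_def);
}
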
41
Dependencies/Python/include/fileobject.h vendored Normal file

@@ -0,0 +1,41 @@
/* File object interface (what's left of it -- see io.py) */
#ifndef Py_FILEOBJECT_H
#define Py_FILEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#define PY_STDIOTEXTMODE "b"
PyAPI_FUNC(PyObject *) PyFile_FromFd(int, const char *, const char *, int,
const char *, const char *,
const char *, int);
PyAPI_FUNC(PyObject *) PyFile_GetLine(PyObject *, int);
PyAPI_FUNC(int) PyFile_WriteObject(PyObject *, PyObject *, int);
PyAPI_FUNC(int) PyFile_WriteString(const char *, PyObject *);
PyAPI_FUNC(int) PyObject_AsFileDescriptor(PyObject *);
/* The default encoding used by the platform file system APIs
If non-NULL, this is different than the default encoding for strings
*/
Py_DEPRECATED(3.12) PyAPI_DATA(const char *) Py_FileSystemDefaultEncoding;
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000
Py_DEPRECATED(3.12) PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors;
#endif
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding;
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_UTF8Mode;
#endif
#ifndef Py_LIMITED_API
# define Py_CPYTHON_FILEOBJECT_H
# include "cpython/fileobject.h"
# undef Py_CPYTHON_FILEOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_FILEOBJECT_H */

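A sketch pairing two of the declarations above: PyObject_AsFileDescriptor() accepts an int or anything with a fileno() method, and PyFile_WriteString() calls the stream's write() (the function names below are illustrative):

#include <Python.h>

static int
fd_from(PyObject *file_like)
{
    int fd = PyObject_AsFileDescriptor(file_like);
    return fd;   // -1 with TypeError/ValueError set on failure
}

static int
log_line(PyObject *stream, const char *msg)
{
    // Equivalent to stream.write(msg); returns 0 on success, -1 on error.
    return PyFile_WriteString(msg, stream);
}
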
62
Dependencies/Python/include/fileutils.h vendored Normal file

@@ -0,0 +1,62 @@
#ifndef Py_FILEUTILS_H
#define Py_FILEUTILS_H
/*******************************
* stat() and fstat() fiddling *
*******************************/
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h> // S_ISREG()
#elif defined(HAVE_STAT_H)
# include <stat.h> // S_ISREG()
#endif
#ifndef S_IFMT
// VisualAge C/C++ failed to define the mount-type field in sys/stat.h.
# define S_IFMT 0170000
#endif
#ifndef S_IFLNK
// Windows doesn't define S_IFLNK, but posixmodule.c maps
// IO_REPARSE_TAG_SYMLINK to S_IFLNK.
# define S_IFLNK 0120000
#endif
#ifndef S_ISREG
# define S_ISREG(x) (((x) & S_IFMT) == S_IFREG)
#endif
#ifndef S_ISDIR
# define S_ISDIR(x) (((x) & S_IFMT) == S_IFDIR)
#endif
#ifndef S_ISCHR
# define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR)
#endif
#ifndef S_ISLNK
# define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK)
#endif
// Move this down here since some C++ #include's don't like to be included
// inside an extern "C".
#ifdef __cplusplus
extern "C" {
#endif
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000
PyAPI_FUNC(wchar_t *) Py_DecodeLocale(
const char *arg,
size_t *size);
PyAPI_FUNC(char*) Py_EncodeLocale(
const wchar_t *text,
size_t *error_pos);
#endif
#ifndef Py_LIMITED_API
# define Py_CPYTHON_FILEUTILS_H
# include "cpython/fileutils.h"
# undef Py_CPYTHON_FILEUTILS_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_FILEUTILS_H */

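A sketch of Py_DecodeLocale() as declared above, e.g. for turning a char* argv entry into the wchar_t* form the embedding APIs want. On failure the size output reports the cause rather than a length:

#include <Python.h>

static wchar_t *
decode_arg(const char *arg)
{
    size_t size;
    wchar_t *warg = Py_DecodeLocale(arg, &size);
    if (warg == NULL) {
        // size == (size_t)-1: out of memory; (size_t)-2: decoding error
        return NULL;
    }
    return warg;   // caller releases it with PyMem_RawFree(warg)
}
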
54
Dependencies/Python/include/floatobject.h vendored Normal file

@@ -0,0 +1,54 @@
/* Float object interface */
/*
PyFloatObject represents a (double precision) floating-point number.
*/
#ifndef Py_FLOATOBJECT_H
#define Py_FLOATOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_DATA(PyTypeObject) PyFloat_Type;
#define PyFloat_Check(op) PyObject_TypeCheck(op, &PyFloat_Type)
#define PyFloat_CheckExact(op) Py_IS_TYPE((op), &PyFloat_Type)
#define Py_RETURN_NAN return PyFloat_FromDouble(Py_NAN)
#define Py_RETURN_INF(sign) \
do { \
if (copysign(1., sign) == 1.) { \
return PyFloat_FromDouble(Py_HUGE_VAL); \
} \
else { \
return PyFloat_FromDouble(-Py_HUGE_VAL); \
} \
} while(0)
PyAPI_FUNC(double) PyFloat_GetMax(void);
PyAPI_FUNC(double) PyFloat_GetMin(void);
PyAPI_FUNC(PyObject*) PyFloat_GetInfo(void);
/* Return Python float from string PyObject. */
PyAPI_FUNC(PyObject*) PyFloat_FromString(PyObject*);
/* Return Python float from C double. */
PyAPI_FUNC(PyObject*) PyFloat_FromDouble(double);
/* Extract C double from Python float. The macro version trades safety for
speed. */
PyAPI_FUNC(double) PyFloat_AsDouble(PyObject*);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_FLOATOBJECT_H
# include "cpython/floatobject.h"
# undef Py_CPYTHON_FLOATOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_FLOATOBJECT_H */

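A sketch combining the conversion functions with the Py_RETURN_INF convenience macro above (the function is illustrative; note how copysign() inside the macro lets the sign of a zero pick the infinity):

#include <Python.h>

static PyObject *
reciprocal(PyObject *self, PyObject *arg)      // METH_O-style signature
{
    double x = PyFloat_AsDouble(arg);          // also accepts __float__()
    if (x == -1.0 && PyErr_Occurred()) {
        return NULL;
    }
    if (x == 0.0) {                            // matches +0.0 and -0.0
        Py_RETURN_INF(x);                      // sign of the zero decides
    }
    return PyFloat_FromDouble(1.0 / x);
}
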
20
Dependencies/Python/include/frameobject.h vendored Normal file

@@ -0,0 +1,20 @@
/* Frame object interface */
#ifndef Py_FRAMEOBJECT_H
#define Py_FRAMEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "pyframe.h"
#ifndef Py_LIMITED_API
# define Py_CPYTHON_FRAMEOBJECT_H
# include "cpython/frameobject.h"
# undef Py_CPYTHON_FRAMEOBJECT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_FRAMEOBJECT_H */

14
Dependencies/Python/include/genericaliasobject.h vendored Normal file

@@ -0,0 +1,14 @@
// Implementation of PEP 585: support list[int] etc.
#ifndef Py_GENERICALIASOBJECT_H
#define Py_GENERICALIASOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_FUNC(PyObject *) Py_GenericAlias(PyObject *, PyObject *);
PyAPI_DATA(PyTypeObject) Py_GenericAliasType;
#ifdef __cplusplus
}
#endif
#endif /* !Py_GENERICALIASOBJECT_H */

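Py_GenericAlias has exactly the PyCFunction signature, so its documented use is to be listed directly as a METH_O | METH_CLASS __class_getitem__, making an extension type subscriptable per PEP 585. A sketch (the surrounding Queue type is assumed):

#include <Python.h>

static PyMethodDef queue_methods[] = {
    // demo.Queue[int] will now return a types.GenericAlias object.
    {"__class_getitem__", Py_GenericAlias, METH_O | METH_CLASS,
     "See PEP 585"},
    {NULL, NULL, 0, NULL}
};
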
103
Dependencies/Python/include/import.h vendored Normal file

@@ -0,0 +1,103 @@
/* Module definition and import interface */
#ifndef Py_IMPORT_H
#define Py_IMPORT_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_FUNC(long) PyImport_GetMagicNumber(void);
PyAPI_FUNC(const char *) PyImport_GetMagicTag(void);
PyAPI_FUNC(PyObject *) PyImport_ExecCodeModule(
const char *name, /* UTF-8 encoded string */
PyObject *co
);
PyAPI_FUNC(PyObject *) PyImport_ExecCodeModuleEx(
const char *name, /* UTF-8 encoded string */
PyObject *co,
const char *pathname /* decoded from the filesystem encoding */
);
PyAPI_FUNC(PyObject *) PyImport_ExecCodeModuleWithPathnames(
const char *name, /* UTF-8 encoded string */
PyObject *co,
const char *pathname, /* decoded from the filesystem encoding */
const char *cpathname /* decoded from the filesystem encoding */
);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(PyObject *) PyImport_ExecCodeModuleObject(
PyObject *name,
PyObject *co,
PyObject *pathname,
PyObject *cpathname
);
#endif
PyAPI_FUNC(PyObject *) PyImport_GetModuleDict(void);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000
PyAPI_FUNC(PyObject *) PyImport_GetModule(PyObject *name);
#endif
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(PyObject *) PyImport_AddModuleObject(
PyObject *name
);
#endif
PyAPI_FUNC(PyObject *) PyImport_AddModule(
const char *name /* UTF-8 encoded string */
);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030d0000
PyAPI_FUNC(PyObject *) PyImport_AddModuleRef(
const char *name /* UTF-8 encoded string */
);
#endif
PyAPI_FUNC(PyObject *) PyImport_ImportModule(
const char *name /* UTF-8 encoded string */
);
Py_DEPRECATED(3.13) PyAPI_FUNC(PyObject *) PyImport_ImportModuleNoBlock(
const char *name /* UTF-8 encoded string */
);
PyAPI_FUNC(PyObject *) PyImport_ImportModuleLevel(
const char *name, /* UTF-8 encoded string */
PyObject *globals,
PyObject *locals,
PyObject *fromlist,
int level
);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03050000
PyAPI_FUNC(PyObject *) PyImport_ImportModuleLevelObject(
PyObject *name,
PyObject *globals,
PyObject *locals,
PyObject *fromlist,
int level
);
#endif
#define PyImport_ImportModuleEx(n, g, l, f) \
PyImport_ImportModuleLevel((n), (g), (l), (f), 0)
PyAPI_FUNC(PyObject *) PyImport_GetImporter(PyObject *path);
PyAPI_FUNC(PyObject *) PyImport_Import(PyObject *name);
PyAPI_FUNC(PyObject *) PyImport_ReloadModule(PyObject *m);
#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03030000
PyAPI_FUNC(int) PyImport_ImportFrozenModuleObject(
PyObject *name
);
#endif
PyAPI_FUNC(int) PyImport_ImportFrozenModule(
const char *name /* UTF-8 encoded string */
);
PyAPI_FUNC(int) PyImport_AppendInittab(
const char *name, /* ASCII encoded string */
PyObject* (*initfunc)(void)
);
#ifndef Py_LIMITED_API
# define Py_CPYTHON_IMPORT_H
# include "cpython/import.h"
# undef Py_CPYTHON_IMPORT_H
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_IMPORT_H */

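An embedding-side sketch of two declarations above. PyImport_AppendInittab() must run before Py_Initialize(); the import itself returns a new reference, or NULL with an exception set. PyInit_embedded is a hypothetical built-in module init function:

#include <Python.h>

extern PyObject *PyInit_embedded(void);   // hypothetical built-in module

int
run_embedded(void)
{
    if (PyImport_AppendInittab("embedded", PyInit_embedded) < 0) {
        return -1;                        // inittab could not be extended
    }
    Py_Initialize();
    PyObject *mod = PyImport_ImportModule("embedded");
    if (mod == NULL) {
        PyErr_Print();
        Py_Finalize();
        return -1;
    }
    Py_DECREF(mod);
    Py_Finalize();
    return 0;
}
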

@@ -0,0 +1,565 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_H
#define MIMALLOC_H
#define MI_MALLOC_VERSION 212 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
// ------------------------------------------------------
#ifdef __cplusplus
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
#define mi_attr_noexcept noexcept
#else
#define mi_attr_noexcept throw()
#endif
#else
#define mi_attr_noexcept
#endif
#if defined(__cplusplus) && (__cplusplus >= 201703)
#define mi_decl_nodiscard [[nodiscard]]
#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__) // includes clang, icc, and clang-cl
#define mi_decl_nodiscard __attribute__((warn_unused_result))
#elif defined(_HAS_NODISCARD)
#define mi_decl_nodiscard _NODISCARD
#elif (_MSC_VER >= 1700)
#define mi_decl_nodiscard _Check_return_
#else
#define mi_decl_nodiscard
#endif
#if defined(_MSC_VER) || defined(__MINGW32__)
#if !defined(MI_SHARED_LIB)
#define mi_decl_export
#elif defined(MI_SHARED_LIB_EXPORT)
#define mi_decl_export __declspec(dllexport)
#else
#define mi_decl_export __declspec(dllimport)
#endif
#if defined(__MINGW32__)
#define mi_decl_restrict
#define mi_attr_malloc __attribute__((malloc))
#else
#if (_MSC_VER >= 1900) && !defined(__EDG__)
#define mi_decl_restrict __declspec(allocator) __declspec(restrict)
#else
#define mi_decl_restrict __declspec(restrict)
#endif
#define mi_attr_malloc
#endif
#define mi_cdecl __cdecl
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#elif defined(__GNUC__) // includes clang and icc
#if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT)
#define mi_decl_export __attribute__((visibility("default")))
#else
#define mi_decl_export
#endif
#define mi_cdecl // leads to warnings... __attribute__((cdecl))
#define mi_decl_restrict
#define mi_attr_malloc __attribute__((malloc))
#if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#elif defined(__INTEL_COMPILER)
#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
#define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
#define mi_attr_alloc_align(p)
#else
#define mi_attr_alloc_size(s) __attribute__((alloc_size(s)))
#define mi_attr_alloc_size2(s1,s2) __attribute__((alloc_size(s1,s2)))
#define mi_attr_alloc_align(p) __attribute__((alloc_align(p)))
#endif
#else
#define mi_cdecl
#define mi_decl_export
#define mi_decl_restrict
#define mi_attr_malloc
#define mi_attr_alloc_size(s)
#define mi_attr_alloc_size2(s1,s2)
#define mi_attr_alloc_align(p)
#endif
// ------------------------------------------------------
// Includes
// ------------------------------------------------------
#include <stddef.h> // size_t
#include <stdbool.h> // bool
#include <stdint.h> // INTPTR_MAX
#ifdef __cplusplus
extern "C" {
#endif
// ------------------------------------------------------
// Standard malloc interface
// ------------------------------------------------------
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export void mi_free(void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
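/* A minimal usage sketch of the standard interface above (illustrative
 * only): every allocation made through mi_malloc()/mi_strdup() must be
 * released with mi_free(), never with the system free().
 *
 *     char *copy = mi_strdup("borealis");
 *     void *buf  = mi_malloc(4096);
 *     if (copy != NULL && buf != NULL) {
 *         // ... use copy and buf ...
 *     }
 *     mi_free(buf);
 *     mi_free(copy);
 */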
// ------------------------------------------------------
// Extended functionality
// ------------------------------------------------------
#define MI_SMALL_WSIZE_MAX (128)
#define MI_SMALL_SIZE_MAX (MI_SMALL_WSIZE_MAX*sizeof(void*))
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept;
// ------------------------------------------------------
// Internals
// ------------------------------------------------------
typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept;
typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg);
mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept;
typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);
mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
mi_decl_export int mi_version(void) mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept; // backward compatibility: `out` is ignored and should be NULL
mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;
mi_decl_export void mi_process_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_done(void) mi_attr_noexcept;
mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;
mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs,
size_t* current_rss, size_t* peak_rss,
size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept;
// -------------------------------------------------------------------------------------
// Aligned allocation
// Note that `alignment` always follows `size` for consistency with unaligned
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
// -------------------------------------------------------------------------------------
// Heaps: first-class, but can only allocate from the same thread that created it.
// -------------------------------------------------------------------------------------
struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_export void mi_heap_delete(mi_heap_t* heap);
mi_decl_export void mi_heap_destroy(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
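// Example (editor's illustrative sketch): a short-lived heap released in one
// go; the string content is arbitrary.
//
//   mi_heap_t* heap = mi_heap_new();
//   char* s = mi_heap_strdup(heap, "temporary");
//   /* ... use s ... */
//   mi_heap_destroy(heap);  // frees `s` and all other blocks in `heap` at once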
// --------------------------------------------------------------------------------
// Zero initialized re-allocation.
// Only valid on memory that was originally allocated with zero initialization too.
// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc.
// see <https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992>
// --------------------------------------------------------------------------------
mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4);
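// Example (editor's illustrative sketch): growing a zero-initialized buffer;
// the sizes are arbitrary.
//
//   uint8_t* p = (uint8_t*)mi_zalloc(100);   // bytes 0..99 are zero
//   p = (uint8_t*)mi_rezalloc(p, 200);       // bytes 100..199 are zero as well
//   mi_free(p);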
// ------------------------------------------------------
// Analysis
// ------------------------------------------------------
mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_check_owned(const void* p);
// An area of heap space contains blocks of a single size.
typedef struct mi_heap_area_s {
void* blocks; // start of the area containing heap blocks
size_t reserved; // bytes reserved for this area (virtual)
size_t committed; // currently committed bytes of this area
size_t used; // number of allocated blocks
size_t block_size; // size in bytes of each block
size_t full_block_size; // size in bytes of a full block including padding and metadata.
} mi_heap_area_t;
typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);
mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
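// Example (editor's illustrative sketch): counting allocated blocks with a
// visitor; the callback name `count_blocks` is hypothetical.
//
//   static bool count_blocks(const mi_heap_t* heap, const mi_heap_area_t* area,
//                            void* block, size_t block_size, void* arg) {
//     (void)heap; (void)area; (void)block_size;
//     if (block != NULL) { (*(size_t*)arg)++; }  // `block` is NULL for per-area visits
//     return true;  // continue visiting
//   }
//
//   size_t count = 0;
//   mi_heap_visit_blocks(mi_heap_get_default(), true /* visit_all_blocks */, &count_blocks, &count);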
// Experimental
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;
mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;
// Experimental: heaps associated with specific memory arenas
typedef int mi_arena_id_t;
mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
mi_decl_export int mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export int mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
#if MI_MALLOC_VERSION >= 182
// Create a heap that only allocates in the specified arena
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
#endif
// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;
// ------------------------------------------------------
// Convenience
// ------------------------------------------------------
#define mi_malloc_tp(tp) ((tp*)mi_malloc(sizeof(tp)))
#define mi_zalloc_tp(tp) ((tp*)mi_zalloc(sizeof(tp)))
#define mi_calloc_tp(tp,n) ((tp*)mi_calloc(n,sizeof(tp)))
#define mi_mallocn_tp(tp,n) ((tp*)mi_mallocn(n,sizeof(tp)))
#define mi_reallocn_tp(p,tp,n) ((tp*)mi_reallocn(p,n,sizeof(tp)))
#define mi_recalloc_tp(p,tp,n) ((tp*)mi_recalloc(p,n,sizeof(tp)))
#define mi_heap_malloc_tp(hp,tp) ((tp*)mi_heap_malloc(hp,sizeof(tp)))
#define mi_heap_zalloc_tp(hp,tp) ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
#define mi_heap_calloc_tp(hp,tp,n) ((tp*)mi_heap_calloc(hp,n,sizeof(tp)))
#define mi_heap_mallocn_tp(hp,tp,n) ((tp*)mi_heap_mallocn(hp,n,sizeof(tp)))
#define mi_heap_reallocn_tp(hp,p,tp,n) ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp)))
#define mi_heap_recalloc_tp(hp,p,tp,n) ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp)))
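// Example (editor's illustrative sketch): the `_tp` macros avoid repeating the
// type in casts; `point_t` is a hypothetical struct.
//
//   typedef struct { double x, y; } point_t;
//   point_t* pt  = mi_malloc_tp(point_t);      // one uninitialized point
//   point_t* pts = mi_calloc_tp(point_t, 16);  // 16 zero-initialized points
//   mi_free(pts); mi_free(pt);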
// ------------------------------------------------------
// Options
// ------------------------------------------------------
typedef enum mi_option_e {
// stable options
mi_option_show_errors, // print error messages
mi_option_show_stats, // print statistics on termination
mi_option_verbose, // print verbose messages
// the following options are experimental (see src/options.h)
mi_option_eager_commit, // eager commit segments? (after `eager_commit_delay` segments) (=1)
mi_option_arena_eager_commit, // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
mi_option_purge_decommits, // should a memory purge decommit (or only reset) (=1)
mi_option_allow_large_os_pages, // allow large (2MiB) OS pages, implies eager commit
mi_option_reserve_huge_os_pages, // reserve N huge OS pages (1GiB/page) at startup
mi_option_reserve_huge_os_pages_at, // reserve huge OS pages at a specific NUMA node
mi_option_reserve_os_memory, // reserve specified amount of OS memory in an arena at startup
mi_option_deprecated_segment_cache,
mi_option_deprecated_page_reset,
mi_option_abandoned_page_purge, // on thread termination, run pending (delayed) purges immediately
mi_option_deprecated_segment_reset,
mi_option_eager_commit_delay,
mi_option_purge_delay, // memory purging is delayed by N milliseconds; use 0 for immediate purging or -1 for no purging at all.
mi_option_use_numa_nodes, // 0 = use all available NUMA nodes, otherwise use at most N nodes.
mi_option_limit_os_alloc, // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
mi_option_os_tag, // tag used for OS logging (macOS only for now)
mi_option_max_errors, // issue at most N error messages
mi_option_max_warnings, // issue at most N warning messages
mi_option_max_segment_reclaim,
mi_option_destroy_on_exit, // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
mi_option_arena_reserve, // initial memory size in KiB for arena reservation (1GiB on 64-bit)
mi_option_arena_purge_mult,
mi_option_purge_extend_delay,
_mi_option_last,
// legacy option names
mi_option_large_os_pages = mi_option_allow_large_os_pages,
mi_option_eager_region_commit = mi_option_arena_eager_commit,
mi_option_reset_decommits = mi_option_purge_decommits,
mi_option_reset_delay = mi_option_purge_delay,
mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;
mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);
mi_decl_nodiscard mi_decl_export long mi_option_get(mi_option_t option);
mi_decl_nodiscard mi_decl_export long mi_option_get_clamp(mi_option_t option, long min, long max);
mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
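// Example (editor's illustrative sketch): setting options programmatically
// before first use; options can generally also be set through `MIMALLOC_`
// prefixed environment variables (e.g. MIMALLOC_SHOW_STATS=1).
//
//   mi_option_set(mi_option_purge_delay, 100);  // delay purges by 100 ms
//   mi_option_enable(mi_option_show_stats);     // print statistics on exit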
// -------------------------------------------------------------------------------------------------------
// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions.
// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.)
// note: `mi_cfree` acts as a "checked free": it verifies that the pointer is in our heap before freeing.
// -------------------------------------------------------------------------------------------------------
mi_decl_export void mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;
mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export int mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;
mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;
// The `mi_new` wrappers implement C++ semantics on out-of-memory: instead of returning `NULL`,
// they call `std::get_new_handler` and may raise a `std::bad_alloc` exception.
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);
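// Example (editor's illustrative sketch): unlike a raw `mi_malloc(count*size)`,
// `mi_new_n` checks the multiplication for overflow and follows C++ semantics
// on out-of-memory; the element count is arbitrary.
//
//   int* arr = (int*)mi_new_n(1000, sizeof(int));  // may throw std::bad_alloc
//   mi_free(arr);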
#ifdef __cplusplus
}
#endif
// ---------------------------------------------------------------------------------------------
// Implement the C++ std::allocator interface for use in STL containers.
// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally)
// ---------------------------------------------------------------------------------------------
#ifdef __cplusplus
#include <cstddef> // std::size_t
#include <cstdint> // PTRDIFF_MAX
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
#include <type_traits> // std::true_type
#include <utility> // std::forward
#endif
template<class T> struct _mi_stl_allocator_common {
typedef T value_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef value_type& reference;
typedef value_type const& const_reference;
typedef value_type* pointer;
typedef value_type const* const_pointer;
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
using propagate_on_container_copy_assignment = std::true_type;
using propagate_on_container_move_assignment = std::true_type;
using propagate_on_container_swap = std::true_type;
template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
#else
void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
void destroy(pointer p) { p->~value_type(); }
#endif
size_type max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
};
template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
using typename _mi_stl_allocator_common<T>::size_type;
using typename _mi_stl_allocator_common<T>::value_type;
using typename _mi_stl_allocator_common<T>::pointer;
template <class U> struct rebind { typedef mi_stl_allocator<U> other; };
mi_stl_allocator() mi_attr_noexcept = default;
mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept = default;
template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
mi_stl_allocator select_on_container_copy_construction() const { return *this; }
void deallocate(T* p, size_type) { mi_free(p); }
#if (__cplusplus >= 201703L) // C++17
mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
#else
mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
#endif
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
using is_always_equal = std::true_type;
#endif
};
template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
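// Example (editor's illustrative sketch): plugging the allocator into a
// standard container.
//
//   #include <vector>
//   std::vector<int, mi_stl_allocator<int>> v;
//   v.push_back(42);   // the vector's storage is allocated through mimalloc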
#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11
#define MI_HAS_HEAP_STL_ALLOCATOR 1
#include <memory> // std::shared_ptr
// Common base class for STL allocators in a specific heap
template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
using typename _mi_stl_allocator_common<T>::size_type;
using typename _mi_stl_allocator_common<T>::value_type;
using typename _mi_stl_allocator_common<T>::pointer;
_mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp) { } /* will not delete nor destroy the passed in heap */
#if (__cplusplus >= 201703L) // C++17
mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
#else
mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
#endif
#if ((__cplusplus >= 201103L) || (_MSC_VER > 1900)) // C++11
using is_always_equal = std::false_type;
#endif
void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }
protected:
std::shared_ptr<mi_heap_t> heap;
template<class U, bool D> friend struct _mi_heap_stl_allocator_common;
_mi_heap_stl_allocator_common() {
mi_heap_t* hp = mi_heap_new();
this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
}
_mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }
private:
static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } }
static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
};
// STL allocator allocation in a specific heap
template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
using typename _mi_heap_stl_allocator_common<T, false>::size_type;
mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { } // creates fresh heap that is deleted when the destructor is called
mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { } // no delete nor destroy on the passed in heap
template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }
mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
void deallocate(T* p, size_type) { mi_free(p); }
template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
};
template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
// STL allocator allocation in a specific heap, where `free` does nothing and
// the heap is destroyed in one go on destruction -- use with care!
template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
using typename _mi_heap_stl_allocator_common<T, true>::size_type;
mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { } // creates fresh heap that is destroyed when the destructor is called
mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { } // no delete nor destroy on the passed in heap
template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }
mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
};
template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
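// Example (editor's illustrative sketch): a container bound to its own heap;
// with `mi_heap_destroy_stl_allocator`, per-block deallocation is a no-op and
// all memory is released at once when the last allocator copy is destroyed.
// The container must not be used after that point.
//
//   #include <vector>
//   {
//     mi_heap_destroy_stl_allocator<int> alloc;
//     std::vector<int, mi_heap_destroy_stl_allocator<int>> v(alloc);
//     v.assign(1000, 7);
//   }  // heap destroyed here; the earlier deallocate() calls did nothing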
#endif // C++11
#endif // __cplusplus
#endif


@@ -0,0 +1,392 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_ATOMIC_H
#define MIMALLOC_ATOMIC_H
// --------------------------------------------------------------------------------------------
// Atomics
// We need to be portable between C, C++, and MSVC.
// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
// This is why we try to use only `uintptr_t` and `<type>*` as atomic types.
// To gain better insight into the range of atomics used, we use explicitly named memory order operations
// instead of passing the memory order as a parameter.
// -----------------------------------------------------------------------------------------------
#if defined(__cplusplus)
// Use C++ atomics
#include <atomic>
#define _Atomic(tp) std::atomic<tp>
#define mi_atomic(name) std::atomic_##name
#define mi_memory_order(name) std::memory_order_##name
#if (__cplusplus >= 202002L) // c++20, see issue #571
#define MI_ATOMIC_VAR_INIT(x) x
#elif !defined(ATOMIC_VAR_INIT)
#define MI_ATOMIC_VAR_INIT(x) x
#else
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
#endif
#elif defined(_MSC_VER)
// Use MSVC C wrapper for C11 atomics
#define _Atomic(tp) tp
#define MI_ATOMIC_VAR_INIT(x) x
#define mi_atomic(name) mi_atomic_##name
#define mi_memory_order(name) mi_memory_order_##name
#else
// Use C11 atomics
#include <stdatomic.h>
#define mi_atomic(name) atomic_##name
#define mi_memory_order(name) memory_order_##name
#if (__STDC_VERSION__ >= 201710L) // c17, see issue #735
#define MI_ATOMIC_VAR_INIT(x) x
#elif !defined(ATOMIC_VAR_INIT)
#define MI_ATOMIC_VAR_INIT(x) x
#else
#define MI_ATOMIC_VAR_INIT(x) ATOMIC_VAR_INIT(x)
#endif
#endif
// Various defines for all used memory orders in mimalloc
#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail) \
mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail)
#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail) \
mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail)
#define mi_atomic_load_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_load_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_store_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_store_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_exchange_release(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_exchange_acq_rel(p,x) mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_cas_weak_release(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_weak_acq_rel(p,exp,des) mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
#define mi_atomic_cas_strong_release(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_strong_acq_rel(p,exp,des) mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
#define mi_atomic_add_relaxed(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_sub_relaxed(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_add_acq_rel(p,x) mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_sub_acq_rel(p,x) mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_and_acq_rel(p,x) mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_or_acq_rel(p,x) mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_increment_relaxed(p) mi_atomic_add_relaxed(p,(uintptr_t)1)
#define mi_atomic_decrement_relaxed(p) mi_atomic_sub_relaxed(p,(uintptr_t)1)
#define mi_atomic_increment_acq_rel(p) mi_atomic_add_acq_rel(p,(uintptr_t)1)
#define mi_atomic_decrement_acq_rel(p) mi_atomic_sub_acq_rel(p,(uintptr_t)1)
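// Example (editor's illustrative sketch): a relaxed counter and a weak CAS
// retry loop; `counter` and `head` are hypothetical variables.
//
//   static _Atomic(uintptr_t) counter = MI_ATOMIC_VAR_INIT(0);
//   mi_atomic_increment_relaxed(&counter);
//
//   static _Atomic(uintptr_t) head = MI_ATOMIC_VAR_INIT(0);
//   uintptr_t expected = mi_atomic_load_relaxed(&head);
//   while (!mi_atomic_cas_weak_acq_rel(&head, &expected, expected + 1)) { /* retry */ }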
static inline void mi_atomic_yield(void);
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add);
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);
#if defined(__cplusplus) || !defined(_MSC_VER)
// In C++/C11 atomics we have polymorphic atomics, so we can use the typed `ptr` variants (where `tp` is the type of the atomic value).
// We use these macros so we can provide a typed wrapper for MSVC in C compilation mode as well.
#define mi_atomic_load_ptr_acquire(tp,p) mi_atomic_load_acquire(p)
#define mi_atomic_load_ptr_relaxed(tp,p) mi_atomic_load_relaxed(p)
// In C++ we need to add casts to help resolve templates if NULL is passed
#if defined(__cplusplus)
#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,(tp*)x)
#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,(tp*)x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,(tp*)des)
#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,(tp*)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,(tp*)x)
#else
#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release(p,x)
#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed(p,x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release(p,exp,des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel(p,exp,des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release(p,exp,des)
#define mi_atomic_exchange_ptr_release(tp,p,x) mi_atomic_exchange_release(p,x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) mi_atomic_exchange_acq_rel(p,x)
#endif
// These are used by the statistics
static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
}
static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };
}
// Used by timers
#define mi_atomic_loadi64_acquire(p) mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p) mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_casi64_strong_acq_rel(p,e,d) mi_atomic_cas_strong_acq_rel(p,e,d)
#define mi_atomic_addi64_acq_rel(p,i) mi_atomic_add_acq_rel(p,i)
#elif defined(_MSC_VER)
// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <intrin.h>
#ifdef _WIN64
typedef LONG64 msc_intptr_t;
#define MI_64(f) f##64
#else
typedef LONG msc_intptr_t;
#define MI_64(f) f
#endif
typedef enum mi_memory_order_e {
mi_memory_order_relaxed,
mi_memory_order_consume,
mi_memory_order_acquire,
mi_memory_order_release,
mi_memory_order_acq_rel,
mi_memory_order_seq_cst
} mi_memory_order;
static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) {
(void)(mo);
return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
}
static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
(void)(mo);
return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
}
static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
(void)(mo);
return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
(void)(mo);
return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
(void)(mo1); (void)(mo2);
uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
if (read == *expected) {
return true;
}
else {
*expected = read;
return false;
}
}
static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
}
static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) {
(void)(mo);
return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
}
static inline void mi_atomic_thread_fence(mi_memory_order mo) {
(void)(mo);
_Atomic(uintptr_t) x = 0;
mi_atomic_exchange_explicit(&x, 1, mo);
}
static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
(void)(mo);
#if defined(_M_IX86) || defined(_M_X64)
return *p;
#else
uintptr_t x = *p;
if (mo > mi_memory_order_relaxed) {
while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ };
}
return x;
#endif
}
static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
(void)(mo);
#if defined(_M_IX86) || defined(_M_X64)
*p = x;
#else
mi_atomic_exchange_explicit(p, x, mo);
#endif
}
static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) {
(void)(mo);
#if defined(_M_X64)
return *p;
#else
int64_t old = *p;
int64_t x = old;
while ((old = InterlockedCompareExchange64(p, x, old)) != x) {
x = old;
}
return x;
#endif
}
static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) {
(void)(mo);
#if defined(_M_X64) // an aligned 64-bit store is atomic on x64; 32-bit x86 needs the interlocked exchange below
*p = x;
#else
InterlockedExchange64(p, x);
#endif
}
// These are used by the statistics
static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) {
#ifdef _WIN64
return (int64_t)mi_atomic_addi((int64_t*)p, add);
#else
int64_t current;
int64_t sum;
do {
current = *p;
sum = current + add;
} while (_InterlockedCompareExchange64(p, sum, current) != current);
return current;
#endif
}
static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
int64_t current;
do {
current = *p;
} while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}
static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t)* p, int64_t i) {
mi_atomic_addi64_relaxed(p, i);
}
static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t)* p, int64_t* exp, int64_t des) {
int64_t read = _InterlockedCompareExchange64(p, des, *exp);
if (read == *exp) {
return true;
}
else {
*exp = read;
return false;
}
}
// The pointer macros cast to `uintptr_t`.
#define mi_atomic_load_ptr_acquire(tp,p) (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
#define mi_atomic_load_ptr_relaxed(tp,p) (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
#define mi_atomic_store_ptr_release(tp,p,x) mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_store_ptr_relaxed(tp,p,x) mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des) mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des) mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des) mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_exchange_ptr_release(tp,p,x) (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x) (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
#define mi_atomic_loadi64_acquire(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p) mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x) mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed))
#endif
// Atomically add a signed value; returns the previous value.
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) {
return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add);
}
// Atomically subtract a signed value; returns the previous value.
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
return (intptr_t)mi_atomic_addi(p, -sub);
}
typedef _Atomic(uintptr_t) mi_atomic_once_t;
// Returns true only on the first invocation
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
uintptr_t expected = 0;
return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1); // try to set to 1
}
typedef _Atomic(uintptr_t) mi_atomic_guard_t;
// Allows at most one thread to execute the guarded block at a time; contending threads skip the block
#define mi_atomic_guard(guard) \
uintptr_t _mi_guard_expected = 0; \
for(bool _mi_guard_once = true; \
_mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
(mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
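// Example (editor's illustrative sketch): one-time initialization and a
// skip-if-busy critical section; `init_table` is hypothetical.
//
//   static mi_atomic_once_t once;
//   if (mi_atomic_once(&once)) { init_table(); }  // runs exactly once overall
//
//   static mi_atomic_guard_t guard;
//   mi_atomic_guard(&guard) {
//     // at most one thread runs this block at a time; contenders skip it
//   }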
// Yield
#if defined(__cplusplus)
#include <thread>
static inline void mi_atomic_yield(void) {
std::this_thread::yield();
}
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline void mi_atomic_yield(void) {
YieldProcessor();
}
#elif defined(__SSE2__)
#include <emmintrin.h>
static inline void mi_atomic_yield(void) {
_mm_pause();
}
#elif (defined(__GNUC__) || defined(__clang__)) && \
(defined(__x86_64__) || defined(__i386__) || \
defined(__aarch64__) || defined(__arm__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
#if defined(__x86_64__) || defined(__i386__)
static inline void mi_atomic_yield(void) {
__asm__ volatile ("pause" ::: "memory");
}
#elif defined(__aarch64__)
static inline void mi_atomic_yield(void) {
__asm__ volatile("wfe");
}
#elif defined(__arm__)
#if __ARM_ARCH >= 7
static inline void mi_atomic_yield(void) {
__asm__ volatile("yield" ::: "memory");
}
#else
static inline void mi_atomic_yield(void) {
__asm__ volatile ("nop" ::: "memory");
}
#endif
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
#ifdef __APPLE__
static inline void mi_atomic_yield(void) {
__asm__ volatile ("or r27,r27,r27" ::: "memory");
}
#else
static inline void mi_atomic_yield(void) {
__asm__ __volatile__ ("or 27,27,27" ::: "memory");
}
#endif
#endif
#elif defined(__sun)
// Solaris
#include <synch.h>
static inline void mi_atomic_yield(void) {
smt_pause();
}
#elif defined(__wasi__)
#include <sched.h>
static inline void mi_atomic_yield(void) {
sched_yield();
}
#else
#include <unistd.h>
static inline void mi_atomic_yield(void) {
sleep(0);
}
#endif
#endif // MIMALLOC_ATOMIC_H


@@ -0,0 +1,969 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_INTERNAL_H
#define MIMALLOC_INTERNAL_H
// --------------------------------------------------------------------------
// This file contains the internal APIs of mimalloc and various utility
// functions and macros.
// --------------------------------------------------------------------------
#include "types.h"
#include "track.h"
#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
#else
#define mi_trace_message(...)
#endif
#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
#define __wasi__
#endif
#if defined(__cplusplus)
#define mi_decl_externc extern "C"
#else
#define mi_decl_externc
#endif
// pthreads
#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
#endif
// "options.c"
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
void _mi_warning_message(const char* fmt, ...);
void _mi_verbose_message(const char* fmt, ...);
void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_error_message(int err, const char* fmt, ...);
// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
void _mi_random_init_weak(mi_random_ctx_t* ctx);
void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t _mi_heap_random_next(mi_heap_t* heap);
uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);
// init.c
extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
bool _mi_preloading(void); // true while the C runtime is not initialized yet
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
void _mi_thread_done(mi_heap_t* heap);
void _mi_thread_data_collect(void);
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);
// os.c
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);
size_t _mi_os_page_size(void);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
bool _mi_os_has_virtual_reserve(void);
bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);
void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
bool _mi_os_use_large_page(size_t size, size_t alignment);
size_t _mi_os_large_page_size(void);
void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);
// arena.c
mi_arena_id_t _mi_arena_id_none(void);
void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
bool _mi_arena_contains(const void* p);
void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);
// "segment-map.c"
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
void _mi_segment_map_freed_at(const mi_segment_t* segment);
// "segment.c"
extern mi_abandoned_pool_t _mi_abandoned_default; // global abandoned pool
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
bool _mi_abandoned_pool_visit_blocks(mi_abandoned_pool_t* pool, uint8_t page_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);
#if MI_HUGE_PAGE_ABANDON
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#else
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif
uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(mi_abandoned_pool_t *pool);
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
void _mi_heap_delayed_free_all(mi_heap_t* heap);
bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);
void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);
void _mi_page_free_collect(mi_page_t* page,bool force);
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments
size_t _mi_bin_size(uint8_t bin); // for stats
uint8_t _mi_bin(size_t size); // for stats
// "heap.c"
void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag);
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
void _mi_heap_unsafe_destroy_all(void);
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_block_visit_fun* visitor, void* arg);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
// "alloc.c"
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
// option.c, c primitives
char _mi_toupper(char c);
int _mi_strnicmp(const char* s, const char* t, size_t n);
void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
void _mi_strlcat(char* dest, const char* src, size_t dest_size);
size_t _mi_strlen(const char* s);
size_t _mi_strnlen(const char* s, size_t max_len);
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
#endif
// ------------------------------------------------------
// Branches
// ------------------------------------------------------
#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x) (__builtin_expect(!!(x),false))
#define mi_likely(x) (__builtin_expect(!!(x),true))
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define mi_unlikely(x) (x) [[unlikely]]
#define mi_likely(x) (x) [[likely]]
#else
#define mi_unlikely(x) (x)
#define mi_likely(x) (x)
#endif
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
/* -----------------------------------------------------------
Error codes passed to `_mi_fatal_error`
All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
For portability define undefined error codes using common Unix codes:
<https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
----------------------------------------------------------- */
#include <errno.h>
#ifndef EAGAIN // double free
#define EAGAIN (11)
#endif
#ifndef ENOMEM // out of memory
#define ENOMEM (12)
#endif
#ifndef EFAULT // corrupted free-list or meta-data
#define EFAULT (14)
#endif
#ifndef EINVAL // trying to free an invalid pointer
#define EINVAL (22)
#endif
#ifndef EOVERFLOW // count*size overflow
#define EOVERFLOW (75)
#endif
/* -----------------------------------------------------------
Inlined definitions
----------------------------------------------------------- */
#define MI_UNUSED(x) (void)(x)
#if (MI_DEBUG>0)
#define MI_UNUSED_RELEASE(x)
#else
#define MI_UNUSED_RELEASE(x) MI_UNUSED(x)
#endif
#define MI_INIT4(x) x(),x(),x(),x()
#define MI_INIT8(x) MI_INIT4(x),MI_INIT4(x)
#define MI_INIT16(x) MI_INIT8(x),MI_INIT8(x)
#define MI_INIT32(x) MI_INIT16(x),MI_INIT16(x)
#define MI_INIT64(x) MI_INIT32(x),MI_INIT32(x)
#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)
#include <string.h>
// initialize a local variable to zero; use memset as compilers optimize constant-sized memsets
#define _mi_memzero_var(x) memset(&x,0,sizeof(x))
// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
return ((x & (x - 1)) == 0);
}
// Is a pointer aligned?
static inline bool _mi_is_aligned(void* p, size_t alignment) {
mi_assert_internal(alignment != 0);
return (((uintptr_t)p % alignment) == 0);
}
// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
uintptr_t mask = alignment - 1;
if ((alignment & mask) == 0) { // power of two?
return ((sz + mask) & ~mask);
}
else {
return (((sz + mask)/alignment)*alignment);
}
}
// Align downwards
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
mi_assert_internal(alignment != 0);
uintptr_t mask = alignment - 1;
if ((alignment & mask) == 0) { // power of two?
return (sz & ~mask);
}
else {
return ((sz / alignment) * alignment);
}
}
// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
mi_assert_internal(divider != 0);
return (divider == 0 ? size : ((size + divider - 1) / divider));
}
// Is memory zero initialized?
static inline bool mi_mem_is_zero(const void* p, size_t size) {
for (size_t i = 0; i < size; i++) {
if (((uint8_t*)p)[i] != 0) return false;
}
return true;
}
// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}
// Overflow detecting multiply
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h> // UINT_MAX, ULONG_MAX
#if defined(_CLOCK_T) // for Illumos
#undef _CLOCK_T
#endif
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#if (SIZE_MAX == ULONG_MAX)
return __builtin_umull_overflow(count, size, (unsigned long *)total);
#elif (SIZE_MAX == UINT_MAX)
return __builtin_umul_overflow(count, size, (unsigned int *)total);
#else
return __builtin_umulll_overflow(count, size, (unsigned long long *)total);
#endif
}
#else /* __builtin_umul_overflow is unavailable */
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
#define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t))) // sqrt(SIZE_MAX)
*total = count * size;
// note: gcc/clang optimize this to directly check the overflow flag
return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif
// Safe multiply `count*size` into `total`; return `true` on overflow.
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
if (count==1) { // quick check for the case where count is one (common for C++ allocators)
*total = size;
return false;
}
else if mi_unlikely(mi_mul_overflow(count, size, total)) {
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
#endif
*total = SIZE_MAX;
return true;
}
else return false;
}
/*----------------------------------------------------------------------------------------
Heap functions
------------------------------------------------------------------------------------------- */
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
return (heap->tld->heap_backing == heap);
}
static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
mi_assert_internal(heap != NULL);
return (heap != &_mi_heap_empty);
}
static inline uintptr_t _mi_ptr_cookie(const void* p) {
extern mi_heap_t _mi_heap_main;
mi_assert_internal(_mi_heap_main.cookie != 0);
return ((uintptr_t)p ^ _mi_heap_main.cookie);
}
/* -----------------------------------------------------------
Pages
----------------------------------------------------------- */
static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
const size_t idx = _mi_wsize_from_size(size);
mi_assert_internal(idx < MI_PAGES_DIRECT);
return heap->pages_free_direct[idx];
}
// Segment that contains the pointer
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
// and we need to align "down" to the segment info, which is `MI_SEGMENT_SIZE` bytes before it;
// therefore we align one byte before `p`.
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
mi_assert_internal(p != NULL);
return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
}
static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
mi_assert_internal(s->slice_offset== 0 && s->slice_count > 0);
return (mi_page_t*)(s);
}
static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
mi_assert_internal(p->slice_offset== 0 && p->slice_count > 0);
return (mi_slice_t*)(p);
}
// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
mi_segment_t* segment = _mi_ptr_segment(page);
mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
return segment;
}
static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
mi_assert_internal(start->slice_offset == 0);
mi_assert_internal(start + start->slice_count > slice);
return start;
}
// Get the page containing the pointer (performance critical as it is called in mi_free)
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
mi_assert_internal(p > (void*)segment);
ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
mi_assert_internal(idx <= segment->slice_entries);
mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
mi_slice_t* slice = mi_slice_first(slice0); // adjust to the block that holds the page data
mi_assert_internal(slice->slice_offset == 0);
mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
return mi_slice_to_page(slice);
}
// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
return _mi_segment_page_start(segment, page, page_size);
}
// Get the page containing the pointer
static inline mi_page_t* _mi_ptr_page(void* p) {
return _mi_segment_page_of(_mi_ptr_segment(p), p);
}
// Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
const size_t bsize = page->xblock_size;
mi_assert_internal(bsize > 0);
if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
return bsize;
}
else {
size_t psize;
_mi_segment_page_start(_mi_page_segment(page), page, &psize);
return psize;
}
}
static inline bool mi_page_is_huge(const mi_page_t* page) {
return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
}
// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
return mi_page_block_size(page) - MI_PADDING_SIZE;
}
// size of a segment
static inline size_t mi_segment_size(mi_segment_t* segment) {
return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
}
static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
return (uint8_t*)segment + mi_segment_size(segment);
}
// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
}
static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
}
// Heap access
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
}
static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
mi_atomic_store_release(&page->xheap,(uintptr_t)heap);
}
// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
return (mi_block_t*)(tf & ~0x03);
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
return (mi_delayed_t)(tf & 0x03);
}
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
}
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
return mi_tf_make(mi_tf_block(tf),delayed);
}
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
return mi_tf_make(block, mi_tf_delayed(tf));
}
// are all blocks in a page freed?
// note: needs an up-to-date `used` count (as the `xthread_free` list may not be empty); see `_mi_page_collect_free`.
static inline bool mi_page_all_free(const mi_page_t* page) {
mi_assert_internal(page != NULL);
return (page->used == 0);
}
// are there any available blocks?
static inline bool mi_page_has_any_available(const mi_page_t* page) {
mi_assert_internal(page != NULL && page->reserved > 0);
return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
}
// are there immediately available blocks, i.e. blocks on the free list?
static inline bool mi_page_immediate_available(const mi_page_t* page) {
mi_assert_internal(page != NULL);
return (page->free != NULL);
}
// is more than 7/8th of a page in use?
static inline bool mi_page_mostly_used(const mi_page_t* page) {
if (page==NULL) return true;
uint16_t frac = page->reserved / 8U;
return (page->reserved - page->used <= frac);
}
static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
}
//-----------------------------------------------------------
// Page flags
//-----------------------------------------------------------
static inline bool mi_page_is_in_full(const mi_page_t* page) {
return page->flags.x.in_full;
}
static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
page->flags.x.in_full = in_full;
}
static inline bool mi_page_has_aligned(const mi_page_t* page) {
return page->flags.x.has_aligned;
}
static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
page->flags.x.has_aligned = has_aligned;
}
/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers
This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
values but might be still too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read as well, the attacker can
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).
Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
Since these operations are not associative, the above approaches do not
work so well anymore, even if `p` can be guessed. For example,
for the read case we can subtract two entries to discard the `+k1` term,
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
We include the left-rotation since xor and addition are otherwise linear
in the lowest bit. Finally, both keys are unique per page which reduces
the re-use of keys by a large factor.
We also pass a separate `null` value to be used as `NULL` or otherwise
`(k2<<<k1)+k1` would appear (too) often as a sentinel value.
------------------------------------------------------------------- */
static inline bool mi_is_in_same_segment(const void* p, const void* q) {
return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
}
static inline bool mi_is_in_same_page(const void* p, const void* q) {
mi_segment_t* segment = _mi_ptr_segment(p);
if (_mi_ptr_segment(q) != segment) return false;
// assume q may be invalid // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
mi_page_t* page = _mi_segment_page_of(segment, p);
size_t psize;
uint8_t* start = _mi_segment_page_start(segment, page, &psize);
return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}
static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
shift %= MI_INTPTR_BITS;
return (shift==0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
}
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
shift %= MI_INTPTR_BITS;
return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
}
static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
return (p==null ? NULL : p);
}
static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
uintptr_t x = (uintptr_t)(p==NULL ? null : p);
return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
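// Illustrative sketch (not in the original source): the pair above round-trips,
// i.e. `mi_ptr_decode(mi_ptr_encode(p)) == p` for any `p` other than the `null`
// sentinel itself, since `mi_rotr((mi_rotl(x ^ k2, k1) + k1) - k1, k1) ^ k2 == x`.
static inline bool mi_ptr_encode_roundtrips(const void* null, const void* p, const uintptr_t* keys) {
  return (mi_ptr_decode(null, mi_ptr_encode(null, p, keys), keys) == p);
}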
static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
mi_track_mem_defined(block,sizeof(mi_block_t));
mi_block_t* next;
#ifdef MI_ENCODE_FREELIST
next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
#else
MI_UNUSED(keys); MI_UNUSED(null);
next = (mi_block_t*)block->next;
#endif
mi_track_mem_noaccess(block,sizeof(mi_block_t));
return next;
}
static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
mi_track_mem_undefined(block,sizeof(mi_block_t));
#ifdef MI_ENCODE_FREELIST
block->next = mi_ptr_encode(null, next, keys);
#else
MI_UNUSED(keys); MI_UNUSED(null);
block->next = (mi_encoded_t)next;
#endif
mi_track_mem_noaccess(block,sizeof(mi_block_t));
}
static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
#ifdef MI_ENCODE_FREELIST
mi_block_t* next = mi_block_nextx(page,block,page->keys);
// check for free list corruption: is `next` at least in the same page?
// TODO: check if `next` is `page->block_size` aligned?
if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
_mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
next = NULL;
}
return next;
#else
MI_UNUSED(page);
return mi_block_nextx(page,block,NULL);
#endif
}
static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
#ifdef MI_ENCODE_FREELIST
mi_block_set_nextx(page,block,next, page->keys);
#else
MI_UNUSED(page);
mi_block_set_nextx(page,block,next,NULL);
#endif
}
// -------------------------------------------------------------------
// commit mask
// -------------------------------------------------------------------
static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
cm->mask[i] = 0;
}
}
static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
cm->mask[i] = ~((size_t)0);
}
}
static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
if (cm->mask[i] != 0) return false;
}
return true;
}
static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
if (cm->mask[i] != ~((size_t)0)) return false;
}
return true;
}
// defined in `segment.c`:
size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);
#define mi_commit_mask_foreach(cm,idx,count) \
idx = 0; \
while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {
#define mi_commit_mask_foreach_end() \
idx += count; \
}
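// Illustrative sketch (not part of the original header): walking the runs of set
// bits with the foreach macros, here to count the committed chunks in a mask.
static inline size_t mi_commit_mask_count_sketch(const mi_commit_mask_t* cm) {
  size_t idx;
  size_t count;
  size_t committed = 0;
  mi_commit_mask_foreach(cm, idx, count)
    committed += count;  // each run is `count` committed chunks starting at bit `idx`
  mi_commit_mask_foreach_end()
  return committed;
}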
/* -----------------------------------------------------------
memory ids
----------------------------------------------------------- */
static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
mi_memid_t memid;
_mi_memzero_var(memid);
memid.memkind = memkind;
return memid;
}
static inline mi_memid_t _mi_memid_none(void) {
return _mi_memid_create(MI_MEM_NONE);
}
static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) {
mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
memid.initially_committed = committed;
memid.initially_zero = is_zero;
memid.is_pinned = is_large;
return memid;
}
// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------
static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
if (x==0) { x = 17; } // ensure we don't get stuck in generating zeros
#if (MI_INTPTR_SIZE==8)
// by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
x ^= x >> 30;
x *= 0xbf58476d1ce4e5b9UL;
x ^= x >> 27;
x *= 0x94d049bb133111ebUL;
x ^= x >> 31;
#elif (MI_INTPTR_SIZE==4)
// by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
x ^= x >> 16;
x *= 0x7feb352dUL;
x ^= x >> 15;
x *= 0x846ca68bUL;
x ^= x >> 16;
#endif
return x;
}
// -------------------------------------------------------------------
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------
int _mi_os_numa_node_get(mi_os_tld_t* tld);
size_t _mi_os_numa_node_count_get(void);
extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
if mi_likely(count > 0) { return count; }
else return _mi_os_numa_node_count_get();
}
// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
// -----------------------------------------------------------------------
#if defined(__GNUC__)
#include <limits.h> // LONG_MAX
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
return __builtin_clzl(x);
#else
return __builtin_clzll(x);
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
return __builtin_ctzl(x);
#else
return __builtin_ctzll(x);
#endif
}
#elif defined(_MSC_VER)
#include <limits.h> // LONG_MAX
#include <intrin.h> // BitScanReverse64
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
_BitScanReverse(&idx, x);
#else
_BitScanReverse64(&idx, x);
#endif
return ((MI_INTPTR_BITS - 1) - idx);
}
static inline size_t mi_ctz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
_BitScanForward(&idx, x);
#else
_BitScanForward64(&idx, x);
#endif
return idx;
}
#else
static inline size_t mi_ctz32(uint32_t x) {
// de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
static const unsigned char debruijn[32] = {
0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
};
if (x==0) return 32;
return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
}
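// Example: for x == 0x18, `x & -x` isolates the lowest set bit (0x08); the
// de Bruijn multiply then maps that single bit to table index 7, and
// debruijn[7] == 3 == mi_ctz32(0x18).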
static inline size_t mi_clz32(uint32_t x) {
// de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
static const uint8_t debruijn[32] = {
31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
};
if (x==0) return 32;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
}
static inline size_t mi_clz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
return mi_clz32((uint32_t)x);
#else
size_t count = mi_clz32((uint32_t)(x >> 32));
if (count < 32) return count;
return (32 + mi_clz32((uint32_t)x));
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
return mi_ctz32((uint32_t)x);
#else
size_t count = mi_ctz32((uint32_t)x);
if (count < 32) return count;
return (32 + mi_ctz32((uint32_t)(x>>32)));
#endif
}
#endif
// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
static inline size_t mi_bsr(uintptr_t x) {
return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
}
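// e.g. mi_bsr(1) == 0, mi_bsr(12) == 3 (0b1100), and mi_bsr(0) == MI_INTPTR_BITS.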
// ---------------------------------------------------------------------------------
// Provide our own `_mi_memcpy` for potential performance optimizations.
//
// For now, only on Windows with msvc/clang-cl do we optimize to `rep movsb` if
// we happen to run on x86/x64 CPUs that have "fast short rep movsb" (FSRM) support
// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)). See also issue #201 and PR #253.
// ---------------------------------------------------------------------------------
#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
extern bool _mi_cpu_has_fsrm;
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
if (_mi_cpu_has_fsrm) {
__movsb((unsigned char*)dst, (const unsigned char*)src, n);
}
else {
memcpy(dst, src, n);
}
}
static inline void _mi_memzero(void* dst, size_t n) {
if (_mi_cpu_has_fsrm) {
__stosb((unsigned char*)dst, 0, n);
}
else {
memset(dst, 0, n);
}
}
#else
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
memcpy(dst, src, n);
}
static inline void _mi_memzero(void* dst, size_t n) {
memset(dst, 0, n);
}
#endif
// -------------------------------------------------------------------------------
// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
// This is used for example in `mi_realloc`.
// -------------------------------------------------------------------------------
#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
// On GCC/Clang we provide a hint that the pointers are word aligned.
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
_mi_memcpy(adst, asrc, n);
}
static inline void _mi_memzero_aligned(void* dst, size_t n) {
mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
_mi_memzero(adst, n);
}
#else
// Default fallback on `_mi_memcpy`
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
_mi_memcpy(dst, src, n);
}
static inline void _mi_memzero_aligned(void* dst, size_t n) {
mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
_mi_memzero(dst, n);
}
#endif
#endif

View File

@@ -0,0 +1,329 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_PRIM_H
#define MIMALLOC_PRIM_H
// --------------------------------------------------------------------------
// This file specifies the primitive portability API.
// Each OS/host needs to implement these primitives, see `src/prim`
// for implementations on Windows, macOS, WASI, and Linux/Unix.
//
// note: on all primitive functions, we always have result parameters != NULL, and:
//   addr != NULL and page aligned
//   size > 0 and page aligned
//   the return value is an error code: an int where 0 means success
// --------------------------------------------------------------------------
// OS memory configuration
typedef struct mi_os_mem_config_s {
size_t page_size; // 4KiB
size_t large_page_size; // 2MiB
size_t alloc_granularity; // smallest allocation size (on Windows 64KiB)
bool has_overcommit; // can we reserve more memory than can be actually committed?
bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
bool has_virtual_reserve; // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
} mi_os_mem_config_t;
// Initialize
void _mi_prim_mem_init( mi_os_mem_config_t* config );
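// Illustrative sketch (not part of this header): how a POSIX-style port might fill
// in the configuration; the real implementations live under `src/prim`. The values
// below are assumptions for a typical Linux host, and the function name is hypothetical.
static inline void mi_prim_mem_init_posix_sketch(mi_os_mem_config_t* config) {
  config->page_size           = 4096;        // 4KiB pages
  config->large_page_size     = 2*1024*1024; // 2MiB huge pages
  config->alloc_granularity   = 4096;        // mmap allocates at page granularity
  config->has_overcommit      = true;        // Linux overcommits by default
  config->must_free_whole     = false;       // munmap can free partial ranges
  config->has_virtual_reserve = true;        // mmap(PROT_NONE) reserves address space
}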
// Free OS memory
int _mi_prim_free(void* addr, size_t size );
// Allocate OS memory. Return NULL on error.
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// which will later be committed explicitly using `_mi_prim_commit`.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: !commit => !allow_large
// try_alignment >= _mi_os_page_size() and a power of 2
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);
// Commit memory. Returns error code or 0 on success.
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
int _mi_prim_commit(void* addr, size_t size, bool* is_zero);
// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
// if the memory would need to be re-committed. For example, on Windows this is always true,
// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
// pre: needs_recommit != NULL
int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);
// Reset memory. The range keeps being accessible but the content might be reset.
// Returns error code or 0 on success.
int _mi_prim_reset(void* addr, size_t size);
// Protect memory. Returns error code or 0 on success.
int _mi_prim_protect(void* addr, size_t size, bool protect);
// Allocate huge (1GiB) pages possibly associated with a NUMA node.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: size > 0 and a multiple of 1GiB.
// numa_node is either negative (don't care), or a numa node number.
int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);
// Return the current NUMA node
size_t _mi_prim_numa_node(void);
// Return the number of logical NUMA nodes
size_t _mi_prim_numa_node_count(void);
// Clock ticks
mi_msecs_t _mi_prim_clock_now(void);
// Return process information (only for statistics)
typedef struct mi_process_info_s {
mi_msecs_t elapsed;
mi_msecs_t utime;
mi_msecs_t stime;
size_t current_rss;
size_t peak_rss;
size_t current_commit;
size_t peak_commit;
size_t page_faults;
} mi_process_info_t;
void _mi_prim_process_info(mi_process_info_t* pinfo);
// Default stderr output. (only for warnings etc. with verbose enabled)
// msg != NULL && _mi_strlen(msg) > 0
void _mi_prim_out_stderr( const char* msg );
// Get an environment variable. (only for options)
// name != NULL, result != NULL, result_size >= 64
bool _mi_prim_getenv(const char* name, char* result, size_t result_size);
// Fill a buffer with strong randomness; return `false` on error or if
// there is no strong randomization available.
bool _mi_prim_random_buf(void* buf, size_t buf_len);
// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
void _mi_prim_thread_init_auto_done(void);
// Called on process exit and may take action to clean up resources associated with the thread auto done.
void _mi_prim_thread_done_auto_done(void);
// Called when the default heap for a thread changes
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
//-------------------------------------------------------------------
// Thread id: `_mi_prim_thread_id()`
//
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms as
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
// We only require _mi_prim_thread_id() to return a unique id
// for each thread (unequal to zero).
//-------------------------------------------------------------------
// defined in `init.c`; do not use these directly
extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
extern bool _mi_process_is_initialized; // has mi_process_init been called?
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
#ifdef MI_PRIM_THREAD_ID
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
return MI_PRIM_THREAD_ID();
}
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
// Windows: works on Intel and ARM in both 32- and 64-bit
return (uintptr_t)NtCurrentTeb();
}
// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
// both the OS and libc implementation so we use specific tests for each main platform.
// If you test on another platform and it works please send a PR :-)
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
#elif defined(__GNUC__) && ( \
(defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|| (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
|| (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|| (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|| (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
)
static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
void* res;
const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
__asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
__asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
__asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
#elif defined(__x86_64__)
__asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; MI_UNUSED(ofs);
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
res = tcb[slot];
#elif defined(__aarch64__)
void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
#else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#endif
res = tcb[slot];
#endif
return res;
}
// setting a tls slot is only used on macOS for now
static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
const size_t ofs = (slot*sizeof(void*));
#if defined(__i386__)
__asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
#elif defined(__APPLE__) && defined(__x86_64__)
__asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
__asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
#elif defined(__x86_64__)
__asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
#elif defined(__arm__)
void** tcb; MI_UNUSED(ofs);
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
tcb[slot] = value;
#elif defined(__aarch64__)
void** tcb; MI_UNUSED(ofs);
#if defined(__APPLE__) // M1, issue #343
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
#else
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
#endif
tcb[slot] = value;
#endif
}
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
#if defined(__BIONIC__)
// issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
// see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
return (uintptr_t)mi_prim_tls_slot(1);
#else
// in all our other targets, slot 0 is the thread id
// glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
// apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
return (uintptr_t)mi_prim_tls_slot(0);
#endif
}
#else
// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
return (uintptr_t)&_mi_heap_default;
}
#endif
/* ----------------------------------------------------------------------------------------
The thread local default heap: `_mi_prim_get_default_heap()`
This is inlined here as it is on the fast path for allocation functions.
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
that the storage will always be available (allocated on the thread stacks).
On some platforms though we cannot use that when overriding `malloc` since the underlying
TLS implementation (or the loader) will itself call `malloc` on a first access and recurse.
We try to circumvent this in an efficient way:
- macOS  : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On macOS, the
loader itself calls `malloc` even before the modules are initialized.
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
- DragonFly: defaults are working but seem slow compared to FreeBSD (see PR #323)
------------------------------------------------------------------------------------------- */
static inline mi_heap_t* mi_prim_get_default_heap(void);
#if defined(MI_MALLOC_OVERRIDE)
#if defined(__APPLE__) // macOS
#define MI_TLS_SLOT 89 // seems unused?
// #define MI_TLS_RECURSE_GUARD 1
// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
// see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
#elif defined(__OpenBSD__)
// use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
// #elif defined(__DragonFly__)
// #warning "mimalloc is not working correctly on DragonFly yet."
// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#elif defined(__ANDROID__)
// See issue #381
#define MI_TLS_PTHREAD
#endif
#endif
#if defined(MI_TLS_SLOT)
static inline mi_heap_t* mi_prim_get_default_heap(void) {
mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
if mi_unlikely(heap == NULL) {
#ifdef __GNUC__
__asm(""); // prevent conditional load of the address of _mi_heap_empty
#endif
heap = (mi_heap_t*)&_mi_heap_empty;
}
return heap;
}
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
pthread_t self = pthread_self();
#if defined(__DragonFly__)
if (self==NULL) return NULL;
#endif
return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}
static inline mi_heap_t* mi_prim_get_default_heap(void) {
mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
mi_heap_t* heap = *pheap;
if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
return heap;
}
#elif defined(MI_TLS_PTHREAD)
extern pthread_key_t _mi_heap_default_key;
static inline mi_heap_t* mi_prim_get_default_heap(void) {
mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
}
#else // default using a thread local variable; used on most platforms.
static inline mi_heap_t* mi_prim_get_default_heap(void) {
#if defined(MI_TLS_RECURSE_GUARD)
if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
#endif
return _mi_heap_default;
}
#endif // mi_prim_get_default_heap()
#endif // MIMALLOC_PRIM_H

View File

@@ -0,0 +1,147 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TRACK_H
#define MIMALLOC_TRACK_H
/* ------------------------------------------------------------------------------------------------------
Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers.
These can be defined for tracking allocation:
#define mi_track_malloc_size(p,reqsize,size,zero)
#define mi_track_free_size(p,_size)
The macros are set up such that the size passed to `mi_track_free_size`
always matches the size passed to `mi_track_malloc_size` (currently, `size == mi_usable_size(p)`).
The `reqsize` is what the user requested, and `size >= reqsize`.
The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled,
or otherwise it is the usable block size which may be larger than the original request.
Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc).
The `zero` parameter is `true` if the allocated block is zero initialized.
Optional:
#define mi_track_align(p,alignedp,offset,size)
#define mi_track_resize(p,oldsize,newsize)
#define mi_track_init()
The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
The `mi_track_resize` is currently unused but could be called on reallocations within a block.
`mi_track_init` is called at program start.
The following macros are for tools like asan and valgrind to track whether memory is
defined, undefined, or not accessible at all:
#define mi_track_mem_defined(p,size)
#define mi_track_mem_undefined(p,size)
#define mi_track_mem_noaccess(p,size)
-------------------------------------------------------------------------------------------------------*/
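// Illustrative sketch (not part of the original header): a custom tracker that logs
// every allocation could define the hooks before the tool selection below; it is kept
// inside `#if 0` here so it never interferes with the real definitions.
#if 0
#include <stdio.h>
#define mi_track_malloc_size(p,reqsize,size,zero) \
  fprintf(stderr, "track: alloc %p (%zu bytes, %zu requested)\n", (p), (size_t)(size), (size_t)(reqsize))
#define mi_track_free_size(p,_size) \
  fprintf(stderr, "track: free  %p (%zu bytes)\n", (p), (size_t)(_size))
#endif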
#if MI_TRACK_VALGRIND
// valgrind tool
#define MI_TRACK_ENABLED 1
#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy
#define MI_TRACK_TOOL "valgrind"
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)
#elif MI_TRACK_ASAN
// address sanitizer
#define MI_TRACK_ENABLED 1
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_TOOL "asan"
#include <sanitizer/asan_interface.h>
#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)
#elif MI_TRACK_ETW
// windows event tracing
#define MI_TRACK_ENABLED 1
#define MI_TRACK_HEAP_DESTROY 1
#define MI_TRACK_TOOL "ETW"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include "../src/prim/windows/etw.h"
#define mi_track_init() EventRegistermicrosoft_windows_mimalloc();
#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size)
#else
// no tracking
#define MI_TRACK_ENABLED 0
#define MI_TRACK_HEAP_DESTROY 0
#define MI_TRACK_TOOL "none"
#define mi_track_malloc_size(p,reqsize,size,zero)
#define mi_track_free_size(p,_size)
#endif
// -------------------
// Utility definitions
#ifndef mi_track_resize
#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
#endif
#ifndef mi_track_align
#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
#endif
#ifndef mi_track_init
#define mi_track_init()
#endif
#ifndef mi_track_mem_defined
#define mi_track_mem_defined(p,size)
#endif
#ifndef mi_track_mem_undefined
#define mi_track_mem_undefined(p,size)
#endif
#ifndef mi_track_mem_noaccess
#define mi_track_mem_noaccess(p,size)
#endif
#if MI_PADDING
#define mi_track_malloc(p,reqsize,zero) \
if ((p)!=NULL) { \
mi_assert_internal(mi_usable_size(p)==(reqsize)); \
mi_track_malloc_size(p,reqsize,reqsize,zero); \
}
#else
#define mi_track_malloc(p,reqsize,zero) \
if ((p)!=NULL) { \
mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
}
#endif
#endif

View File

@@ -0,0 +1,721 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TYPES_H
#define MIMALLOC_TYPES_H
// --------------------------------------------------------------------------
// This file contains the main type definitions for mimalloc:
// mi_heap_t : all data for a thread-local heap, contains
// lists of all managed heap pages.
// mi_segment_t : a larger chunk of memory (32MiB) from where pages
// are allocated.
// mi_page_t : a mimalloc page (usually 64KiB or 512KiB) from
// where objects are allocated.
// --------------------------------------------------------------------------
#include <stddef.h> // ptrdiff_t
#include <stdint.h> // uintptr_t, uint16_t, etc
#include "atomic.h" // _Atomic
#ifdef _MSC_VER
#pragma warning(disable:4214) // bitfield is not int
#endif
// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `sizeof(void*)`
#ifndef MI_MAX_ALIGN_SIZE
#define MI_MAX_ALIGN_SIZE 16 // sizeof(max_align_t)
#endif
#define MI_CACHE_LINE 64
#if defined(_MSC_VER)
#pragma warning(disable:4127) // suppress constant conditional warning (due to MI_SECURE paths)
#pragma warning(disable:26812) // unscoped enum warning
#define mi_decl_noinline __declspec(noinline)
#define mi_decl_thread __declspec(thread)
#define mi_decl_cache_align __declspec(align(MI_CACHE_LINE))
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__) // includes clang and icc
#define mi_decl_noinline __attribute__((noinline))
#define mi_decl_thread __thread
#define mi_decl_cache_align __attribute__((aligned(MI_CACHE_LINE)))
#else
#define mi_decl_noinline
#define mi_decl_thread __thread // hope for the best :-)
#define mi_decl_cache_align
#endif
// ------------------------------------------------------
// Variants
// ------------------------------------------------------
// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG
// Define MI_TRACK_<tool> to enable tracking support
// #define MI_TRACK_VALGRIND 1
// #define MI_TRACK_ASAN 1
// #define MI_TRACK_ETW 1
// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1
// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1 // guard page around metadata
// #define MI_SECURE 2 // guard page around each mimalloc page
// #define MI_SECURE 3 // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4 // checks for double free. (may be more expensive)
#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif
// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1 // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2 // + internal assertion checks
// #define MI_DEBUG 3 // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2
#else
#define MI_DEBUG 0
#endif
#endif
// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING 1
#endif
// Check padding bytes; allows byte-precise buffer overflow detection
#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_PADDING_CHECK 1
#endif
// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST 1
#endif
// We used to abandon huge pages in order to eagerly deallocate them when freed from
// another thread, but that makes it impossible to visit them during a heap walk or to
// include them in a `mi_heap_destroy`. We therefore instead reset/decommit the huge
// blocks if freed from another thread so most memory is available until it gets
// properly freed by the owning thread.
// #define MI_HUGE_PAGE_ABANDON 1
// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------
// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*)==sizeof(intptr_t)`
// and it holds for all platforms we know of.
//
// However, the C standard only requires that:
// p == (void*)((intptr_t)p))
// but we also need:
// i == (intptr_t)((void*)i)
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------
#if INTPTR_MAX > INT64_MAX
# define MI_INTPTR_SHIFT (4) // assume 128-bit (as on arm CHERI for example)
#elif INTPTR_MAX == INT64_MAX
# define MI_INTPTR_SHIFT (3)
#elif INTPTR_MAX == INT32_MAX
# define MI_INTPTR_SHIFT (2)
#else
#error platform pointers must be 32, 64, or 128 bits
#endif
#if SIZE_MAX == UINT64_MAX
# define MI_SIZE_SHIFT (3)
typedef int64_t mi_ssize_t;
#elif SIZE_MAX == UINT32_MAX
# define MI_SIZE_SHIFT (2)
typedef int32_t mi_ssize_t;
#else
#error platform objects must be 32 or 64 bits
#endif
#if (SIZE_MAX/2) > LONG_MAX
# define MI_ZU(x) x##ULL
# define MI_ZI(x) x##LL
#else
# define MI_ZU(x) x##UL
# define MI_ZI(x) x##L
#endif
#define MI_INTPTR_SIZE (1<<MI_INTPTR_SHIFT)
#define MI_INTPTR_BITS (MI_INTPTR_SIZE*8)
#define MI_SIZE_SIZE (1<<MI_SIZE_SHIFT)
#define MI_SIZE_BITS (MI_SIZE_SIZE*8)
#define MI_KiB (MI_ZU(1024))
#define MI_MiB (MI_KiB*MI_KiB)
#define MI_GiB (MI_MiB*MI_KiB)
// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------
// Main tuning parameters for segment and page sizes
// Sizes for 64-bit (usually divide by two for 32-bit)
#define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT ( 9 + MI_SEGMENT_SLICE_SHIFT) // 32MiB
#else
#define MI_SEGMENT_SHIFT ( 7 + MI_SEGMENT_SLICE_SHIFT) // 4MiB on 32-bit
#endif
#define MI_SMALL_PAGE_SHIFT (MI_SEGMENT_SLICE_SHIFT) // 64KiB
#define MI_MEDIUM_PAGE_SHIFT ( 3 + MI_SMALL_PAGE_SHIFT) // 512KiB
// Derived constants
#define MI_SEGMENT_SIZE (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
#define MI_SEGMENT_SLICE_SIZE (MI_ZU(1)<< MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE) // 512 on 64-bit (128 on 32-bit)
#define MI_SMALL_PAGE_SIZE (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)
#define MI_SMALL_OBJ_SIZE_MAX (MI_SMALL_PAGE_SIZE/4) // 16KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX (MI_MEDIUM_PAGE_SIZE/4) // 128KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX (MI_SEGMENT_SIZE/2) // 16MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE (73U)
#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif
// Maximum slice offset (255 on 64-bit)
#define MI_MAX_SLICE_OFFSET ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)
// Used as a special value to encode block sizes in 32 bits.
#define MI_HUGE_BLOCK_SIZE ((uint32_t)(2*MI_GiB))
// blocks up to this size are always allocated aligned
#define MI_MAX_ALIGN_GUARANTEE (8*MI_MAX_ALIGN_SIZE)
// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments
#define MI_ALIGNMENT_MAX (MI_SEGMENT_SIZE >> 1)
// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------
// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODE_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;
// thread ids
typedef size_t mi_threadid_t;
// free lists contain blocks
typedef struct mi_block_s {
mi_encoded_t next;
} mi_block_t;
// The delayed flags are used for efficient multi-threaded freeing
typedef enum mi_delayed_e {
MI_USE_DELAYED_FREE = 0, // push on the owning heap thread delayed list
MI_DELAYED_FREEING = 1, // temporary: another thread is accessing the owning heap
MI_NO_DELAYED_FREE = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
MI_NEVER_DELAYED_FREE = 3 // sticky, only resets on page reclaim
} mi_delayed_t;
// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
#if !MI_TSAN
typedef union mi_page_flags_s {
uint8_t full_aligned;
struct {
uint8_t in_full : 1;
uint8_t has_aligned : 1;
} x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
uint16_t full_aligned;
struct {
uint8_t in_full;
uint8_t has_aligned;
} x;
} mi_page_flags_t;
#endif
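// e.g. the free path can test both flags with a single load:
//   if mi_likely(page->flags.full_aligned == 0) { /* no special handling needed */ }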
// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags
typedef uintptr_t mi_thread_free_t;
// A page contains blocks of one specific size (`block_size`).
// Each page has three lists of free blocks:
// `free` for blocks that can be allocated,
// `local_free` for freed blocks that are not yet available to `mi_malloc`
// `thread_free` for freed blocks by other threads
// The `local_free` and `thread_free` lists are migrated to the `free` list
// when it is exhausted. The separate `local_free` list is necessary to
// implement a monotonic heartbeat. The `thread_free` list is needed for
// avoiding atomic operations in the common case.
//
//
// `used - |thread_free|` == actual blocks that are in use (alive)
// `used - |thread_free| + |free| + |local_free| == capacity`
//
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
// Notes:
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
// (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
// and 12 are still good for address calculation)
// - To limit the structure size, the `xblock_size` is 32-bits only; for
// blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as delayed-free flags to optimize
// concurrent frees where only the first concurrent free adds to the owning
// heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
// The invariant is that no-delayed-free is only set if there is
// at least one block that will be added, or has already been added, to
// the owning heap `thread_delayed_free` list. This guarantees that pages
// will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
// "owned" by the segment
uint32_t slice_count; // slices in this page (0 if not a page)
uint32_t slice_offset; // distance from the actual page data slice (0 if a page)
uint8_t is_committed : 1; // `true` if the page virtual memory is committed
uint8_t is_zero_init : 1; // `true` if the page was initially zero initialized
uint8_t use_qsbr : 1; // delay page freeing using qsbr
uint8_t tag : 4; // tag from the owning heap
uint8_t debug_offset; // number of bytes to preserve when filling freed or uninitialized memory
// layout like this to optimize access in `mi_malloc` and `mi_free`
uint16_t capacity; // number of blocks committed, must be the first field, see `segment.c:page_clear`
uint16_t reserved; // number of blocks reserved in memory
mi_page_flags_t flags; // `in_full` and `has_aligned` flags (8 bits)
uint8_t free_is_zero : 1; // `true` if the blocks in the free list are zero initialized
uint8_t retire_expire : 7; // expiration count for retired blocks
mi_block_t* free; // list of available free blocks (`malloc` allocates from this list)
uint32_t used; // number of blocks in use (including blocks in `local_free` and `thread_free`)
uint32_t xblock_size; // size available in each block (always `>0`)
mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
#if (MI_ENCODE_FREELIST || MI_PADDING)
uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
#endif
_Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
_Atomic(uintptr_t) xheap;
struct mi_page_s* next; // next page owned by this thread with the same `block_size`
struct mi_page_s* prev; // previous page owned by this thread with the same `block_size`
#ifdef Py_GIL_DISABLED
struct llist_node qsbr_node;
uint64_t qsbr_goal;
#endif
// 64-bit 9 words, 32-bit 12 words, (+2 for secure)
#if MI_INTPTR_SIZE==8 && !defined(Py_GIL_DISABLED)
uintptr_t padding[1];
#endif
} mi_page_t;
// ------------------------------------------------------
// Mimalloc segments contain mimalloc pages
// ------------------------------------------------------
typedef enum mi_page_kind_e {
MI_PAGE_SMALL, // small blocks go into 64KiB pages inside a segment
MI_PAGE_MEDIUM, // medium blocks go into medium pages inside a segment
MI_PAGE_LARGE, // larger blocks go into a page of just one block
MI_PAGE_HUGE, // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;
typedef enum mi_segment_kind_e {
MI_SEGMENT_NORMAL, // MI_SEGMENT_SIZE size with pages inside.
MI_SEGMENT_HUGE, // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;
// ------------------------------------------------------
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
// size. If it is equal we have the most fine grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------
#define MI_MINIMAL_COMMIT_SIZE (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)
#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
#endif
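// e.g. on 64-bit: a 32MiB segment with a 64KiB commit size gives 512 mask bits,
// stored as 512/64 = 8 `size_t` fields.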
typedef struct mi_commit_mask_s {
size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
} mi_commit_mask_t;
typedef mi_page_t mi_slice_t;
typedef int64_t mi_msecs_t;
// Memory can reside in arena's, direct OS allocated, or statically allocated. The memid keeps track of this.
typedef enum mi_memkind_e {
MI_MEM_NONE, // not allocated
MI_MEM_EXTERNAL, // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
MI_MEM_STATIC, // allocated in a static area and should not be freed (for arena meta data for example)
MI_MEM_OS, // allocated from the OS
MI_MEM_OS_HUGE, // allocated as huge os pages
MI_MEM_OS_REMAP, // allocated in a remapable area (i.e. using `mremap`)
MI_MEM_ARENA // allocated from an arena (the usual case)
} mi_memkind_t;
static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
}
typedef struct mi_memid_os_info {
void* base; // actual base address of the block (used for offset aligned allocations)
size_t alignment; // alignment at allocation
} mi_memid_os_info_t;
typedef struct mi_memid_arena_info {
size_t block_index; // index in the arena
mi_arena_id_t id; // arena id (>= 1)
bool is_exclusive; // the arena can only be used for specific arena allocations
} mi_memid_arena_info_t;
typedef struct mi_memid_s {
union {
mi_memid_os_info_t os; // only used for MI_MEM_OS
mi_memid_arena_info_t arena; // only used for MI_MEM_ARENA
} mem;
bool is_pinned; // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
bool initially_committed;// `true` if the memory was originally allocated as committed
bool initially_zero; // `true` if the memory was originally zero initialized
mi_memkind_t memkind;
} mi_memid_t;
// Segments are large allocated memory blocks (32MiB on 64-bit) from
// the OS. Inside segments we allocate fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
// constant fields
mi_memid_t memid; // memory id for arena allocation
bool allow_decommit;
bool allow_purge;
size_t segment_size;
// segment fields
mi_msecs_t purge_expire;
mi_commit_mask_t purge_mask;
mi_commit_mask_t commit_mask;
_Atomic(struct mi_segment_s*) abandoned_next;
// from here is zero initialized
struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it stays there too long)
size_t used; // count of pages in use
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
size_t segment_slices; // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
size_t segment_info_slices; // initial slices used for the segment info and possible guard pages.
// layout like this to optimize access in `mi_free`
mi_segment_kind_t kind;
size_t slice_entries; // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
_Atomic(mi_threadid_t) thread_id; // unique id of the thread owning this segment
mi_slice_t slices[MI_SLICES_PER_SEGMENT+1]; // one more for huge blocks with large alignment
} mi_segment_t;
typedef uintptr_t mi_tagged_segment_t;
// Segments unowned by any thread are put in a shared pool
typedef struct mi_abandoned_pool_s {
// This is a list of visited abandoned pages that were full at the time.
// This list migrates to `abandoned` when that becomes NULL. The use of
// this list reduces contention and the rate at which segments are visited.
mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited; // = NULL
// The abandoned page list (tagged as it supports pop)
mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned; // = NULL
// Maintain these for debug purposes (these counts may be a bit off)
mi_decl_cache_align _Atomic(size_t) abandoned_count;
mi_decl_cache_align _Atomic(size_t) abandoned_visited_count;
// We also maintain a count of current readers of the abandoned list
// in order to prevent resetting/decommitting segment memory if it might
// still be read.
mi_decl_cache_align _Atomic(size_t) abandoned_readers; // = 0
} mi_abandoned_pool_t;
// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
// A heap just owns a set of pages for allocation and
// can only allocate/reallocate from the thread that created it.
// Freeing blocks can be done from any thread though.
// Per thread, the segments are shared among its heaps.
// Per thread, there is always a default heap that is
// used for allocation; it is initialized to statically
// point to an empty heap to avoid initialization checks
// in the fast path.
// ------------------------------------------------------
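/* Illustrative sketch (not part of the original header): the first-class heap
   concept via mimalloc's public API; this assumes the usual public entry
   points (mi_heap_new, mi_heap_malloc, mi_free, mi_heap_delete) and is a
   usage sketch, not code from this file:
     mi_heap_t* heap = mi_heap_new();      // owned by the calling thread
     void* p = mi_heap_malloc(heap, 64);   // allocate only from the owning thread
     mi_free(p);                           // freeing may happen from any thread
     mi_heap_delete(heap);
*/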
// Thread local data
typedef struct mi_tld_s mi_tld_t;
// Pages of a certain block size are held in a queue.
typedef struct mi_page_queue_s {
mi_page_t* first;
mi_page_t* last;
size_t block_size;
} mi_page_queue_t;
#define MI_BIN_FULL (MI_BIN_HUGE+1)
// Random context
typedef struct mi_random_cxt_s {
uint32_t input[16];
uint32_t output[16];
int output_available;
bool weak;
} mi_random_ctx_t;
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
uint32_t delta; // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE 0
#define MI_PADDING_WSIZE 0
#endif
#define MI_PAGES_DIRECT (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
// A heap owns a set of pages.
struct mi_heap_s {
mi_tld_t* tld;
mi_page_t* pages_free_direct[MI_PAGES_DIRECT]; // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
mi_page_queue_t pages[MI_BIN_FULL + 1]; // queue of pages for each size class (or "bin")
_Atomic(mi_block_t*) thread_delayed_free;
mi_threadid_t thread_id; // thread this heap belongs to
mi_arena_id_t arena_id; // arena id if the heap belongs to a specific arena (or 0)
uintptr_t cookie; // random cookie to verify pointers (see `_mi_ptr_cookie`)
uintptr_t keys[2]; // two random keys used to encode the `thread_delayed_free` list
mi_random_ctx_t random; // random number context used for secure allocation
size_t page_count; // total number of pages in the `pages` queues.
size_t page_retired_min; // smallest retired index (retired pages are fully free, but still in the page queues)
size_t page_retired_max; // largest retired index into the `pages` array.
mi_heap_t* next; // list of heaps per thread
bool no_reclaim; // `true` if this heap should not reclaim abandoned pages
uint8_t tag; // custom identifier for this heap
uint8_t debug_offset; // number of bytes to preserve when filling freed or uninitialized memory
bool page_use_qsbr; // should freeing pages be delayed using QSBR
};
// ------------------------------------------------------
// Debug
// ------------------------------------------------------
#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING (0xDE)
#endif
#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
#define mi_assert(expr) ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
#define mi_assert(x)
#endif
#if (MI_DEBUG>1)
#define mi_assert_internal mi_assert
#else
#define mi_assert_internal(x)
#endif
#if (MI_DEBUG>2)
#define mi_assert_expensive mi_assert
#else
#define mi_assert_expensive(x)
#endif
// ------------------------------------------------------
// Statistics
// ------------------------------------------------------
#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif
typedef struct mi_stat_count_s {
int64_t allocated;
int64_t freed;
int64_t peak;
int64_t current;
} mi_stat_count_t;
typedef struct mi_stat_counter_s {
int64_t total;
int64_t count;
} mi_stat_counter_t;
typedef struct mi_stats_s {
mi_stat_count_t segments;
mi_stat_count_t pages;
mi_stat_count_t reserved;
mi_stat_count_t committed;
mi_stat_count_t reset;
mi_stat_count_t purged;
mi_stat_count_t page_committed;
mi_stat_count_t segments_abandoned;
mi_stat_count_t pages_abandoned;
mi_stat_count_t threads;
mi_stat_count_t normal;
mi_stat_count_t huge;
mi_stat_count_t large;
mi_stat_count_t malloc;
mi_stat_count_t segments_cache;
mi_stat_counter_t pages_extended;
mi_stat_counter_t mmap_calls;
mi_stat_counter_t commit_calls;
mi_stat_counter_t reset_calls;
mi_stat_counter_t purge_calls;
mi_stat_counter_t page_no_retire;
mi_stat_counter_t searches;
mi_stat_counter_t normal_count;
mi_stat_counter_t huge_count;
mi_stat_counter_t large_count;
#if MI_STAT>1
mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
} mi_stats_t;
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);
#if (MI_STAT)
#define mi_stat_increase(stat,amount) _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount) _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#else
#define mi_stat_increase(stat,amount) (void)0
#define mi_stat_decrease(stat,amount) (void)0
#define mi_stat_counter_increase(stat,amount) (void)0
#endif
#define mi_heap_stat_counter_increase(heap,stat,amount) mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount) mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount) mi_stat_decrease( (heap)->tld->stats.stat, amount)
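/* Illustrative sketch (not part of the original header): the heap-level
   wrappers resolve a field of `heap->tld->stats` by name, e.g.:
     mi_heap_stat_increase(heap, malloc, size);          // bumps tld->stats.malloc
     mi_heap_stat_counter_increase(heap, mmap_calls, 1); // bumps tld->stats.mmap_calls
   With MI_STAT disabled these compile away to `(void)0`.
*/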
// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------
// A "span" is is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
mi_slice_t* first;
mi_slice_t* last;
size_t slice_count;
} mi_span_queue_t;
#define MI_SEGMENT_BIN_MAX (35) // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)
// OS thread local data
typedef struct mi_os_tld_s {
size_t region_idx; // start point for next allocation
mi_stats_t* stats; // points to tld stats
} mi_os_tld_t;
// Segments thread local data
typedef struct mi_segments_tld_s {
mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1]; // free slice spans inside segments
size_t count; // current number of segments;
size_t peak_count; // peak number of segments
size_t current_size; // current size of all segments
size_t peak_size; // peak size of all segments
mi_stats_t* stats; // points to tld stats
mi_os_tld_t* os; // points to os stats
mi_abandoned_pool_t* abandoned; // pool of abandoned segments
} mi_segments_tld_t;
// Thread local data
struct mi_tld_s {
unsigned long long heartbeat; // monotonic heartbeat count
bool recurse; // true if deferred was called; used to prevent infinite recursion.
mi_heap_t* heap_backing; // backing heap of this thread (cannot be deleted)
mi_heap_t* heaps; // list of heaps in this thread (so we can abandon all when the thread terminates)
mi_segments_tld_t segments; // segment tld
mi_os_tld_t os; // os tld
mi_stats_t stats; // statistics
};
#endif


@@ -0,0 +1,61 @@
#ifndef Py_INTERNAL_ABSTRACT_H
#define Py_INTERNAL_ABSTRACT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
// Fast inlined version of PyIndex_Check()
static inline int
_PyIndex_Check(PyObject *obj)
{
PyNumberMethods *tp_as_number = Py_TYPE(obj)->tp_as_number;
return (tp_as_number != NULL && tp_as_number->nb_index != NULL);
}
PyObject *_PyNumber_PowerNoMod(PyObject *lhs, PyObject *rhs);
PyObject *_PyNumber_InPlacePowerNoMod(PyObject *lhs, PyObject *rhs);
extern int _PyObject_HasLen(PyObject *o);
/* === Sequence protocol ================================================ */
#define PY_ITERSEARCH_COUNT 1
#define PY_ITERSEARCH_INDEX 2
#define PY_ITERSEARCH_CONTAINS 3
/* Iterate over seq.
Result depends on the operation:
PY_ITERSEARCH_COUNT: return # of times obj appears in seq; -1 if
error.
PY_ITERSEARCH_INDEX: return 0-based index of first occurrence of
obj in seq; set ValueError and return -1 if none found;
also return -1 on error.
PY_ITERSEARCH_CONTAINS: return 1 if obj in seq, else 0; -1 on
error. */
extern Py_ssize_t _PySequence_IterSearch(PyObject *seq,
PyObject *obj, int operation);
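/* Illustrative sketch (not part of the original header): counting how many
   times `obj` appears in `seq` (both assumed to be valid objects):
     Py_ssize_t n = _PySequence_IterSearch(seq, obj, PY_ITERSEARCH_COUNT);
     if (n < 0) {
       // error: an exception has been set
     }
*/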
/* === Mapping protocol ================================================= */
extern int _PyObject_RealIsInstance(PyObject *inst, PyObject *cls);
extern int _PyObject_RealIsSubclass(PyObject *derived, PyObject *cls);
// Convert Python int to Py_ssize_t. Do nothing if the argument is None.
// Export for '_bisect' shared extension.
PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *);
// Same as PyNumber_Index() but can return an instance of a subclass of int.
// Export for 'math' shared extension.
PyAPI_FUNC(PyObject*) _PyNumber_Index(PyObject *o);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ABSTRACT_H */


@@ -0,0 +1,112 @@
#ifndef Py_INTERNAL_ASDL_H
#define Py_INTERNAL_ASDL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pyarena.h" // _PyArena_Malloc()
typedef PyObject * identifier;
typedef PyObject * string;
typedef PyObject * object;
typedef PyObject * constant;
/* It would be nice if the code generated by asdl_c.py was completely
independent of Python, but it is a goal that requires too much work
at this stage. So, for example, I'll represent identifiers as
interned Python strings.
*/
#define _ASDL_SEQ_HEAD \
Py_ssize_t size; \
void **elements;
typedef struct {
_ASDL_SEQ_HEAD
} asdl_seq;
typedef struct {
_ASDL_SEQ_HEAD
void *typed_elements[1];
} asdl_generic_seq;
typedef struct {
_ASDL_SEQ_HEAD
PyObject *typed_elements[1];
} asdl_identifier_seq;
typedef struct {
_ASDL_SEQ_HEAD
int typed_elements[1];
} asdl_int_seq;
asdl_generic_seq *_Py_asdl_generic_seq_new(Py_ssize_t size, PyArena *arena);
asdl_identifier_seq *_Py_asdl_identifier_seq_new(Py_ssize_t size, PyArena *arena);
asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);
#define GENERATE_ASDL_SEQ_CONSTRUCTOR(NAME, TYPE) \
asdl_ ## NAME ## _seq *_Py_asdl_ ## NAME ## _seq_new(Py_ssize_t size, PyArena *arena) \
{ \
asdl_ ## NAME ## _seq *seq = NULL; \
size_t n; \
/* check size is sane */ \
if (size < 0 || \
(size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) { \
PyErr_NoMemory(); \
return NULL; \
} \
n = (size ? (sizeof(TYPE *) * (size - 1)) : 0); \
/* check if size can be added safely */ \
if (n > SIZE_MAX - sizeof(asdl_ ## NAME ## _seq)) { \
PyErr_NoMemory(); \
return NULL; \
} \
n += sizeof(asdl_ ## NAME ## _seq); \
seq = (asdl_ ## NAME ## _seq *)_PyArena_Malloc(arena, n); \
if (!seq) { \
PyErr_NoMemory(); \
return NULL; \
} \
memset(seq, 0, n); \
seq->size = size; \
seq->elements = (void**)seq->typed_elements; \
return seq; \
}
#define asdl_seq_GET_UNTYPED(S, I) _Py_RVALUE((S)->elements[(I)])
#define asdl_seq_GET(S, I) _Py_RVALUE((S)->typed_elements[(I)])
#define asdl_seq_LEN(S) _Py_RVALUE(((S) == NULL ? 0 : (S)->size))
#ifdef Py_DEBUG
# define asdl_seq_SET(S, I, V) \
do { \
Py_ssize_t _asdl_i = (I); \
assert((S) != NULL); \
assert(0 <= _asdl_i && _asdl_i < (S)->size); \
(S)->typed_elements[_asdl_i] = (V); \
} while (0)
#else
# define asdl_seq_SET(S, I, V) _Py_RVALUE((S)->typed_elements[(I)] = (V))
#endif
#ifdef Py_DEBUG
# define asdl_seq_SET_UNTYPED(S, I, V) \
do { \
Py_ssize_t _asdl_i = (I); \
assert((S) != NULL); \
assert(0 <= _asdl_i && _asdl_i < (S)->size); \
(S)->elements[_asdl_i] = (V); \
} while (0)
#else
# define asdl_seq_SET_UNTYPED(S, I, V) _Py_RVALUE((S)->elements[(I)] = (V))
#endif
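/* Illustrative sketch (not part of the original header): creating and filling
   a typed sequence (assumes `arena` is a valid PyArena*; the constructor is
   generated from GENERATE_ASDL_SEQ_CONSTRUCTOR in an implementation file):
     asdl_int_seq *ops = _Py_asdl_int_seq_new(2, arena);
     if (ops == NULL) { return NULL; }   // PyErr_NoMemory() already set
     asdl_seq_SET(ops, 0, 1);            // typed_elements[0] = 1
     asdl_seq_SET(ops, 1, 2);
     Py_ssize_t n = asdl_seq_LEN(ops);   // n == 2
*/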
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ASDL_H */


@@ -0,0 +1,926 @@
// File automatically generated by Parser/asdl_c.py.
#ifndef Py_INTERNAL_AST_H
#define Py_INTERNAL_AST_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_asdl.h" // _ASDL_SEQ_HEAD
typedef struct _mod *mod_ty;
typedef struct _stmt *stmt_ty;
typedef struct _expr *expr_ty;
typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;
typedef enum _boolop { And=1, Or=2 } boolop_ty;
typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
FloorDiv=13 } operator_ty;
typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;
typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8,
In=9, NotIn=10 } cmpop_ty;
typedef struct _comprehension *comprehension_ty;
typedef struct _excepthandler *excepthandler_ty;
typedef struct _arguments *arguments_ty;
typedef struct _arg *arg_ty;
typedef struct _keyword *keyword_ty;
typedef struct _alias *alias_ty;
typedef struct _withitem *withitem_ty;
typedef struct _match_case *match_case_ty;
typedef struct _pattern *pattern_ty;
typedef struct _type_ignore *type_ignore_ty;
typedef struct _type_param *type_param_ty;
typedef struct {
_ASDL_SEQ_HEAD
mod_ty typed_elements[1];
} asdl_mod_seq;
asdl_mod_seq *_Py_asdl_mod_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
stmt_ty typed_elements[1];
} asdl_stmt_seq;
asdl_stmt_seq *_Py_asdl_stmt_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
expr_ty typed_elements[1];
} asdl_expr_seq;
asdl_expr_seq *_Py_asdl_expr_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
comprehension_ty typed_elements[1];
} asdl_comprehension_seq;
asdl_comprehension_seq *_Py_asdl_comprehension_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
excepthandler_ty typed_elements[1];
} asdl_excepthandler_seq;
asdl_excepthandler_seq *_Py_asdl_excepthandler_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
arguments_ty typed_elements[1];
} asdl_arguments_seq;
asdl_arguments_seq *_Py_asdl_arguments_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
arg_ty typed_elements[1];
} asdl_arg_seq;
asdl_arg_seq *_Py_asdl_arg_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
keyword_ty typed_elements[1];
} asdl_keyword_seq;
asdl_keyword_seq *_Py_asdl_keyword_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
alias_ty typed_elements[1];
} asdl_alias_seq;
asdl_alias_seq *_Py_asdl_alias_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
withitem_ty typed_elements[1];
} asdl_withitem_seq;
asdl_withitem_seq *_Py_asdl_withitem_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
match_case_ty typed_elements[1];
} asdl_match_case_seq;
asdl_match_case_seq *_Py_asdl_match_case_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
pattern_ty typed_elements[1];
} asdl_pattern_seq;
asdl_pattern_seq *_Py_asdl_pattern_seq_new(Py_ssize_t size, PyArena *arena);
typedef struct {
_ASDL_SEQ_HEAD
type_ignore_ty typed_elements[1];
} asdl_type_ignore_seq;
asdl_type_ignore_seq *_Py_asdl_type_ignore_seq_new(Py_ssize_t size, PyArena
*arena);
typedef struct {
_ASDL_SEQ_HEAD
type_param_ty typed_elements[1];
} asdl_type_param_seq;
asdl_type_param_seq *_Py_asdl_type_param_seq_new(Py_ssize_t size, PyArena
*arena);
enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
FunctionType_kind=4};
struct _mod {
enum _mod_kind kind;
union {
struct {
asdl_stmt_seq *body;
asdl_type_ignore_seq *type_ignores;
} Module;
struct {
asdl_stmt_seq *body;
} Interactive;
struct {
expr_ty body;
} Expression;
struct {
asdl_expr_seq *argtypes;
expr_ty returns;
} FunctionType;
} v;
};
enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3,
Return_kind=4, Delete_kind=5, Assign_kind=6,
TypeAlias_kind=7, AugAssign_kind=8, AnnAssign_kind=9,
For_kind=10, AsyncFor_kind=11, While_kind=12, If_kind=13,
With_kind=14, AsyncWith_kind=15, Match_kind=16,
Raise_kind=17, Try_kind=18, TryStar_kind=19, Assert_kind=20,
Import_kind=21, ImportFrom_kind=22, Global_kind=23,
Nonlocal_kind=24, Expr_kind=25, Pass_kind=26, Break_kind=27,
Continue_kind=28};
struct _stmt {
enum _stmt_kind kind;
union {
struct {
identifier name;
arguments_ty args;
asdl_stmt_seq *body;
asdl_expr_seq *decorator_list;
expr_ty returns;
string type_comment;
asdl_type_param_seq *type_params;
} FunctionDef;
struct {
identifier name;
arguments_ty args;
asdl_stmt_seq *body;
asdl_expr_seq *decorator_list;
expr_ty returns;
string type_comment;
asdl_type_param_seq *type_params;
} AsyncFunctionDef;
struct {
identifier name;
asdl_expr_seq *bases;
asdl_keyword_seq *keywords;
asdl_stmt_seq *body;
asdl_expr_seq *decorator_list;
asdl_type_param_seq *type_params;
} ClassDef;
struct {
expr_ty value;
} Return;
struct {
asdl_expr_seq *targets;
} Delete;
struct {
asdl_expr_seq *targets;
expr_ty value;
string type_comment;
} Assign;
struct {
expr_ty name;
asdl_type_param_seq *type_params;
expr_ty value;
} TypeAlias;
struct {
expr_ty target;
operator_ty op;
expr_ty value;
} AugAssign;
struct {
expr_ty target;
expr_ty annotation;
expr_ty value;
int simple;
} AnnAssign;
struct {
expr_ty target;
expr_ty iter;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
string type_comment;
} For;
struct {
expr_ty target;
expr_ty iter;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
string type_comment;
} AsyncFor;
struct {
expr_ty test;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
} While;
struct {
expr_ty test;
asdl_stmt_seq *body;
asdl_stmt_seq *orelse;
} If;
struct {
asdl_withitem_seq *items;
asdl_stmt_seq *body;
string type_comment;
} With;
struct {
asdl_withitem_seq *items;
asdl_stmt_seq *body;
string type_comment;
} AsyncWith;
struct {
expr_ty subject;
asdl_match_case_seq *cases;
} Match;
struct {
expr_ty exc;
expr_ty cause;
} Raise;
struct {
asdl_stmt_seq *body;
asdl_excepthandler_seq *handlers;
asdl_stmt_seq *orelse;
asdl_stmt_seq *finalbody;
} Try;
struct {
asdl_stmt_seq *body;
asdl_excepthandler_seq *handlers;
asdl_stmt_seq *orelse;
asdl_stmt_seq *finalbody;
} TryStar;
struct {
expr_ty test;
expr_ty msg;
} Assert;
struct {
asdl_alias_seq *names;
} Import;
struct {
identifier module;
asdl_alias_seq *names;
int level;
} ImportFrom;
struct {
asdl_identifier_seq *names;
} Global;
struct {
asdl_identifier_seq *names;
} Nonlocal;
struct {
expr_ty value;
} Expr;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
Lambda_kind=5, IfExp_kind=6, Dict_kind=7, Set_kind=8,
ListComp_kind=9, SetComp_kind=10, DictComp_kind=11,
GeneratorExp_kind=12, Await_kind=13, Yield_kind=14,
YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
FormattedValue_kind=18, JoinedStr_kind=19, Constant_kind=20,
Attribute_kind=21, Subscript_kind=22, Starred_kind=23,
Name_kind=24, List_kind=25, Tuple_kind=26, Slice_kind=27};
struct _expr {
enum _expr_kind kind;
union {
struct {
boolop_ty op;
asdl_expr_seq *values;
} BoolOp;
struct {
expr_ty target;
expr_ty value;
} NamedExpr;
struct {
expr_ty left;
operator_ty op;
expr_ty right;
} BinOp;
struct {
unaryop_ty op;
expr_ty operand;
} UnaryOp;
struct {
arguments_ty args;
expr_ty body;
} Lambda;
struct {
expr_ty test;
expr_ty body;
expr_ty orelse;
} IfExp;
struct {
asdl_expr_seq *keys;
asdl_expr_seq *values;
} Dict;
struct {
asdl_expr_seq *elts;
} Set;
struct {
expr_ty elt;
asdl_comprehension_seq *generators;
} ListComp;
struct {
expr_ty elt;
asdl_comprehension_seq *generators;
} SetComp;
struct {
expr_ty key;
expr_ty value;
asdl_comprehension_seq *generators;
} DictComp;
struct {
expr_ty elt;
asdl_comprehension_seq *generators;
} GeneratorExp;
struct {
expr_ty value;
} Await;
struct {
expr_ty value;
} Yield;
struct {
expr_ty value;
} YieldFrom;
struct {
expr_ty left;
asdl_int_seq *ops;
asdl_expr_seq *comparators;
} Compare;
struct {
expr_ty func;
asdl_expr_seq *args;
asdl_keyword_seq *keywords;
} Call;
struct {
expr_ty value;
int conversion;
expr_ty format_spec;
} FormattedValue;
struct {
asdl_expr_seq *values;
} JoinedStr;
struct {
constant value;
string kind;
} Constant;
struct {
expr_ty value;
identifier attr;
expr_context_ty ctx;
} Attribute;
struct {
expr_ty value;
expr_ty slice;
expr_context_ty ctx;
} Subscript;
struct {
expr_ty value;
expr_context_ty ctx;
} Starred;
struct {
identifier id;
expr_context_ty ctx;
} Name;
struct {
asdl_expr_seq *elts;
expr_context_ty ctx;
} List;
struct {
asdl_expr_seq *elts;
expr_context_ty ctx;
} Tuple;
struct {
expr_ty lower;
expr_ty upper;
expr_ty step;
} Slice;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _comprehension {
expr_ty target;
expr_ty iter;
asdl_expr_seq *ifs;
int is_async;
};
enum _excepthandler_kind {ExceptHandler_kind=1};
struct _excepthandler {
enum _excepthandler_kind kind;
union {
struct {
expr_ty type;
identifier name;
asdl_stmt_seq *body;
} ExceptHandler;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _arguments {
asdl_arg_seq *posonlyargs;
asdl_arg_seq *args;
arg_ty vararg;
asdl_arg_seq *kwonlyargs;
asdl_expr_seq *kw_defaults;
arg_ty kwarg;
asdl_expr_seq *defaults;
};
struct _arg {
identifier arg;
expr_ty annotation;
string type_comment;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _keyword {
identifier arg;
expr_ty value;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _alias {
identifier name;
identifier asname;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
struct _withitem {
expr_ty context_expr;
expr_ty optional_vars;
};
struct _match_case {
pattern_ty pattern;
expr_ty guard;
asdl_stmt_seq *body;
};
enum _pattern_kind {MatchValue_kind=1, MatchSingleton_kind=2,
MatchSequence_kind=3, MatchMapping_kind=4,
MatchClass_kind=5, MatchStar_kind=6, MatchAs_kind=7,
MatchOr_kind=8};
struct _pattern {
enum _pattern_kind kind;
union {
struct {
expr_ty value;
} MatchValue;
struct {
constant value;
} MatchSingleton;
struct {
asdl_pattern_seq *patterns;
} MatchSequence;
struct {
asdl_expr_seq *keys;
asdl_pattern_seq *patterns;
identifier rest;
} MatchMapping;
struct {
expr_ty cls;
asdl_pattern_seq *patterns;
asdl_identifier_seq *kwd_attrs;
asdl_pattern_seq *kwd_patterns;
} MatchClass;
struct {
identifier name;
} MatchStar;
struct {
pattern_ty pattern;
identifier name;
} MatchAs;
struct {
asdl_pattern_seq *patterns;
} MatchOr;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
enum _type_ignore_kind {TypeIgnore_kind=1};
struct _type_ignore {
enum _type_ignore_kind kind;
union {
struct {
int lineno;
string tag;
} TypeIgnore;
} v;
};
enum _type_param_kind {TypeVar_kind=1, ParamSpec_kind=2, TypeVarTuple_kind=3};
struct _type_param {
enum _type_param_kind kind;
union {
struct {
identifier name;
expr_ty bound;
expr_ty default_value;
} TypeVar;
struct {
identifier name;
expr_ty default_value;
} ParamSpec;
struct {
identifier name;
expr_ty default_value;
} TypeVarTuple;
} v;
int lineno;
int col_offset;
int end_lineno;
int end_col_offset;
};
// Note: these macros affect function definitions, not only call sites.
mod_ty _PyAST_Module(asdl_stmt_seq * body, asdl_type_ignore_seq * type_ignores,
PyArena *arena);
mod_ty _PyAST_Interactive(asdl_stmt_seq * body, PyArena *arena);
mod_ty _PyAST_Expression(expr_ty body, PyArena *arena);
mod_ty _PyAST_FunctionType(asdl_expr_seq * argtypes, expr_ty returns, PyArena
*arena);
stmt_ty _PyAST_FunctionDef(identifier name, arguments_ty args, asdl_stmt_seq *
body, asdl_expr_seq * decorator_list, expr_ty
returns, string type_comment, asdl_type_param_seq *
type_params, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncFunctionDef(identifier name, arguments_ty args,
asdl_stmt_seq * body, asdl_expr_seq *
decorator_list, expr_ty returns, string
type_comment, asdl_type_param_seq *
type_params, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_ClassDef(identifier name, asdl_expr_seq * bases,
asdl_keyword_seq * keywords, asdl_stmt_seq * body,
asdl_expr_seq * decorator_list, asdl_type_param_seq *
type_params, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Return(expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Delete(asdl_expr_seq * targets, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Assign(asdl_expr_seq * targets, expr_ty value, string
type_comment, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_TypeAlias(expr_ty name, asdl_type_param_seq * type_params,
expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AugAssign(expr_ty target, operator_ty op, expr_ty value, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value, int
simple, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_For(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
asdl_stmt_seq * orelse, string type_comment, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
stmt_ty _PyAST_AsyncFor(expr_ty target, expr_ty iter, asdl_stmt_seq * body,
asdl_stmt_seq * orelse, string type_comment, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_While(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq *
orelse, int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_If(expr_ty test, asdl_stmt_seq * body, asdl_stmt_seq * orelse,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_With(asdl_withitem_seq * items, asdl_stmt_seq * body, string
type_comment, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncWith(asdl_withitem_seq * items, asdl_stmt_seq * body,
string type_comment, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Match(expr_ty subject, asdl_match_case_seq * cases, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
stmt_ty _PyAST_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Try(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
lineno, int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
stmt_ty _PyAST_TryStar(asdl_stmt_seq * body, asdl_excepthandler_seq * handlers,
asdl_stmt_seq * orelse, asdl_stmt_seq * finalbody, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Import(asdl_alias_seq * names, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_ImportFrom(identifier module, asdl_alias_seq * names, int level,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Global(asdl_identifier_seq * names, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Nonlocal(asdl_identifier_seq * names, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
stmt_ty _PyAST_Expr(expr_ty value, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Pass(int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Break(int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
stmt_ty _PyAST_Continue(int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_BoolOp(boolop_ty op, asdl_expr_seq * values, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_NamedExpr(expr_ty target, expr_ty value, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
expr_ty _PyAST_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
expr_ty _PyAST_UnaryOp(unaryop_ty op, expr_ty operand, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Lambda(arguments_ty args, expr_ty body, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
expr_ty _PyAST_Dict(asdl_expr_seq * keys, asdl_expr_seq * values, int lineno,
int col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Set(asdl_expr_seq * elts, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_ListComp(expr_ty elt, asdl_comprehension_seq * generators, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_SetComp(expr_ty elt, asdl_comprehension_seq * generators, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_DictComp(expr_ty key, expr_ty value, asdl_comprehension_seq *
generators, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_GeneratorExp(expr_ty elt, asdl_comprehension_seq * generators,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_Await(expr_ty value, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_Yield(expr_ty value, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_YieldFrom(expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Compare(expr_ty left, asdl_int_seq * ops, asdl_expr_seq *
comparators, int lineno, int col_offset, int end_lineno,
int end_col_offset, PyArena *arena);
expr_ty _PyAST_Call(expr_ty func, asdl_expr_seq * args, asdl_keyword_seq *
keywords, int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_FormattedValue(expr_ty value, int conversion, expr_ty
format_spec, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_JoinedStr(asdl_expr_seq * values, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Constant(constant value, string kind, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Attribute(expr_ty value, identifier attr, expr_context_ty ctx,
int lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
expr_ty _PyAST_Starred(expr_ty value, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Name(identifier id, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_List(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Tuple(asdl_expr_seq * elts, expr_context_ty ctx, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
expr_ty _PyAST_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno,
int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
comprehension_ty _PyAST_comprehension(expr_ty target, expr_ty iter,
asdl_expr_seq * ifs, int is_async,
PyArena *arena);
excepthandler_ty _PyAST_ExceptHandler(expr_ty type, identifier name,
asdl_stmt_seq * body, int lineno, int
col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
arguments_ty _PyAST_arguments(asdl_arg_seq * posonlyargs, asdl_arg_seq * args,
arg_ty vararg, asdl_arg_seq * kwonlyargs,
asdl_expr_seq * kw_defaults, arg_ty kwarg,
asdl_expr_seq * defaults, PyArena *arena);
arg_ty _PyAST_arg(identifier arg, expr_ty annotation, string type_comment, int
lineno, int col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
keyword_ty _PyAST_keyword(identifier arg, expr_ty value, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
alias_ty _PyAST_alias(identifier name, identifier asname, int lineno, int
col_offset, int end_lineno, int end_col_offset, PyArena
*arena);
withitem_ty _PyAST_withitem(expr_ty context_expr, expr_ty optional_vars,
PyArena *arena);
match_case_ty _PyAST_match_case(pattern_ty pattern, expr_ty guard,
asdl_stmt_seq * body, PyArena *arena);
pattern_ty _PyAST_MatchValue(expr_ty value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchSingleton(constant value, int lineno, int col_offset,
int end_lineno, int end_col_offset, PyArena
*arena);
pattern_ty _PyAST_MatchSequence(asdl_pattern_seq * patterns, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
pattern_ty _PyAST_MatchMapping(asdl_expr_seq * keys, asdl_pattern_seq *
patterns, identifier rest, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
pattern_ty _PyAST_MatchClass(expr_ty cls, asdl_pattern_seq * patterns,
asdl_identifier_seq * kwd_attrs, asdl_pattern_seq
* kwd_patterns, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchStar(identifier name, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchAs(pattern_ty pattern, identifier name, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
pattern_ty _PyAST_MatchOr(asdl_pattern_seq * patterns, int lineno, int
col_offset, int end_lineno, int end_col_offset,
PyArena *arena);
type_ignore_ty _PyAST_TypeIgnore(int lineno, string tag, PyArena *arena);
type_param_ty _PyAST_TypeVar(identifier name, expr_ty bound, expr_ty
default_value, int lineno, int col_offset, int
end_lineno, int end_col_offset, PyArena *arena);
type_param_ty _PyAST_ParamSpec(identifier name, expr_ty default_value, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
type_param_ty _PyAST_TypeVarTuple(identifier name, expr_ty default_value, int
lineno, int col_offset, int end_lineno, int
end_col_offset, PyArena *arena);
PyObject* PyAST_mod2obj(mod_ty t);
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
int PyAST_Check(PyObject* obj);
extern int _PyAST_Validate(mod_ty);
/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
extern PyObject* _PyAST_ExprAsUnicode(expr_ty);
/* Return the borrowed reference to the first literal string in the
sequence of statements or NULL if it doesn't start from a literal string.
Doesn't set exception. */
extern PyObject* _PyAST_GetDocString(asdl_stmt_seq *);
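/* Illustrative sketch (not part of the original header): building a minimal
   Module AST equivalent to a single `pass` statement (assumes `arena` is a
   valid PyArena* and omits error checks):
     asdl_stmt_seq *body = _Py_asdl_stmt_seq_new(1, arena);
     stmt_ty pass_stmt = _PyAST_Pass(1, 0, 1, 0, arena);  // lineno/col offsets
     asdl_seq_SET(body, 0, pass_stmt);
     asdl_type_ignore_seq *ignores = _Py_asdl_type_ignore_seq_new(0, arena);
     mod_ty mod = _PyAST_Module(body, ignores, arena);
*/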
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_H */


@@ -0,0 +1,268 @@
// File automatically generated by Parser/asdl_c.py.
#ifndef Py_INTERNAL_AST_STATE_H
#define Py_INTERNAL_AST_STATE_H
#include "pycore_lock.h" // _PyOnceFlag
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
struct ast_state {
_PyOnceFlag once;
int finalized;
PyObject *AST_type;
PyObject *Add_singleton;
PyObject *Add_type;
PyObject *And_singleton;
PyObject *And_type;
PyObject *AnnAssign_type;
PyObject *Assert_type;
PyObject *Assign_type;
PyObject *AsyncFor_type;
PyObject *AsyncFunctionDef_type;
PyObject *AsyncWith_type;
PyObject *Attribute_type;
PyObject *AugAssign_type;
PyObject *Await_type;
PyObject *BinOp_type;
PyObject *BitAnd_singleton;
PyObject *BitAnd_type;
PyObject *BitOr_singleton;
PyObject *BitOr_type;
PyObject *BitXor_singleton;
PyObject *BitXor_type;
PyObject *BoolOp_type;
PyObject *Break_type;
PyObject *Call_type;
PyObject *ClassDef_type;
PyObject *Compare_type;
PyObject *Constant_type;
PyObject *Continue_type;
PyObject *Del_singleton;
PyObject *Del_type;
PyObject *Delete_type;
PyObject *DictComp_type;
PyObject *Dict_type;
PyObject *Div_singleton;
PyObject *Div_type;
PyObject *Eq_singleton;
PyObject *Eq_type;
PyObject *ExceptHandler_type;
PyObject *Expr_type;
PyObject *Expression_type;
PyObject *FloorDiv_singleton;
PyObject *FloorDiv_type;
PyObject *For_type;
PyObject *FormattedValue_type;
PyObject *FunctionDef_type;
PyObject *FunctionType_type;
PyObject *GeneratorExp_type;
PyObject *Global_type;
PyObject *GtE_singleton;
PyObject *GtE_type;
PyObject *Gt_singleton;
PyObject *Gt_type;
PyObject *IfExp_type;
PyObject *If_type;
PyObject *ImportFrom_type;
PyObject *Import_type;
PyObject *In_singleton;
PyObject *In_type;
PyObject *Interactive_type;
PyObject *Invert_singleton;
PyObject *Invert_type;
PyObject *IsNot_singleton;
PyObject *IsNot_type;
PyObject *Is_singleton;
PyObject *Is_type;
PyObject *JoinedStr_type;
PyObject *LShift_singleton;
PyObject *LShift_type;
PyObject *Lambda_type;
PyObject *ListComp_type;
PyObject *List_type;
PyObject *Load_singleton;
PyObject *Load_type;
PyObject *LtE_singleton;
PyObject *LtE_type;
PyObject *Lt_singleton;
PyObject *Lt_type;
PyObject *MatMult_singleton;
PyObject *MatMult_type;
PyObject *MatchAs_type;
PyObject *MatchClass_type;
PyObject *MatchMapping_type;
PyObject *MatchOr_type;
PyObject *MatchSequence_type;
PyObject *MatchSingleton_type;
PyObject *MatchStar_type;
PyObject *MatchValue_type;
PyObject *Match_type;
PyObject *Mod_singleton;
PyObject *Mod_type;
PyObject *Module_type;
PyObject *Mult_singleton;
PyObject *Mult_type;
PyObject *Name_type;
PyObject *NamedExpr_type;
PyObject *Nonlocal_type;
PyObject *NotEq_singleton;
PyObject *NotEq_type;
PyObject *NotIn_singleton;
PyObject *NotIn_type;
PyObject *Not_singleton;
PyObject *Not_type;
PyObject *Or_singleton;
PyObject *Or_type;
PyObject *ParamSpec_type;
PyObject *Pass_type;
PyObject *Pow_singleton;
PyObject *Pow_type;
PyObject *RShift_singleton;
PyObject *RShift_type;
PyObject *Raise_type;
PyObject *Return_type;
PyObject *SetComp_type;
PyObject *Set_type;
PyObject *Slice_type;
PyObject *Starred_type;
PyObject *Store_singleton;
PyObject *Store_type;
PyObject *Sub_singleton;
PyObject *Sub_type;
PyObject *Subscript_type;
PyObject *TryStar_type;
PyObject *Try_type;
PyObject *Tuple_type;
PyObject *TypeAlias_type;
PyObject *TypeIgnore_type;
PyObject *TypeVarTuple_type;
PyObject *TypeVar_type;
PyObject *UAdd_singleton;
PyObject *UAdd_type;
PyObject *USub_singleton;
PyObject *USub_type;
PyObject *UnaryOp_type;
PyObject *While_type;
PyObject *With_type;
PyObject *YieldFrom_type;
PyObject *Yield_type;
PyObject *__dict__;
PyObject *__doc__;
PyObject *__match_args__;
PyObject *__module__;
PyObject *_attributes;
PyObject *_fields;
PyObject *alias_type;
PyObject *annotation;
PyObject *arg;
PyObject *arg_type;
PyObject *args;
PyObject *argtypes;
PyObject *arguments_type;
PyObject *asname;
PyObject *ast;
PyObject *attr;
PyObject *bases;
PyObject *body;
PyObject *boolop_type;
PyObject *bound;
PyObject *cases;
PyObject *cause;
PyObject *cls;
PyObject *cmpop_type;
PyObject *col_offset;
PyObject *comparators;
PyObject *comprehension_type;
PyObject *context_expr;
PyObject *conversion;
PyObject *ctx;
PyObject *decorator_list;
PyObject *default_value;
PyObject *defaults;
PyObject *elt;
PyObject *elts;
PyObject *end_col_offset;
PyObject *end_lineno;
PyObject *exc;
PyObject *excepthandler_type;
PyObject *expr_context_type;
PyObject *expr_type;
PyObject *finalbody;
PyObject *format_spec;
PyObject *func;
PyObject *generators;
PyObject *guard;
PyObject *handlers;
PyObject *id;
PyObject *ifs;
PyObject *is_async;
PyObject *items;
PyObject *iter;
PyObject *key;
PyObject *keys;
PyObject *keyword_type;
PyObject *keywords;
PyObject *kind;
PyObject *kw_defaults;
PyObject *kwarg;
PyObject *kwd_attrs;
PyObject *kwd_patterns;
PyObject *kwonlyargs;
PyObject *left;
PyObject *level;
PyObject *lineno;
PyObject *lower;
PyObject *match_case_type;
PyObject *mod_type;
PyObject *module;
PyObject *msg;
PyObject *name;
PyObject *names;
PyObject *op;
PyObject *operand;
PyObject *operator_type;
PyObject *ops;
PyObject *optional_vars;
PyObject *orelse;
PyObject *pattern;
PyObject *pattern_type;
PyObject *patterns;
PyObject *posonlyargs;
PyObject *rest;
PyObject *returns;
PyObject *right;
PyObject *simple;
PyObject *slice;
PyObject *step;
PyObject *stmt_type;
PyObject *subject;
PyObject *tag;
PyObject *target;
PyObject *targets;
PyObject *test;
PyObject *type;
PyObject *type_comment;
PyObject *type_ignore_type;
PyObject *type_ignores;
PyObject *type_param_type;
PyObject *type_params;
PyObject *unaryop_type;
PyObject *upper;
PyObject *value;
PyObject *values;
PyObject *vararg;
PyObject *withitem_type;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_STATE_H */


@@ -0,0 +1,67 @@
#ifndef Py_INTERNAL_ATEXIT_H
#define Py_INTERNAL_ATEXIT_H
#include "pycore_lock.h" // PyMutex
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
//###############
// runtime atexit
typedef void (*atexit_callbackfunc)(void);
struct _atexit_runtime_state {
PyMutex mutex;
#define NEXITFUNCS 32
atexit_callbackfunc callbacks[NEXITFUNCS];
int ncallbacks;
};
//###################
// interpreter atexit
typedef void (*atexit_datacallbackfunc)(void *);
typedef struct atexit_callback {
atexit_datacallbackfunc func;
void *data;
struct atexit_callback *next;
} atexit_callback;
typedef struct {
PyObject *func;
PyObject *args;
PyObject *kwargs;
} atexit_py_callback;
struct atexit_state {
atexit_callback *ll_callbacks;
// Kept for ABI compatibility--do not use! (See GH-127791.)
atexit_callback *last_ll_callback;
// XXX The rest of the state could be moved to the atexit module state
// and a low-level callback added for it during module exec.
// For the moment we leave it here.
atexit_py_callback **callbacks;
int ncallbacks;
int callback_len;
};
// Export for '_interpchannels' shared extension
PyAPI_FUNC(int) _Py_AtExit(
PyInterpreterState *interp,
atexit_datacallbackfunc func,
void *data);
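/* Illustrative sketch (not part of the original header): registering a
   low-level interpreter atexit callback (`interp`, `my_free`, and `state`
   are hypothetical):
     static void my_free(void *data) { PyMem_RawFree(data); }
     ...
     if (_Py_AtExit(interp, my_free, state) < 0) {
       // registration failed
     }
*/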
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ATEXIT_H */


@@ -0,0 +1,145 @@
#ifndef Py_INTERNAL_BACKOFF_H
#define Py_INTERNAL_BACKOFF_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
typedef struct {
union {
struct {
uint16_t backoff : 4;
uint16_t value : 12;
};
uint16_t as_counter; // For printf("%#x", ...)
};
} _Py_BackoffCounter;
/* 16-bit countdown counters using exponential backoff.
These are used by the adaptive specializer to count down until
it is time to specialize an instruction. If specialization fails
the counter is reset using exponential backoff.
Another use is for the Tier 2 optimizer to decide when to create
a new Tier 2 trace (executor). Again, exponential backoff is used.
The 16-bit counter is structured as a 12-bit unsigned 'value'
and a 4-bit 'backoff' field. When resetting the counter, the
backoff field is incremented (until it reaches a limit) and the
value is set to a bit mask representing the value 2**backoff - 1.
The maximum backoff is 12 (the number of value bits).
There is an exceptional value which must not be updated, 0xFFFF.
*/
#define UNREACHABLE_BACKOFF 0xFFFF
static inline bool
is_unreachable_backoff_counter(_Py_BackoffCounter counter)
{
return counter.as_counter == UNREACHABLE_BACKOFF;
}
static inline _Py_BackoffCounter
make_backoff_counter(uint16_t value, uint16_t backoff)
{
assert(backoff <= 15);
assert(value <= 0xFFF);
_Py_BackoffCounter result;
result.value = value;
result.backoff = backoff;
return result;
}
static inline _Py_BackoffCounter
forge_backoff_counter(uint16_t counter)
{
_Py_BackoffCounter result;
result.as_counter = counter;
return result;
}
static inline _Py_BackoffCounter
restart_backoff_counter(_Py_BackoffCounter counter)
{
assert(!is_unreachable_backoff_counter(counter));
if (counter.backoff < 12) {
return make_backoff_counter((1 << (counter.backoff + 1)) - 1, counter.backoff + 1);
}
else {
return make_backoff_counter((1 << 12) - 1, 12);
}
}
static inline _Py_BackoffCounter
pause_backoff_counter(_Py_BackoffCounter counter)
{
return make_backoff_counter(counter.value | 1, counter.backoff);
}
static inline _Py_BackoffCounter
advance_backoff_counter(_Py_BackoffCounter counter)
{
if (!is_unreachable_backoff_counter(counter)) {
return make_backoff_counter((counter.value - 1) & 0xFFF, counter.backoff);
}
else {
return counter;
}
}
static inline bool
backoff_counter_triggers(_Py_BackoffCounter counter)
{
return counter.value == 0;
}
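/* Illustrative sketch (not part of the original header): the countdown/restart
   pattern described above, as a caller would typically drive it (the
   specialization attempt itself is hypothetical):
     counter = advance_backoff_counter(counter);     // count down by one
     if (backoff_counter_triggers(counter)) {
       // ... attempt to specialize / create a trace ...
       counter = restart_backoff_counter(counter);   // on failure: wait 2**(backoff+1) - 1
     }
*/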
/* Initial JUMP_BACKWARD counter.
* This determines when we create a trace for a loop.
* Backoff sequence 16, 32, 64, 128, 256, 512, 1024, 2048, 4096. */
#define JUMP_BACKWARD_INITIAL_VALUE 16
#define JUMP_BACKWARD_INITIAL_BACKOFF 4
static inline _Py_BackoffCounter
initial_jump_backoff_counter(void)
{
return make_backoff_counter(JUMP_BACKWARD_INITIAL_VALUE,
JUMP_BACKWARD_INITIAL_BACKOFF);
}
/* Initial exit temperature.
* Must be larger than ADAPTIVE_COOLDOWN_VALUE,
* otherwise when a side exit warms up we may construct
* a new trace before the Tier 1 code has properly re-specialized.
* Backoff sequence 64, 128, 256, 512, 1024, 2048, 4096. */
#define COLD_EXIT_INITIAL_VALUE 64
#define COLD_EXIT_INITIAL_BACKOFF 6
static inline _Py_BackoffCounter
initial_temperature_backoff_counter(void)
{
return make_backoff_counter(COLD_EXIT_INITIAL_VALUE,
COLD_EXIT_INITIAL_BACKOFF);
}
/* Unreachable backoff counter. */
static inline _Py_BackoffCounter
initial_unreachable_backoff_counter(void)
{
return forge_backoff_counter(UNREACHABLE_BACKOFF);
}
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BACKOFF_H */


@@ -0,0 +1,186 @@
/* Bit and bytes utilities.
Bytes swap functions, reverse order of bytes:
- _Py_bswap16(uint16_t)
- _Py_bswap32(uint32_t)
- _Py_bswap64(uint64_t)
*/
#ifndef Py_INTERNAL_BITUTILS_H
#define Py_INTERNAL_BITUTILS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#if defined(__GNUC__) \
&& ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
/* __builtin_bswap16() is available since GCC 4.8,
__builtin_bswap32() is available since GCC 4.3,
__builtin_bswap64() is available since GCC 4.3. */
# define _PY_HAVE_BUILTIN_BSWAP
#endif
#ifdef _MSC_VER
# include <intrin.h> // _byteswap_uint64()
#endif
static inline uint16_t
_Py_bswap16(uint16_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
return __builtin_bswap16(word);
#elif defined(_MSC_VER)
Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
return _byteswap_ushort(word);
#else
// Portable implementation which doesn't rely on circular bit shift
return ( ((word & UINT16_C(0x00FF)) << 8)
| ((word & UINT16_C(0xFF00)) >> 8));
#endif
}
static inline uint32_t
_Py_bswap32(uint32_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
return __builtin_bswap32(word);
#elif defined(_MSC_VER)
Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
return _byteswap_ulong(word);
#else
// Portable implementation which doesn't rely on circular bit shift
return ( ((word & UINT32_C(0x000000FF)) << 24)
| ((word & UINT32_C(0x0000FF00)) << 8)
| ((word & UINT32_C(0x00FF0000)) >> 8)
| ((word & UINT32_C(0xFF000000)) >> 24));
#endif
}
static inline uint64_t
_Py_bswap64(uint64_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
return __builtin_bswap64(word);
#elif defined(_MSC_VER)
return _byteswap_uint64(word);
#else
// Portable implementation which doesn't rely on circular bit shift
return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
| ((word & UINT64_C(0x000000000000FF00)) << 40)
| ((word & UINT64_C(0x0000000000FF0000)) << 24)
| ((word & UINT64_C(0x00000000FF000000)) << 8)
| ((word & UINT64_C(0x000000FF00000000)) >> 8)
| ((word & UINT64_C(0x0000FF0000000000)) >> 24)
| ((word & UINT64_C(0x00FF000000000000)) >> 40)
| ((word & UINT64_C(0xFF00000000000000)) >> 56));
#endif
}
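/* Illustrative examples (not part of the original header):
     _Py_bswap16(0x1234)             == 0x3412
     _Py_bswap32(0x12345678)         == 0x78563412
     _Py_bswap64(0x0102030405060708) == 0x0807060504030201
*/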
// Population count: count the number of 1's in 'x'
// (number of bits set to 1), also known as the hamming weight.
//
// Implementation note: to keep the implementation simple, CPUID is not used
// to test whether the x86 POPCNT instruction can be used (for example, Visual
// Studio's __popcnt() is not used for this reason). The clang and GCC builtin
// functions can use the x86 POPCNT instruction if the target architecture has
// SSE4a or newer.
static inline int
_Py_popcount32(uint32_t x)
{
#if (defined(__clang__) || defined(__GNUC__))
#if SIZEOF_INT >= 4
Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned int));
return __builtin_popcount(x);
#else
// The C standard guarantees that unsigned long will always be big enough
// to hold a uint32_t value without losing information.
Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned long));
return __builtin_popcountl(x);
#endif
#else
// 32-bit SWAR (SIMD Within A Register) popcount
// Binary: 0 1 0 1 ...
const uint32_t M1 = 0x55555555;
// Binary: 00 11 00 11. ..
const uint32_t M2 = 0x33333333;
// Binary: 0000 1111 0000 1111 ...
const uint32_t M4 = 0x0F0F0F0F;
// Put count of each 2 bits into those 2 bits
x = x - ((x >> 1) & M1);
// Put count of each 4 bits into those 4 bits
x = (x & M2) + ((x >> 2) & M2);
// Put count of each 8 bits into those 8 bits
x = (x + (x >> 4)) & M4;
// Sum of the 4 byte counts.
// Take care when considering changes to the next line. Portability and
// correctness are delicate here, thanks to C's "integer promotions" (C99
// §6.3.1.1p2). On machines where the `int` type has width greater than 32
// bits, `x` will be promoted to an `int`, and following C's "usual
// arithmetic conversions" (C99 §6.3.1.8), the multiplication will be
// performed as a multiplication of two `unsigned int` operands. In this
// case it's critical that we cast back to `uint32_t` in order to keep only
// the least significant 32 bits. On machines where the `int` type has
// width no greater than 32, the multiplication is of two 32-bit unsigned
// integer types, and the (uint32_t) cast is a no-op. In both cases, we
// avoid the risk of undefined behaviour due to overflow of a
// multiplication of signed integer types.
return (uint32_t)(x * 0x01010101U) >> 24;
#endif
}
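/* Illustrative examples (not part of the original header):
     _Py_popcount32(0x0)        == 0
     _Py_popcount32(0xF0F0)     == 8
     _Py_popcount32(0xFFFFFFFF) == 32
*/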
// Return the index of the most significant 1 bit in 'x'. This is the smallest
// integer k such that x < 2**k. Equivalent to floor(log2(x)) + 1 for x != 0.
static inline int
_Py_bit_length(unsigned long x)
{
#if (defined(__clang__) || defined(__GNUC__))
if (x != 0) {
// __builtin_clzl() is available since GCC 3.4.
// Undefined behavior for x == 0.
return (int)sizeof(unsigned long) * 8 - __builtin_clzl(x);
}
else {
return 0;
}
#elif defined(_MSC_VER)
// _BitScanReverse() is documented to search 32 bits.
Py_BUILD_ASSERT(sizeof(unsigned long) <= 4);
unsigned long msb;
if (_BitScanReverse(&msb, x)) {
return (int)msb + 1;
}
else {
return 0;
}
#else
const int BIT_LENGTH_TABLE[32] = {
0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
};
int msb = 0;
while (x >= 32) {
msb += 6;
x >>= 6;
}
msb += BIT_LENGTH_TABLE[x];
return msb;
#endif
}
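/* Illustrative examples (not part of the original header):
     _Py_bit_length(0) == 0
     _Py_bit_length(1) == 1
     _Py_bit_length(8) == 4   // floor(log2(8)) + 1
*/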
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BITUTILS_H */


@@ -0,0 +1,321 @@
/*
_BlocksOutputBuffer is used to maintain an output buffer
that has unpredictable size. Suitable for compression/decompression
API (bz2/lzma/zlib) that has stream->next_out and stream->avail_out:
stream->next_out: point to the next output position.
stream->avail_out: the number of available bytes left in the buffer.
It maintains a list of bytes objects, so there is no overhead of resizing
the buffer.
Usage:
1, Initialize the struct instance like this:
_BlocksOutputBuffer buffer = {.list = NULL};
Set .list to NULL for _BlocksOutputBuffer_OnError()
2, Initialize the buffer use one of these functions:
_BlocksOutputBuffer_InitAndGrow()
_BlocksOutputBuffer_InitWithSize()
3, If (avail_out == 0), grow the buffer:
_BlocksOutputBuffer_Grow()
4, Get the current outputted data size:
_BlocksOutputBuffer_GetDataSize()
5, Finish the buffer, and return a bytes object:
_BlocksOutputBuffer_Finish()
6, Clean up the buffer when an error occurred:
_BlocksOutputBuffer_OnError()
*/
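/* Illustrative sketch (not part of the original header) following the six
   steps above; `produce_output` stands in for a compression call and is
   hypothetical, error paths abbreviated:
     _BlocksOutputBuffer buffer = {.list = NULL};
     void *next_out;
     Py_ssize_t avail_out = _BlocksOutputBuffer_InitAndGrow(&buffer, -1, &next_out);
     if (avail_out < 0) goto error;
     for (;;) {
       if (avail_out == 0) {
         avail_out = _BlocksOutputBuffer_Grow(&buffer, &next_out, avail_out);
         if (avail_out < 0) goto error;
       }
       if (!produce_output(&next_out, &avail_out)) break;  // writes into next_out
     }
     PyObject *result = _BlocksOutputBuffer_Finish(&buffer, avail_out);
     if (result == NULL) goto error;
     return result;
   error:
     _BlocksOutputBuffer_OnError(&buffer);
     return NULL;
*/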
#ifndef Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
#define Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
#ifdef __cplusplus
extern "C" {
#endif
#include "Python.h"
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef struct {
// List of bytes objects
PyObject *list;
// Total allocated size of all blocks
Py_ssize_t allocated;
// Max length of the buffer, negative number means unlimited length.
Py_ssize_t max_length;
} _BlocksOutputBuffer;
static const char unable_allocate_msg[] = "Unable to allocate output buffer.";
/* In a 32-bit build, the max block size should be <= INT32_MAX. */
#define OUTPUT_BUFFER_MAX_BLOCK_SIZE (256*1024*1024)
/* Block size sequence */
#define KB (1024)
#define MB (1024*1024)
static const Py_ssize_t BUFFER_BLOCK_SIZE[] =
{ 32*KB, 64*KB, 256*KB, 1*MB, 4*MB, 8*MB, 16*MB, 16*MB,
32*MB, 32*MB, 32*MB, 32*MB, 64*MB, 64*MB, 128*MB, 128*MB,
OUTPUT_BUFFER_MAX_BLOCK_SIZE };
#undef KB
#undef MB
/* According to the block sizes defined by BUFFER_BLOCK_SIZE, the whole
allocated size growth step is:
1 32 KB +32 KB
2 96 KB +64 KB
3 352 KB +256 KB
4 1.34 MB +1 MB
5 5.34 MB +4 MB
6 13.34 MB +8 MB
7 29.34 MB +16 MB
8 45.34 MB +16 MB
9 77.34 MB +32 MB
10 109.34 MB +32 MB
11 141.34 MB +32 MB
12 173.34 MB +32 MB
13 237.34 MB +64 MB
14 301.34 MB +64 MB
15 429.34 MB +128 MB
16 557.34 MB +128 MB
17 813.34 MB +256 MB
18 1069.34 MB +256 MB
19 1325.34 MB +256 MB
20 1581.34 MB +256 MB
21 1837.34 MB +256 MB
22 2093.34 MB +256 MB
...
*/
/* Initialize the buffer, and grow the buffer.
max_length: Max length of the buffer, -1 for unlimited length.
On success, return allocated size (>=0)
On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer,
const Py_ssize_t max_length,
void **next_out)
{
PyObject *b;
Py_ssize_t block_size;
// ensure .list was set to NULL
assert(buffer->list == NULL);
// get block size
if (0 <= max_length && max_length < BUFFER_BLOCK_SIZE[0]) {
block_size = max_length;
} else {
block_size = BUFFER_BLOCK_SIZE[0];
}
// the first block
b = PyBytes_FromStringAndSize(NULL, block_size);
if (b == NULL) {
return -1;
}
// create the list
buffer->list = PyList_New(1);
if (buffer->list == NULL) {
Py_DECREF(b);
return -1;
}
PyList_SET_ITEM(buffer->list, 0, b);
// set variables
buffer->allocated = block_size;
buffer->max_length = max_length;
*next_out = PyBytes_AS_STRING(b);
return block_size;
}
/* Initialize the buffer, with an initial size.
Check the block size limit in the outer wrapper function. For example, some
libs accept UINT32_MAX as the maximum block size, in which case init_size
should be <= it.
On success, return allocated size (>=0)
On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer,
const Py_ssize_t init_size,
void **next_out)
{
PyObject *b;
// ensure .list was set to NULL
assert(buffer->list == NULL);
// the first block
b = PyBytes_FromStringAndSize(NULL, init_size);
if (b == NULL) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return -1;
}
// create the list
buffer->list = PyList_New(1);
if (buffer->list == NULL) {
Py_DECREF(b);
return -1;
}
PyList_SET_ITEM(buffer->list, 0, b);
// set variables
buffer->allocated = init_size;
buffer->max_length = -1;
*next_out = PyBytes_AS_STRING(b);
return init_size;
}
/* Grow the buffer. avail_out must be 0; check this before calling.
On success, return allocated size (>=0)
On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_Grow(_BlocksOutputBuffer *buffer,
void **next_out,
const Py_ssize_t avail_out)
{
PyObject *b;
const Py_ssize_t list_len = Py_SIZE(buffer->list);
Py_ssize_t block_size;
// ensure no gaps in the data
if (avail_out != 0) {
PyErr_SetString(PyExc_SystemError,
"avail_out is non-zero in _BlocksOutputBuffer_Grow().");
return -1;
}
// get block size
if (list_len < (Py_ssize_t) Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE)) {
block_size = BUFFER_BLOCK_SIZE[list_len];
} else {
block_size = BUFFER_BLOCK_SIZE[Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE) - 1];
}
// check max_length
if (buffer->max_length >= 0) {
        // the caller must not grow the buffer when rest == 0
Py_ssize_t rest = buffer->max_length - buffer->allocated;
assert(rest > 0);
        // cap the final block at the remaining length
if (block_size > rest) {
block_size = rest;
}
}
// check buffer->allocated overflow
if (block_size > PY_SSIZE_T_MAX - buffer->allocated) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return -1;
}
// create the block
b = PyBytes_FromStringAndSize(NULL, block_size);
if (b == NULL) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return -1;
}
if (PyList_Append(buffer->list, b) < 0) {
Py_DECREF(b);
return -1;
}
Py_DECREF(b);
// set variables
buffer->allocated += block_size;
*next_out = PyBytes_AS_STRING(b);
return block_size;
}
/* Return the size of the data output so far. */
static inline Py_ssize_t
_BlocksOutputBuffer_GetDataSize(_BlocksOutputBuffer *buffer,
const Py_ssize_t avail_out)
{
return buffer->allocated - avail_out;
}
/* Finish the buffer.
Return a bytes object on success
Return NULL on failure
*/
static inline PyObject *
_BlocksOutputBuffer_Finish(_BlocksOutputBuffer *buffer,
const Py_ssize_t avail_out)
{
PyObject *result, *block;
const Py_ssize_t list_len = Py_SIZE(buffer->list);
// fast path for single block
if ((list_len == 1 && avail_out == 0) ||
(list_len == 2 && Py_SIZE(PyList_GET_ITEM(buffer->list, 1)) == avail_out))
{
block = PyList_GET_ITEM(buffer->list, 0);
Py_INCREF(block);
Py_CLEAR(buffer->list);
return block;
}
// final bytes object
result = PyBytes_FromStringAndSize(NULL, buffer->allocated - avail_out);
if (result == NULL) {
PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
return NULL;
}
// memory copy
if (list_len > 0) {
char *posi = PyBytes_AS_STRING(result);
// blocks except the last one
Py_ssize_t i = 0;
for (; i < list_len-1; i++) {
block = PyList_GET_ITEM(buffer->list, i);
memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block));
posi += Py_SIZE(block);
}
// the last block
block = PyList_GET_ITEM(buffer->list, i);
memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block) - avail_out);
} else {
assert(Py_SIZE(result) == 0);
}
Py_CLEAR(buffer->list);
return result;
}
/* Clean up the buffer when an error occurred. */
static inline void
_BlocksOutputBuffer_OnError(_BlocksOutputBuffer *buffer)
{
Py_CLEAR(buffer->list);
}
#ifdef __cplusplus
}
#endif
#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */
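Taken together, the six steps documented at the top of this header look like the
following. This is a minimal sketch, not CPython code: it assumes a hypothetical
produce(dst, cap) callback that writes up to cap bytes into dst and returns the
number written (0 when the source is exhausted); everything else uses only the
API declared above.

/* Hypothetical data source: writes up to `cap` bytes into `dst`,
   returns the number of bytes written, 0 when exhausted. */
extern Py_ssize_t produce(void *dst, Py_ssize_t cap);

static PyObject *
example_collect_output(void)
{
    _BlocksOutputBuffer buffer = {.list = NULL};    // step 1: init the struct
    void *next_out;

    // step 2: allocate the first block (-1 means unlimited total length)
    Py_ssize_t avail_out =
        _BlocksOutputBuffer_InitAndGrow(&buffer, -1, &next_out);
    if (avail_out < 0) {
        return NULL;
    }

    for (;;) {
        Py_ssize_t n = produce(next_out, avail_out);
        if (n == 0) {
            break;                                  // source exhausted
        }
        next_out = (char *)next_out + n;
        avail_out -= n;
        if (avail_out == 0) {
            // step 3: the current block is full, append another one
            avail_out = _BlocksOutputBuffer_Grow(&buffer, &next_out, 0);
            if (avail_out < 0) {
                goto error;
            }
        }
    }
    // step 5: concatenate all blocks into one bytes object
    // (step 4, _BlocksOutputBuffer_GetDataSize(), would report progress here)
    return _BlocksOutputBuffer_Finish(&buffer, avail_out);

error:
    _BlocksOutputBuffer_OnError(&buffer);           // step 6: clean up
    return NULL;
}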

View File

@@ -0,0 +1,74 @@
#ifndef Py_INTERNAL_BRC_H
#define Py_INTERNAL_BRC_H
#include <stdint.h>
#include "pycore_llist.h" // struct llist_node
#include "pycore_lock.h" // PyMutex
#include "pycore_object_stack.h" // _PyObjectStack
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#ifdef Py_GIL_DISABLED
// Prime number to avoid correlations with memory addresses.
#define _Py_BRC_NUM_BUCKETS 257
// Hash table bucket
struct _brc_bucket {
// Mutex protects both the bucket and thread state queues in this bucket.
PyMutex mutex;
// Linked list of _PyThreadStateImpl objects hashed to this bucket.
struct llist_node root;
};
// Per-interpreter biased reference counting state
struct _brc_state {
// Hash table of thread states by thread-id. Thread states within a bucket
// are chained using a doubly-linked list.
struct _brc_bucket table[_Py_BRC_NUM_BUCKETS];
};
// Per-thread biased reference counting state
struct _brc_thread_state {
// Linked-list of thread states per hash bucket
struct llist_node bucket_node;
// Thread-id as determined by _PyThread_Id()
uintptr_t tid;
// Objects with refcounts to be merged (protected by bucket mutex)
_PyObjectStack objects_to_merge;
// Local stack of objects to be merged (not accessed by other threads)
_PyObjectStack local_objects_to_merge;
};
// Initialize/finalize the per-thread biased reference counting state
void _Py_brc_init_thread(PyThreadState *tstate);
void _Py_brc_remove_thread(PyThreadState *tstate);
// Initialize per-interpreter state
void _Py_brc_init_state(PyInterpreterState *interp);
void _Py_brc_after_fork(PyInterpreterState *interp);
// Enqueue an object to be merged by its owning thread (tid). This
// steals a reference to the object.
void _Py_brc_queue_object(PyObject *ob);
// Merge the refcounts of queued objects for the current thread.
void _Py_brc_merge_refcounts(PyThreadState *tstate);
#endif /* Py_GIL_DISABLED */
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BRC_H */
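The bucket lookup implied by the comments above is a plain modulo hash of the
thread id. A sketch, compiled only under Py_GIL_DISABLED like the rest of this
header; the assumption here is that the interpreter state embeds the
per-interpreter struct _brc_state as a field named brc, and the function name
is invented for illustration.

#ifdef Py_GIL_DISABLED
// Sketch: map a thread id to its hash-table bucket. The prime table size
// (257) breaks up correlations with pointer-derived thread ids.
// Assumes PyInterpreterState has a `struct _brc_state brc` member.
static inline struct _brc_bucket *
example_brc_bucket(PyInterpreterState *interp, uintptr_t tid)
{
    return &interp->brc.table[tid % _Py_BRC_NUM_BUCKETS];
}
#endif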

View File

@@ -0,0 +1,82 @@
#ifndef Py_LIMITED_API
#ifndef Py_BYTES_CTYPE_H
#define Py_BYTES_CTYPE_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/*
 * The internal implementations behind the PyBytes (bytes) and PyByteArray
 * (bytearray) methods of the given names. They operate on ASCII byte strings.
 */
extern PyObject* _Py_bytes_isspace(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isalpha(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isalnum(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isascii(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isdigit(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_islower(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isupper(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_istitle(const char *cptr, Py_ssize_t len);
/* These store their len-sized answer in the given preallocated *result arg. */
extern void _Py_bytes_lower(char *result, const char *cptr, Py_ssize_t len);
extern void _Py_bytes_upper(char *result, const char *cptr, Py_ssize_t len);
extern void _Py_bytes_title(char *result, const char *s, Py_ssize_t len);
extern void _Py_bytes_capitalize(char *result, const char *s, Py_ssize_t len);
extern void _Py_bytes_swapcase(char *result, const char *s, Py_ssize_t len);
extern PyObject *_Py_bytes_find(const char *str, Py_ssize_t len, PyObject *sub,
Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_index(const char *str, Py_ssize_t len, PyObject *sub,
Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_rfind(const char *str, Py_ssize_t len, PyObject *sub,
Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_rindex(const char *str, Py_ssize_t len, PyObject *sub,
Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_count(const char *str, Py_ssize_t len, PyObject *sub,
Py_ssize_t start, Py_ssize_t end);
extern int _Py_bytes_contains(const char *str, Py_ssize_t len, PyObject *arg);
extern PyObject *_Py_bytes_startswith(const char *str, Py_ssize_t len,
PyObject *subobj, Py_ssize_t start,
Py_ssize_t end);
extern PyObject *_Py_bytes_endswith(const char *str, Py_ssize_t len,
PyObject *subobj, Py_ssize_t start,
Py_ssize_t end);
/* The maketrans() static method. */
extern PyObject* _Py_bytes_maketrans(Py_buffer *frm, Py_buffer *to);
/* Shared __doc__ strings. */
extern const char _Py_isspace__doc__[];
extern const char _Py_isalpha__doc__[];
extern const char _Py_isalnum__doc__[];
extern const char _Py_isascii__doc__[];
extern const char _Py_isdigit__doc__[];
extern const char _Py_islower__doc__[];
extern const char _Py_isupper__doc__[];
extern const char _Py_istitle__doc__[];
extern const char _Py_lower__doc__[];
extern const char _Py_upper__doc__[];
extern const char _Py_title__doc__[];
extern const char _Py_capitalize__doc__[];
extern const char _Py_swapcase__doc__[];
extern const char _Py_count__doc__[];
extern const char _Py_find__doc__[];
extern const char _Py_index__doc__[];
extern const char _Py_rfind__doc__[];
extern const char _Py_rindex__doc__[];
extern const char _Py_startswith__doc__[];
extern const char _Py_endswith__doc__[];
extern const char _Py_maketrans__doc__[];
extern const char _Py_expandtabs__doc__[];
extern const char _Py_ljust__doc__[];
extern const char _Py_rjust__doc__[];
extern const char _Py_center__doc__[];
extern const char _Py_zfill__doc__[];
/* This is needed because some docstrings are shared from the .o file, not declared static. */
#define PyDoc_STRVAR_shared(name,str) const char name[] = PyDoc_STR(str)
#endif /* !Py_BYTES_CTYPE_H */
#endif /* !Py_LIMITED_API */
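The case-mapping functions above follow a preallocated-result convention: the
caller allocates a buffer of the same length and the function fills it. A
minimal sketch of a hypothetical wrapper around _Py_bytes_lower():

// Sketch: allocate an uninitialized bytes object of the same length, then
// let _Py_bytes_lower() write its len-sized answer into it.
static PyObject *
example_bytes_lower(const char *cptr, Py_ssize_t len)
{
    PyObject *result = PyBytes_FromStringAndSize(NULL, len);
    if (result == NULL) {
        return NULL;
    }
    _Py_bytes_lower(PyBytes_AS_STRING(result), cptr, len);
    return result;
}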

View File

@@ -0,0 +1,148 @@
#ifndef Py_INTERNAL_BYTESOBJECT_H
#define Py_INTERNAL_BYTESOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
extern PyObject* _PyBytes_FormatEx(
const char *format,
Py_ssize_t format_len,
PyObject *args,
int use_bytearray);
extern PyObject* _PyBytes_FromHex(
PyObject *string,
int use_bytearray);
// Helper for PyBytes_DecodeEscape that detects invalid escape chars.
// Export for test_peg_generator.
PyAPI_FUNC(PyObject*) _PyBytes_DecodeEscape(const char *, Py_ssize_t,
const char *, const char **);
// Substring Search.
//
// Returns the index of the first occurrence of
// a substring ("needle") in a larger text ("haystack").
// If the needle is not found, return -1.
// If the needle is found, offset is added to the returned index.
//
// Export for 'mmap' shared extension.
PyAPI_FUNC(Py_ssize_t)
_PyBytes_Find(const char *haystack, Py_ssize_t len_haystack,
const char *needle, Py_ssize_t len_needle,
Py_ssize_t offset);
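/* Illustrative sketch (not part of this header) of the offset parameter:
   searching a window that starts partway into a larger mapping. The wrapper
   name and the starting position are hypothetical, and map_len is assumed to
   be >= start. A match at window index i is reported as start + i, because
   offset is added to the returned index. */
static inline Py_ssize_t
example_find_in_window(const char *map, Py_ssize_t map_len)
{
    const Py_ssize_t start = 100;   /* window begins at byte 100 */
    return _PyBytes_Find(map + start, map_len - start,
                         "\r\n", 2,
                         start);    /* offset: added to the result */
}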
// Same as above, but search right-to-left.
// Export for 'mmap' shared extension.
PyAPI_FUNC(Py_ssize_t)
_PyBytes_ReverseFind(const char *haystack, Py_ssize_t len_haystack,
const char *needle, Py_ssize_t len_needle,
Py_ssize_t offset);
// Helper function to implement the repeat and inplace repeat methods on a
// buffer.
//
// len_dest is assumed to be an integer multiple of len_src.
// If src equals dest, the operation is assumed to be in place.
//
// This method repeatedly doubles the number of bytes copied to reduce
// the number of memcpy invocations.
//
// Export for 'array' shared extension.
PyAPI_FUNC(void)
_PyBytes_Repeat(char* dest, Py_ssize_t len_dest,
const char* src, Py_ssize_t len_src);
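/* Illustrative sketch (not part of this header) of the doubling technique:
   after seeding dest with one copy of src, each memcpy() copies the entire
   prefix written so far, so the number of calls is O(log(len_dest/len_src))
   instead of len_dest/len_src. The function name is invented; as documented
   above, len_dest is assumed to be a multiple of len_src. */
static inline void
example_repeat_doubling(char *dest, Py_ssize_t len_dest,
                        const char *src, Py_ssize_t len_src)
{
    if (len_dest == 0 || len_src == 0) {
        return;
    }
    if (src != dest) {
        memcpy(dest, src, len_src);     /* seed: one copy of src */
    }
    Py_ssize_t copied = len_src;
    while (copied < len_dest) {
        Py_ssize_t n = (copied <= len_dest - copied)
                       ? copied : (len_dest - copied);
        memcpy(dest + copied, dest, n); /* double the written prefix */
        copied += n;
    }
}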
/* --- _PyBytesWriter ----------------------------------------------------- */
/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer".
   A _PyBytesWriter variable must be declared last among a function's local
   variables to optimize memory allocation on the stack. */
typedef struct {
/* bytes, bytearray or NULL (when the small buffer is used) */
PyObject *buffer;
    /* Number of allocated bytes. */
Py_ssize_t allocated;
/* Minimum number of allocated bytes,
incremented by _PyBytesWriter_Prepare() */
Py_ssize_t min_size;
/* If non-zero, use a bytearray instead of a bytes object for buffer. */
int use_bytearray;
/* If non-zero, overallocate the buffer (default: 0).
This flag must be zero if use_bytearray is non-zero. */
int overallocate;
/* Stack buffer */
int use_small_buffer;
char small_buffer[512];
} _PyBytesWriter;
/* Initialize a bytes writer
   By default, overallocation is disabled. Set the overallocate attribute to
   control buffer allocation.
Export _PyBytesWriter API for '_pickle' shared extension. */
PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer);
/* Get the buffer content and reset the writer.
Return a bytes object, or a bytearray object if use_bytearray is non-zero.
Raise an exception and return NULL on error. */
PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer,
void *str);
/* Deallocate memory of a writer (clear its internal buffer). */
PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer);
/* Allocate the buffer to write size bytes.
Return the pointer to the beginning of buffer data.
Raise an exception and return NULL on error. */
PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer,
Py_ssize_t size);
/* Ensure that the buffer is large enough to write *size* bytes.
Add size to the writer minimum size (min_size attribute).
str is the current pointer inside the buffer.
Return the updated current pointer inside the buffer.
Raise an exception and return NULL on error. */
PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer,
void *str,
Py_ssize_t size);
/* Resize the buffer to make it larger.
The new buffer may be larger than size bytes because of overallocation.
Return the updated current pointer inside the buffer.
Raise an exception and return NULL on error.
Note: size must be greater than the number of allocated bytes in the writer.
This function doesn't use the writer minimum size (min_size attribute).
See also _PyBytesWriter_Prepare().
*/
PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer,
void *str,
Py_ssize_t size);
/* Write bytes.
Raise an exception and return NULL on error. */
PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer,
void *str,
const void *bytes,
Py_ssize_t size);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BYTESOBJECT_H */
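A minimal end-to-end sketch of the writer API above: wrap a payload in angle
brackets, letting _PyBytesWriter_WriteBytes() grow the buffer as needed. The
function name and the framing bytes are invented for illustration.

static PyObject *
example_build_bytes(const char *data, Py_ssize_t len)
{
    char *str;
    _PyBytesWriter writer;   /* declared last, per the comment above */

    _PyBytesWriter_Init(&writer);
    /* Reserve room for '<' + payload + '>' up front. */
    str = _PyBytesWriter_Alloc(&writer, len + 2);
    if (str == NULL) {
        return NULL;
    }
    *str++ = '<';
    str = _PyBytesWriter_WriteBytes(&writer, str, data, len);
    if (str == NULL) {
        goto error;
    }
    *str++ = '>';
    /* str marks the end of the written data. */
    return _PyBytesWriter_Finish(&writer, str);

error:
    _PyBytesWriter_Dealloc(&writer);
    return NULL;
}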

Some files were not shown because too many files have changed in this diff.