Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)

2025-04-24 00:42:19 -06:00
parent 785265d3e7
commit 9c68cdea84
7786 changed files with 2386458 additions and 217 deletions

View File

@@ -0,0 +1,87 @@
#ifndef Py_CPYTHON_ABSTRACTOBJECT_H
# error "this header file must not be included directly"
#endif
/* === Object Protocol ================================================== */
/* Like PyObject_CallMethod(), but expects a _Py_Identifier*
as the method name. */
PyAPI_FUNC(PyObject*) _PyObject_CallMethodId(
PyObject *obj,
_Py_Identifier *name,
const char *format, ...);
/* Convert keyword arguments from the FASTCALL (stack: C array, kwnames: tuple)
format to a Python dictionary ("kwargs" dict).
The type of kwnames keys is not checked. The final function getting the
arguments is responsible for checking that all keys are strings, for example
by using PyArg_ParseTupleAndKeywords() or PyArg_ValidateKeywordArguments().
Duplicate keys are merged, keeping the last value. If duplicate keys must raise
an exception, the caller is responsible for implementing an explicit check of
the keys in kwnames. */
PyAPI_FUNC(PyObject*) _PyStack_AsDict(PyObject *const *values, PyObject *kwnames);
/* === Vectorcall protocol (PEP 590) ============================= */
// PyVectorcall_NARGS() is exported as a function for the stable ABI.
// Here (when we are not using the stable ABI), the name is overridden to
// call a static inline function for best performance.
static inline Py_ssize_t
_PyVectorcall_NARGS(size_t n)
{
return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET;
}
#define PyVectorcall_NARGS(n) _PyVectorcall_NARGS(n)
PyAPI_FUNC(vectorcallfunc) PyVectorcall_Function(PyObject *callable);
// Backwards compatibility aliases (PEP 590) for API that was provisional
// in Python 3.8
#define _PyObject_Vectorcall PyObject_Vectorcall
#define _PyObject_VectorcallMethod PyObject_VectorcallMethod
#define _PyObject_FastCallDict PyObject_VectorcallDict
#define _PyVectorcall_Function PyVectorcall_Function
#define _PyObject_CallOneArg PyObject_CallOneArg
#define _PyObject_CallMethodNoArgs PyObject_CallMethodNoArgs
#define _PyObject_CallMethodOneArg PyObject_CallMethodOneArg
/* Same as PyObject_Vectorcall except that keyword arguments are passed as
dict, which may be NULL if there are no keyword arguments. */
PyAPI_FUNC(PyObject *) PyObject_VectorcallDict(
PyObject *callable,
PyObject *const *args,
size_t nargsf,
PyObject *kwargs);
PyAPI_FUNC(PyObject *) PyObject_CallOneArg(PyObject *func, PyObject *arg);
static inline PyObject *
PyObject_CallMethodNoArgs(PyObject *self, PyObject *name)
{
size_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
return PyObject_VectorcallMethod(name, &self, nargsf, _Py_NULL);
}
static inline PyObject *
PyObject_CallMethodOneArg(PyObject *self, PyObject *name, PyObject *arg)
{
PyObject *args[2] = {self, arg};
size_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET;
assert(arg != NULL);
return PyObject_VectorcallMethod(name, args, nargsf, _Py_NULL);
}
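
As an illustration of the call helpers above, here is a minimal sketch of invoking obj.update(arg) through PyObject_CallMethodOneArg(); it assumes a running interpreter with the GIL held, and PyUnicode_FromString() from the wider C API for building the method name.

#include <Python.h>

/* Sketch only: equivalent of "obj.update(arg)" via the vectorcall helpers. */
static int
call_update(PyObject *obj, PyObject *arg)
{
    PyObject *name = PyUnicode_FromString("update");
    if (name == NULL) {
        return -1;
    }
    /* Returns a new reference, or NULL with an exception set. */
    PyObject *res = PyObject_CallMethodOneArg(obj, name, arg);
    Py_DECREF(name);
    if (res == NULL) {
        return -1;
    }
    Py_DECREF(res);
    return 0;
}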
/* Guess the size of object 'o' using len(o) or o.__length_hint__().
If neither of those return a non-negative value, then return the default
value. If one of the calls fails, this function returns -1. */
PyAPI_FUNC(Py_ssize_t) PyObject_LengthHint(PyObject *o, Py_ssize_t);
/* === Sequence protocol ================================================ */
/* Assume tp_as_sequence and sq_item exist and that 'i' does not
need to be corrected for a negative index. */
#define PySequence_ITEM(o, i)\
( Py_TYPE(o)->tp_as_sequence->sq_item((o), (i)) )

View File

@@ -0,0 +1,34 @@
#ifndef Py_CPYTHON_BYTEARRAYOBJECT_H
# error "this header file must not be included directly"
#endif
/* Object layout */
typedef struct {
PyObject_VAR_HEAD
Py_ssize_t ob_alloc; /* How many bytes allocated in ob_bytes */
char *ob_bytes; /* Physical backing buffer */
char *ob_start; /* Logical start inside ob_bytes */
Py_ssize_t ob_exports; /* How many buffer exports */
} PyByteArrayObject;
PyAPI_DATA(char) _PyByteArray_empty_string[];
/* Macros and static inline functions, trading safety for speed */
#define _PyByteArray_CAST(op) \
(assert(PyByteArray_Check(op)), _Py_CAST(PyByteArrayObject*, op))
static inline char* PyByteArray_AS_STRING(PyObject *op)
{
PyByteArrayObject *self = _PyByteArray_CAST(op);
if (Py_SIZE(self)) {
return self->ob_start;
}
return _PyByteArray_empty_string;
}
#define PyByteArray_AS_STRING(self) PyByteArray_AS_STRING(_PyObject_CAST(self))
static inline Py_ssize_t PyByteArray_GET_SIZE(PyObject *op) {
PyByteArrayObject *self = _PyByteArray_CAST(op);
return Py_SIZE(self);
}
#define PyByteArray_GET_SIZE(self) PyByteArray_GET_SIZE(_PyObject_CAST(self))

View File

@@ -0,0 +1,37 @@
#ifndef Py_CPYTHON_BYTESOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_VAR_HEAD
Py_DEPRECATED(3.11) Py_hash_t ob_shash;
char ob_sval[1];
/* Invariants:
* ob_sval contains space for 'ob_size+1' elements.
* ob_sval[ob_size] == 0.
* ob_shash is the hash of the byte string or -1 if not computed yet.
*/
} PyBytesObject;
PyAPI_FUNC(int) _PyBytes_Resize(PyObject **, Py_ssize_t);
/* Macros and static inline functions, trading safety for speed */
#define _PyBytes_CAST(op) \
(assert(PyBytes_Check(op)), _Py_CAST(PyBytesObject*, op))
static inline char* PyBytes_AS_STRING(PyObject *op)
{
return _PyBytes_CAST(op)->ob_sval;
}
#define PyBytes_AS_STRING(op) PyBytes_AS_STRING(_PyObject_CAST(op))
static inline Py_ssize_t PyBytes_GET_SIZE(PyObject *op) {
PyBytesObject *self = _PyBytes_CAST(op);
return Py_SIZE(self);
}
#define PyBytes_GET_SIZE(self) PyBytes_GET_SIZE(_PyObject_CAST(self))
/* _PyBytes_Join(sep, x) is like sep.join(x). sep must be PyBytesObject*,
x must be an iterable object. */
PyAPI_FUNC(PyObject*) _PyBytes_Join(PyObject *sep, PyObject *x);

View File

@@ -0,0 +1,44 @@
/* Cell object interface */
#ifndef Py_LIMITED_API
#ifndef Py_CELLOBJECT_H
#define Py_CELLOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
PyObject_HEAD
/* Content of the cell or NULL when empty */
PyObject *ob_ref;
} PyCellObject;
PyAPI_DATA(PyTypeObject) PyCell_Type;
#define PyCell_Check(op) Py_IS_TYPE((op), &PyCell_Type)
PyAPI_FUNC(PyObject *) PyCell_New(PyObject *);
PyAPI_FUNC(PyObject *) PyCell_Get(PyObject *);
PyAPI_FUNC(int) PyCell_Set(PyObject *, PyObject *);
static inline PyObject* PyCell_GET(PyObject *op) {
PyCellObject *cell;
assert(PyCell_Check(op));
cell = _Py_CAST(PyCellObject*, op);
return cell->ob_ref;
}
#define PyCell_GET(op) PyCell_GET(_PyObject_CAST(op))
static inline void PyCell_SET(PyObject *op, PyObject *value) {
PyCellObject *cell;
assert(PyCell_Check(op));
cell = _Py_CAST(PyCellObject*, op);
cell->ob_ref = value;
}
#define PyCell_SET(op, value) PyCell_SET(_PyObject_CAST(op), (value))
#ifdef __cplusplus
}
#endif
#endif /* !Py_CELLOBJECT_H */
#endif /* Py_LIMITED_API */
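
For orientation, a small hedged example of the cell API above, assuming a running interpreter; note that PyCell_Get() returns a new reference while the PyCell_GET() macro borrows.

#include <Python.h>

/* Sketch only: create an empty cell, store a value, then read it back. */
static int
demo_cell(PyObject *value)
{
    PyObject *cell = PyCell_New(NULL);      /* empty cell */
    if (cell == NULL) {
        return -1;
    }
    if (PyCell_Set(cell, value) < 0) {      /* cell takes its own reference to value */
        Py_DECREF(cell);
        return -1;
    }
    PyObject *current = PyCell_Get(cell);   /* new reference, or NULL if empty */
    Py_XDECREF(current);
    Py_DECREF(cell);
    return 0;
}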

View File

@@ -0,0 +1,25 @@
#ifndef Py_CPYTHON_CEVAL_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(void) PyEval_SetProfile(Py_tracefunc, PyObject *);
PyAPI_FUNC(void) PyEval_SetProfileAllThreads(Py_tracefunc, PyObject *);
PyAPI_FUNC(void) PyEval_SetTrace(Py_tracefunc, PyObject *);
PyAPI_FUNC(void) PyEval_SetTraceAllThreads(Py_tracefunc, PyObject *);
/* Look at the current frame's (if any) code's co_flags, and turn on
the corresponding compiler flags in cf->cf_flags. Return 1 if any
flag was set, else return 0. */
PyAPI_FUNC(int) PyEval_MergeCompilerFlags(PyCompilerFlags *cf);
PyAPI_FUNC(PyObject *) _PyEval_EvalFrameDefault(PyThreadState *tstate, struct _PyInterpreterFrame *f, int exc);
PyAPI_FUNC(Py_ssize_t) PyUnstable_Eval_RequestCodeExtraIndex(freefunc);
// Old name -- remove when this API changes:
_Py_DEPRECATED_EXTERNALLY(3.12) static inline Py_ssize_t
_PyEval_RequestCodeExtraIndex(freefunc f) {
return PyUnstable_Eval_RequestCodeExtraIndex(f);
}
PyAPI_FUNC(int) _PyEval_SliceIndex(PyObject *, Py_ssize_t *);
PyAPI_FUNC(int) _PyEval_SliceIndexNotNone(PyObject *, Py_ssize_t *);

View File

@@ -0,0 +1,71 @@
/* Former class object interface -- now only bound methods are here */
/* Revealing some structures (not for general use) */
#ifndef Py_LIMITED_API
#ifndef Py_CLASSOBJECT_H
#define Py_CLASSOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
PyObject_HEAD
PyObject *im_func; /* The callable object implementing the method */
PyObject *im_self; /* The instance it is bound to */
PyObject *im_weakreflist; /* List of weak references */
vectorcallfunc vectorcall;
} PyMethodObject;
PyAPI_DATA(PyTypeObject) PyMethod_Type;
#define PyMethod_Check(op) Py_IS_TYPE((op), &PyMethod_Type)
PyAPI_FUNC(PyObject *) PyMethod_New(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyMethod_Function(PyObject *);
PyAPI_FUNC(PyObject *) PyMethod_Self(PyObject *);
#define _PyMethod_CAST(meth) \
(assert(PyMethod_Check(meth)), _Py_CAST(PyMethodObject*, meth))
/* Static inline functions for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyObject* PyMethod_GET_FUNCTION(PyObject *meth) {
return _PyMethod_CAST(meth)->im_func;
}
#define PyMethod_GET_FUNCTION(meth) PyMethod_GET_FUNCTION(_PyObject_CAST(meth))
static inline PyObject* PyMethod_GET_SELF(PyObject *meth) {
return _PyMethod_CAST(meth)->im_self;
}
#define PyMethod_GET_SELF(meth) PyMethod_GET_SELF(_PyObject_CAST(meth))
typedef struct {
PyObject_HEAD
PyObject *func;
} PyInstanceMethodObject;
PyAPI_DATA(PyTypeObject) PyInstanceMethod_Type;
#define PyInstanceMethod_Check(op) Py_IS_TYPE((op), &PyInstanceMethod_Type)
PyAPI_FUNC(PyObject *) PyInstanceMethod_New(PyObject *);
PyAPI_FUNC(PyObject *) PyInstanceMethod_Function(PyObject *);
#define _PyInstanceMethod_CAST(meth) \
(assert(PyInstanceMethod_Check(meth)), \
_Py_CAST(PyInstanceMethodObject*, meth))
/* Static inline function for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyObject* PyInstanceMethod_GET_FUNCTION(PyObject *meth) {
return _PyInstanceMethod_CAST(meth)->func;
}
#define PyInstanceMethod_GET_FUNCTION(meth) PyInstanceMethod_GET_FUNCTION(_PyObject_CAST(meth))
#ifdef __cplusplus
}
#endif
#endif // !Py_CLASSOBJECT_H
#endif // !Py_LIMITED_API

View File

@@ -0,0 +1,358 @@
/* Definitions for bytecode */
#ifndef Py_LIMITED_API
#ifndef Py_CODE_H
#define Py_CODE_H
#ifdef __cplusplus
extern "C" {
#endif
/* Count of all local monitoring events */
#define _PY_MONITORING_LOCAL_EVENTS 10
/* Count of all "real" monitoring events (not derived from other events) */
#define _PY_MONITORING_UNGROUPED_EVENTS 15
/* Count of all monitoring events */
#define _PY_MONITORING_EVENTS 17
/* Tables of which tools are active for each monitored event. */
typedef struct _Py_LocalMonitors {
uint8_t tools[_PY_MONITORING_LOCAL_EVENTS];
} _Py_LocalMonitors;
typedef struct _Py_GlobalMonitors {
uint8_t tools[_PY_MONITORING_UNGROUPED_EVENTS];
} _Py_GlobalMonitors;
typedef struct {
PyObject *_co_code;
PyObject *_co_varnames;
PyObject *_co_cellvars;
PyObject *_co_freevars;
} _PyCoCached;
/* Ancillary data structure used for instrumentation.
Line instrumentation creates this with sufficient
space for one entry per code unit. The total size
of the data will be `bytes_per_entry * Py_SIZE(code)` */
typedef struct {
uint8_t bytes_per_entry;
uint8_t data[1];
} _PyCoLineInstrumentationData;
typedef struct {
int size;
int capacity;
struct _PyExecutorObject *executors[1];
} _PyExecutorArray;
/* Main data structure used for instrumentation.
* This is allocated when needed for instrumentation
*/
typedef struct {
/* Monitoring specific to this code object */
_Py_LocalMonitors local_monitors;
/* Monitoring that is active on this code object */
_Py_LocalMonitors active_monitors;
/* The tools that are to be notified for events for the matching code unit */
uint8_t *tools;
/* Information to support line events */
_PyCoLineInstrumentationData *lines;
/* The tools that are to be notified for line events for the matching code unit */
uint8_t *line_tools;
/* Information to support instruction events */
/* The underlying instructions, which can themselves be instrumented */
uint8_t *per_instruction_opcodes;
/* The tools that are to be notified for instruction events for the matching code unit */
uint8_t *per_instruction_tools;
} _PyCoMonitoringData;
// To avoid repeating ourselves in deepfreeze.py, all PyCodeObject members are
// defined in this macro:
#define _PyCode_DEF(SIZE) { \
PyObject_VAR_HEAD \
\
/* Note only the following fields are used in hash and/or comparisons \
* \
* - co_name \
* - co_argcount \
* - co_posonlyargcount \
* - co_kwonlyargcount \
* - co_nlocals \
* - co_stacksize \
* - co_flags \
* - co_firstlineno \
* - co_consts \
* - co_names \
* - co_localsplusnames \
* This is done to preserve the name and line number for tracebacks \
* and debuggers; otherwise, constant de-duplication would collapse \
* identical functions/lambdas defined on different lines. \
*/ \
\
/* These fields are set with provided values on new code objects. */ \
\
/* The hottest fields (in the eval loop) are grouped here at the top. */ \
PyObject *co_consts; /* list (constants used) */ \
PyObject *co_names; /* list of strings (names used) */ \
PyObject *co_exceptiontable; /* Byte string encoding exception handling \
table */ \
int co_flags; /* CO_..., see below */ \
\
/* The rest are not so impactful on performance. */ \
int co_argcount; /* #arguments, except *args */ \
int co_posonlyargcount; /* #positional only arguments */ \
int co_kwonlyargcount; /* #keyword only arguments */ \
int co_stacksize; /* #entries needed for evaluation stack */ \
int co_firstlineno; /* first source line number */ \
\
/* redundant values (derived from co_localsplusnames and \
co_localspluskinds) */ \
int co_nlocalsplus; /* number of local + cell + free variables */ \
int co_framesize; /* Size of frame in words */ \
int co_nlocals; /* number of local variables */ \
int co_ncellvars; /* total number of cell variables */ \
int co_nfreevars; /* number of free variables */ \
uint32_t co_version; /* version number */ \
\
PyObject *co_localsplusnames; /* tuple mapping offsets to names */ \
PyObject *co_localspluskinds; /* Bytes mapping to local kinds (one byte \
per variable) */ \
PyObject *co_filename; /* unicode (where it was loaded from) */ \
PyObject *co_name; /* unicode (name, for reference) */ \
PyObject *co_qualname; /* unicode (qualname, for reference) */ \
PyObject *co_linetable; /* bytes object that holds location info */ \
PyObject *co_weakreflist; /* to support weakrefs to code objects */ \
_PyExecutorArray *co_executors; /* executors from optimizer */ \
_PyCoCached *_co_cached; /* cached co_* attributes */ \
uintptr_t _co_instrumentation_version; /* current instrumentation version */ \
_PyCoMonitoringData *_co_monitoring; /* Monitoring data */ \
int _co_firsttraceable; /* index of first traceable instruction */ \
/* Scratch space for extra data relating to the code object. \
Type is a void* to keep the format private in codeobject.c to force \
people to go through the proper APIs. */ \
void *co_extra; \
char co_code_adaptive[(SIZE)]; \
}
/* Bytecode object */
struct PyCodeObject _PyCode_DEF(1);
/* Masks for co_flags above */
#define CO_OPTIMIZED 0x0001
#define CO_NEWLOCALS 0x0002
#define CO_VARARGS 0x0004
#define CO_VARKEYWORDS 0x0008
#define CO_NESTED 0x0010
#define CO_GENERATOR 0x0020
/* The CO_COROUTINE flag is set for coroutine functions (defined with
``async def`` keywords) */
#define CO_COROUTINE 0x0080
#define CO_ITERABLE_COROUTINE 0x0100
#define CO_ASYNC_GENERATOR 0x0200
/* bpo-39562: These constant values are changed in Python 3.9
to prevent collision with compiler flags. CO_FUTURE_ and PyCF_
constants must be kept unique. PyCF_ constants can use bits from
0x0100 to 0x10000. CO_FUTURE_ constants use bits starting at 0x20000. */
#define CO_FUTURE_DIVISION 0x20000
#define CO_FUTURE_ABSOLUTE_IMPORT 0x40000 /* do absolute imports by default */
#define CO_FUTURE_WITH_STATEMENT 0x80000
#define CO_FUTURE_PRINT_FUNCTION 0x100000
#define CO_FUTURE_UNICODE_LITERALS 0x200000
#define CO_FUTURE_BARRY_AS_BDFL 0x400000
#define CO_FUTURE_GENERATOR_STOP 0x800000
#define CO_FUTURE_ANNOTATIONS 0x1000000
#define CO_NO_MONITORING_EVENTS 0x2000000
/* This should be defined if a future statement modifies the syntax.
For example, when a keyword is added.
*/
#define PY_PARSER_REQUIRES_FUTURE_KEYWORD
#define CO_MAXBLOCKS 21 /* Max static block nesting within a function */
PyAPI_DATA(PyTypeObject) PyCode_Type;
#define PyCode_Check(op) Py_IS_TYPE((op), &PyCode_Type)
static inline Py_ssize_t PyCode_GetNumFree(PyCodeObject *op) {
assert(PyCode_Check(op));
return op->co_nfreevars;
}
static inline int PyUnstable_Code_GetFirstFree(PyCodeObject *op) {
assert(PyCode_Check(op));
return op->co_nlocalsplus - op->co_nfreevars;
}
Py_DEPRECATED(3.13) static inline int PyCode_GetFirstFree(PyCodeObject *op) {
return PyUnstable_Code_GetFirstFree(op);
}
/* Unstable public interface */
PyAPI_FUNC(PyCodeObject *) PyUnstable_Code_New(
int, int, int, int, int, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, int, PyObject *,
PyObject *);
PyAPI_FUNC(PyCodeObject *) PyUnstable_Code_NewWithPosOnlyArgs(
int, int, int, int, int, int, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, PyObject *,
PyObject *, PyObject *, PyObject *, int, PyObject *,
PyObject *);
/* same as struct above */
// Old names -- remove when this API changes:
_Py_DEPRECATED_EXTERNALLY(3.12) static inline PyCodeObject *
PyCode_New(
int a, int b, int c, int d, int e, PyObject *f, PyObject *g,
PyObject *h, PyObject *i, PyObject *j, PyObject *k,
PyObject *l, PyObject *m, PyObject *n, int o, PyObject *p,
PyObject *q)
{
return PyUnstable_Code_New(
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q);
}
_Py_DEPRECATED_EXTERNALLY(3.12) static inline PyCodeObject *
PyCode_NewWithPosOnlyArgs(
int a, int poac, int b, int c, int d, int e, PyObject *f, PyObject *g,
PyObject *h, PyObject *i, PyObject *j, PyObject *k,
PyObject *l, PyObject *m, PyObject *n, int o, PyObject *p,
PyObject *q)
{
return PyUnstable_Code_NewWithPosOnlyArgs(
a, poac, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q);
}
/* Creates a new empty code object with the specified source location. */
PyAPI_FUNC(PyCodeObject *)
PyCode_NewEmpty(const char *filename, const char *funcname, int firstlineno);
/* Return the line number associated with the specified bytecode index
in this code object. If you just need the line number of a frame,
use PyFrame_GetLineNumber() instead. */
PyAPI_FUNC(int) PyCode_Addr2Line(PyCodeObject *, int);
PyAPI_FUNC(int) PyCode_Addr2Location(PyCodeObject *, int, int *, int *, int *, int *);
#define PY_FOREACH_CODE_EVENT(V) \
V(CREATE) \
V(DESTROY)
typedef enum {
#define PY_DEF_EVENT(op) PY_CODE_EVENT_##op,
PY_FOREACH_CODE_EVENT(PY_DEF_EVENT)
#undef PY_DEF_EVENT
} PyCodeEvent;
/*
* A callback that is invoked for different events in a code object's lifecycle.
*
* The callback is invoked with a borrowed reference to co, after it is
* created and before it is destroyed.
*
* If the callback sets an exception, it must return -1. Otherwise
* it should return 0.
*/
typedef int (*PyCode_WatchCallback)(
PyCodeEvent event,
PyCodeObject* co);
/*
* Register a per-interpreter callback that will be invoked for code object
* lifecycle events.
*
* Returns a handle that may be passed to PyCode_ClearWatcher on success,
* or -1 and sets an error if no more handles are available.
*/
PyAPI_FUNC(int) PyCode_AddWatcher(PyCode_WatchCallback callback);
/*
* Clear the watcher associated with the watcher_id handle.
*
* Returns 0 on success or -1 if no watcher exists for the provided id.
*/
PyAPI_FUNC(int) PyCode_ClearWatcher(int watcher_id);
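
A minimal sketch of registering a code watcher with the API above; the callback name and counter are illustrative, and registration assumes a running interpreter.

#include <Python.h>

/* Sketch only: count code-object lifecycle events for the current interpreter. */
static Py_ssize_t code_event_count = 0;

static int
count_code_events(PyCodeEvent event, PyCodeObject *co)
{
    (void)event;
    (void)co;                      /* borrowed reference; do not store it */
    code_event_count++;
    return 0;                      /* return -1 only with an exception set */
}

static int
install_code_watcher(void)
{
    int watcher_id = PyCode_AddWatcher(count_code_events);
    if (watcher_id < 0) {
        return -1;                 /* no more handles available */
    }
    /* Later: PyCode_ClearWatcher(watcher_id); */
    return watcher_id;
}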
/* for internal use only */
struct _opaque {
int computed_line;
const uint8_t *lo_next;
const uint8_t *limit;
};
typedef struct _line_offsets {
int ar_start;
int ar_end;
int ar_line;
struct _opaque opaque;
} PyCodeAddressRange;
/* Update *bounds to describe the first and one-past-the-last instructions in the
same line as lasti. Return the number of that line.
*/
PyAPI_FUNC(int) _PyCode_CheckLineNumber(int lasti, PyCodeAddressRange *bounds);
/* Create a comparable key used to compare constants, taking into account the
* object type. It is used to make sure types are not coerced (e.g., float and
* complex) _and_ to distinguish 0.0 from -0.0 e.g. on IEEE platforms
*
* Return (type(obj), obj, ...): a tuple with variable size (at least 2 items)
* depending on the type and the value. The type is the first item to not
* compare bytes and str which can raise a BytesWarning exception. */
PyAPI_FUNC(PyObject*) _PyCode_ConstantKey(PyObject *obj);
PyAPI_FUNC(PyObject*) PyCode_Optimize(PyObject *code, PyObject* consts,
PyObject *names, PyObject *lnotab);
PyAPI_FUNC(int) PyUnstable_Code_GetExtra(
PyObject *code, Py_ssize_t index, void **extra);
PyAPI_FUNC(int) PyUnstable_Code_SetExtra(
PyObject *code, Py_ssize_t index, void *extra);
// Old names -- remove when this API changes:
_Py_DEPRECATED_EXTERNALLY(3.12) static inline int
_PyCode_GetExtra(PyObject *code, Py_ssize_t index, void **extra)
{
return PyUnstable_Code_GetExtra(code, index, extra);
}
_Py_DEPRECATED_EXTERNALLY(3.12) static inline int
_PyCode_SetExtra(PyObject *code, Py_ssize_t index, void *extra)
{
return PyUnstable_Code_SetExtra(code, index, extra);
}
/* Equivalent to getattr(code, 'co_code') in Python.
Returns a strong reference to a bytes object. */
PyAPI_FUNC(PyObject *) PyCode_GetCode(PyCodeObject *code);
/* Equivalent to getattr(code, 'co_varnames') in Python. */
PyAPI_FUNC(PyObject *) PyCode_GetVarnames(PyCodeObject *code);
/* Equivalent to getattr(code, 'co_cellvars') in Python. */
PyAPI_FUNC(PyObject *) PyCode_GetCellvars(PyCodeObject *code);
/* Equivalent to getattr(code, 'co_freevars') in Python. */
PyAPI_FUNC(PyObject *) PyCode_GetFreevars(PyCodeObject *code);
typedef enum _PyCodeLocationInfoKind {
/* short forms are 0 to 9 */
PY_CODE_LOCATION_INFO_SHORT0 = 0,
/* one line forms are 10 to 12 */
PY_CODE_LOCATION_INFO_ONE_LINE0 = 10,
PY_CODE_LOCATION_INFO_ONE_LINE1 = 11,
PY_CODE_LOCATION_INFO_ONE_LINE2 = 12,
PY_CODE_LOCATION_INFO_NO_COLUMNS = 13,
PY_CODE_LOCATION_INFO_LONG = 14,
PY_CODE_LOCATION_INFO_NONE = 15
} _PyCodeLocationInfoKind;
#ifdef __cplusplus
}
#endif
#endif // !Py_CODE_H
#endif // !Py_LIMITED_API

View File

@@ -0,0 +1,50 @@
#ifndef Py_CPYTHON_COMPILE_H
# error "this header file must not be included directly"
#endif
/* Public interface */
#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \
CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \
CO_FUTURE_UNICODE_LITERALS | CO_FUTURE_BARRY_AS_BDFL | \
CO_FUTURE_GENERATOR_STOP | CO_FUTURE_ANNOTATIONS)
#define PyCF_MASK_OBSOLETE (CO_NESTED)
/* bpo-39562: CO_FUTURE_ and PyCF_ constants must be kept unique.
PyCF_ constants can use bits from 0x0100 to 0x10000.
CO_FUTURE_ constants use bits starting at 0x20000. */
#define PyCF_SOURCE_IS_UTF8 0x0100
#define PyCF_DONT_IMPLY_DEDENT 0x0200
#define PyCF_ONLY_AST 0x0400
#define PyCF_IGNORE_COOKIE 0x0800
#define PyCF_TYPE_COMMENTS 0x1000
#define PyCF_ALLOW_TOP_LEVEL_AWAIT 0x2000
#define PyCF_ALLOW_INCOMPLETE_INPUT 0x4000
#define PyCF_OPTIMIZED_AST (0x8000 | PyCF_ONLY_AST)
#define PyCF_COMPILE_MASK (PyCF_ONLY_AST | PyCF_ALLOW_TOP_LEVEL_AWAIT | \
PyCF_TYPE_COMMENTS | PyCF_DONT_IMPLY_DEDENT | \
PyCF_ALLOW_INCOMPLETE_INPUT | PyCF_OPTIMIZED_AST)
typedef struct {
int cf_flags; /* bitmask of CO_xxx flags relevant to future */
int cf_feature_version; /* minor Python version (PyCF_ONLY_AST) */
} PyCompilerFlags;
#define _PyCompilerFlags_INIT \
(PyCompilerFlags){.cf_flags = 0, .cf_feature_version = PY_MINOR_VERSION}
/* Future feature support */
#define FUTURE_NESTED_SCOPES "nested_scopes"
#define FUTURE_GENERATORS "generators"
#define FUTURE_DIVISION "division"
#define FUTURE_ABSOLUTE_IMPORT "absolute_import"
#define FUTURE_WITH_STATEMENT "with_statement"
#define FUTURE_PRINT_FUNCTION "print_function"
#define FUTURE_UNICODE_LITERALS "unicode_literals"
#define FUTURE_BARRY_AS_BDFL "barry_as_FLUFL"
#define FUTURE_GENERATOR_STOP "generator_stop"
#define FUTURE_ANNOTATIONS "annotations"
#define PY_INVALID_STACK_EFFECT INT_MAX
PyAPI_FUNC(int) PyCompile_OpcodeStackEffect(int opcode, int oparg);
PyAPI_FUNC(int) PyCompile_OpcodeStackEffectWithJump(int opcode, int oparg, int jump);
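
As a hedged sketch of how these flags are typically consumed, the snippet below compiles a string with top-level await allowed; it assumes Py_CompileStringFlags() and Py_file_input from the rest of the C API rather than anything declared in this header.

#include <Python.h>

/* Sketch only: compile source text with PyCF_ALLOW_TOP_LEVEL_AWAIT set. */
static PyObject *
compile_with_top_level_await(const char *source)
{
    PyCompilerFlags flags = _PyCompilerFlags_INIT;
    flags.cf_flags |= PyCF_ALLOW_TOP_LEVEL_AWAIT;
    /* Returns a new code object, or NULL with an exception set. */
    return Py_CompileStringFlags(source, "<embedded>", Py_file_input, &flags);
}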

View File

@@ -0,0 +1,33 @@
#ifndef Py_CPYTHON_COMPLEXOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
double real;
double imag;
} Py_complex;
// Operations on complex numbers.
PyAPI_FUNC(Py_complex) _Py_c_sum(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_diff(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_neg(Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_prod(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_quot(Py_complex, Py_complex);
PyAPI_FUNC(Py_complex) _Py_c_pow(Py_complex, Py_complex);
PyAPI_FUNC(double) _Py_c_abs(Py_complex);
/* Complex object interface */
/*
PyComplexObject represents a complex number with double-precision
real and imaginary parts.
*/
typedef struct {
PyObject_HEAD
Py_complex cval;
} PyComplexObject;
PyAPI_FUNC(PyObject *) PyComplex_FromCComplex(Py_complex);
PyAPI_FUNC(Py_complex) PyComplex_AsCComplex(PyObject *op);
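
A small sketch of round-tripping a C complex value through the converters declared above (assuming a running interpreter):

#include <Python.h>

/* Sketch only: build a Python complex from C, then read it back. */
static double
squared_magnitude(void)
{
    Py_complex c = {3.0, 4.0};
    PyObject *z = PyComplex_FromCComplex(c);    /* new reference */
    if (z == NULL) {
        return -1.0;
    }
    Py_complex back = PyComplex_AsCComplex(z);  /* real == -1.0 with an exception set on error */
    Py_DECREF(z);
    return back.real * back.real + back.imag * back.imag;   /* 25.0 here */
}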

View File

@@ -0,0 +1,74 @@
#ifndef Py_LIMITED_API
#ifndef Py_CONTEXT_H
#define Py_CONTEXT_H
#ifdef __cplusplus
extern "C" {
#endif
PyAPI_DATA(PyTypeObject) PyContext_Type;
typedef struct _pycontextobject PyContext;
PyAPI_DATA(PyTypeObject) PyContextVar_Type;
typedef struct _pycontextvarobject PyContextVar;
PyAPI_DATA(PyTypeObject) PyContextToken_Type;
typedef struct _pycontexttokenobject PyContextToken;
#define PyContext_CheckExact(o) Py_IS_TYPE((o), &PyContext_Type)
#define PyContextVar_CheckExact(o) Py_IS_TYPE((o), &PyContextVar_Type)
#define PyContextToken_CheckExact(o) Py_IS_TYPE((o), &PyContextToken_Type)
PyAPI_FUNC(PyObject *) PyContext_New(void);
PyAPI_FUNC(PyObject *) PyContext_Copy(PyObject *);
PyAPI_FUNC(PyObject *) PyContext_CopyCurrent(void);
PyAPI_FUNC(int) PyContext_Enter(PyObject *);
PyAPI_FUNC(int) PyContext_Exit(PyObject *);
/* Create a new context variable.
default_value can be NULL.
*/
PyAPI_FUNC(PyObject *) PyContextVar_New(
const char *name, PyObject *default_value);
/* Get a value for the variable.
Returns -1 if an error occurred during the lookup.
Returns 0 on success, whether or not the value was found.
If value was found, *value will point to it.
If not, it will point to:
- default_value, if not NULL;
- the default value of "var", if not NULL;
- NULL.
'*value' will be a new ref, if not NULL.
*/
PyAPI_FUNC(int) PyContextVar_Get(
PyObject *var, PyObject *default_value, PyObject **value);
/* Set a new value for the variable.
Returns NULL if an error occurs.
*/
PyAPI_FUNC(PyObject *) PyContextVar_Set(PyObject *var, PyObject *value);
/* Reset a variable to its previous value.
Returns 0 on success, -1 on error.
*/
PyAPI_FUNC(int) PyContextVar_Reset(PyObject *var, PyObject *token);
#ifdef __cplusplus
}
#endif
#endif /* !Py_CONTEXT_H */
#endif /* !Py_LIMITED_API */
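
For reference, a hedged end-to-end sketch of the context-variable API above, assuming a running interpreter; PyLong_FromLong() comes from the wider C API.

#include <Python.h>

/* Sketch only: create a context variable, set it, read it back, then reset. */
static int
demo_contextvar(void)
{
    PyObject *var = PyContextVar_New("request_id", NULL);   /* no default value */
    if (var == NULL) {
        return -1;
    }
    PyObject *value = PyLong_FromLong(42);
    if (value == NULL) {
        Py_DECREF(var);
        return -1;
    }
    PyObject *token = PyContextVar_Set(var, value);          /* NULL on error */
    Py_DECREF(value);
    if (token == NULL) {
        Py_DECREF(var);
        return -1;
    }
    PyObject *current = NULL;
    if (PyContextVar_Get(var, NULL, &current) < 0) {         /* -1 on lookup error */
        Py_DECREF(token);
        Py_DECREF(var);
        return -1;
    }
    Py_XDECREF(current);                                     /* new reference if found */
    int rc = PyContextVar_Reset(var, token);                 /* 0 on success */
    Py_DECREF(token);
    Py_DECREF(var);
    return rc;
}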

View File

@@ -0,0 +1,134 @@
#ifndef Py_CPYTHON_CRITICAL_SECTION_H
# error "this header file must not be included directly"
#endif
// Python critical sections
//
// Conceptually, critical sections are a deadlock avoidance layer on top of
// per-object locks. These helpers, in combination with those locks, replace
// our usage of the global interpreter lock to provide thread-safety for
// otherwise thread-unsafe objects, such as dict.
//
// NOTE: These APIs are no-ops in non-free-threaded builds.
//
// Straightforward per-object locking could introduce deadlocks that were not
// present when running with the GIL. Threads may hold locks for multiple
// objects simultaneously because Python operations can nest. If threads were
// to acquire the same locks in different orders, they would deadlock.
//
// One way to avoid deadlocks is to allow threads to hold only the lock (or
// locks) for a single operation at a time (typically a single lock, but some
// operations involve two locks). When a thread begins a nested operation it
// could suspend the locks for any outer operation: before beginning the nested
// operation, the locks for the outer operation are released and when the
// nested operation completes, the locks for the outer operation are
// reacquired.
//
// To improve performance, this API uses a variation of the above scheme.
// Instead of immediately suspending locks any time a nested operation begins,
// locks are only suspended if the thread would block. This reduces the number
// of lock acquisitions and releases for nested operations, while still
// avoiding deadlocks.
//
// Additionally, the locks for any active operation are suspended around
// other potentially blocking operations, such as I/O. This is because the
// interaction between locks and blocking operations can lead to deadlocks in
// the same way as the interaction between multiple locks.
//
// Each thread's critical sections and their corresponding locks are tracked in
// a stack in `PyThreadState.critical_section`. When a thread calls
// `_PyThreadState_Detach()`, such as before a blocking I/O operation or when
// waiting to acquire a lock, the thread suspends all of its active critical
// sections, temporarily releasing the associated locks. When the thread calls
// `_PyThreadState_Attach()`, it resumes the top-most (i.e., most recent)
// critical section by reacquiring the associated lock or locks. See
// `_PyCriticalSection_Resume()`.
//
// NOTE: Only the top-most critical section is guaranteed to be active.
// Operations that need to lock two objects at once must use
// `Py_BEGIN_CRITICAL_SECTION2()`. You *CANNOT* use nested critical sections
// to lock more than one object at once, because the inner critical section
// may suspend the outer critical sections. This API does not provide a way
// to lock more than two objects at once (though it could be added later
// if actually needed).
//
// NOTE: Critical sections implicitly behave like reentrant locks because
// attempting to acquire the same lock will suspend any outer (earlier)
// critical sections. However, they are less efficient for this use case than
// purposefully designed reentrant locks.
//
// Example usage:
// Py_BEGIN_CRITICAL_SECTION(op);
// ...
// Py_END_CRITICAL_SECTION();
//
// To lock two objects at once:
// Py_BEGIN_CRITICAL_SECTION2(op1, op2);
// ...
// Py_END_CRITICAL_SECTION2();
typedef struct PyCriticalSection PyCriticalSection;
typedef struct PyCriticalSection2 PyCriticalSection2;
PyAPI_FUNC(void)
PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op);
PyAPI_FUNC(void)
PyCriticalSection_End(PyCriticalSection *c);
PyAPI_FUNC(void)
PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b);
PyAPI_FUNC(void)
PyCriticalSection2_End(PyCriticalSection2 *c);
#ifndef Py_GIL_DISABLED
# define Py_BEGIN_CRITICAL_SECTION(op) \
{
# define Py_END_CRITICAL_SECTION() \
}
# define Py_BEGIN_CRITICAL_SECTION2(a, b) \
{
# define Py_END_CRITICAL_SECTION2() \
}
#else /* !Py_GIL_DISABLED */
// NOTE: the contents of this struct are private and may change between
// Python releases without a deprecation period.
struct PyCriticalSection {
// Tagged pointer to an outer active critical section (or 0).
uintptr_t _cs_prev;
// Mutex used to protect critical section
PyMutex *_cs_mutex;
};
// A critical section protected by two mutexes. Use
// Py_BEGIN_CRITICAL_SECTION2 and Py_END_CRITICAL_SECTION2.
// NOTE: the contents of this struct are private and may change between
// Python releases without a deprecation period.
struct PyCriticalSection2 {
PyCriticalSection _cs_base;
PyMutex *_cs_mutex2;
};
# define Py_BEGIN_CRITICAL_SECTION(op) \
{ \
PyCriticalSection _py_cs; \
PyCriticalSection_Begin(&_py_cs, _PyObject_CAST(op))
# define Py_END_CRITICAL_SECTION() \
PyCriticalSection_End(&_py_cs); \
}
# define Py_BEGIN_CRITICAL_SECTION2(a, b) \
{ \
PyCriticalSection2 _py_cs2; \
PyCriticalSection2_Begin(&_py_cs2, _PyObject_CAST(a), _PyObject_CAST(b))
# define Py_END_CRITICAL_SECTION2() \
PyCriticalSection2_End(&_py_cs2); \
}
#endif
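
A minimal sketch of the usage pattern described above, guarding a store into a shared dict; PyDict_SetItem() is from the wider C API, and on GIL builds the macros reduce to plain braces.

#include <Python.h>

/* Sketch only: serialize a write to a dict shared between threads. */
static int
locked_store(PyObject *shared_dict, PyObject *key, PyObject *value)
{
    int rc;
    Py_BEGIN_CRITICAL_SECTION(shared_dict);
    rc = PyDict_SetItem(shared_dict, key, value);
    Py_END_CRITICAL_SECTION();
    return rc;
}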

View File

@@ -0,0 +1,62 @@
#ifndef Py_CPYTHON_DESCROBJECT_H
# error "this header file must not be included directly"
#endif
typedef PyObject *(*wrapperfunc)(PyObject *self, PyObject *args,
void *wrapped);
typedef PyObject *(*wrapperfunc_kwds)(PyObject *self, PyObject *args,
void *wrapped, PyObject *kwds);
struct wrapperbase {
const char *name;
int offset;
void *function;
wrapperfunc wrapper;
const char *doc;
int flags;
PyObject *name_strobj;
};
/* Flags for above struct */
#define PyWrapperFlag_KEYWORDS 1 /* wrapper function takes keyword args */
/* Various kinds of descriptor objects */
typedef struct {
PyObject_HEAD
PyTypeObject *d_type;
PyObject *d_name;
PyObject *d_qualname;
} PyDescrObject;
#define PyDescr_COMMON PyDescrObject d_common
#define PyDescr_TYPE(x) (((PyDescrObject *)(x))->d_type)
#define PyDescr_NAME(x) (((PyDescrObject *)(x))->d_name)
typedef struct {
PyDescr_COMMON;
PyMethodDef *d_method;
vectorcallfunc vectorcall;
} PyMethodDescrObject;
typedef struct {
PyDescr_COMMON;
PyMemberDef *d_member;
} PyMemberDescrObject;
typedef struct {
PyDescr_COMMON;
PyGetSetDef *d_getset;
} PyGetSetDescrObject;
typedef struct {
PyDescr_COMMON;
struct wrapperbase *d_base;
void *d_wrapped; /* This can be any function pointer */
} PyWrapperDescrObject;
PyAPI_FUNC(PyObject *) PyDescr_NewWrapper(PyTypeObject *,
struct wrapperbase *, void *);
PyAPI_FUNC(int) PyDescr_IsData(PyObject *);

View File

@@ -0,0 +1,102 @@
#ifndef Py_CPYTHON_DICTOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct _dictkeysobject PyDictKeysObject;
typedef struct _dictvalues PyDictValues;
/* The ma_values pointer is NULL for a combined table
* or points to an array of PyObject* for a split table
*/
typedef struct {
PyObject_HEAD
/* Number of items in the dictionary */
Py_ssize_t ma_used;
/* Dictionary version: a globally unique value that changes each time
the dictionary is modified */
#ifdef Py_BUILD_CORE
/* Bits 0-7 are for dict watchers.
* Bits 8-11 are for the watched mutation counter (used by tier2 optimization)
* The remaining bits (12-63) are the actual version tag. */
uint64_t ma_version_tag;
#else
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
#endif
PyDictKeysObject *ma_keys;
/* If ma_values is NULL, the table is "combined": keys and values
are stored in ma_keys.
If ma_values is not NULL, the table is split:
keys are stored in ma_keys and values are stored in ma_values */
PyDictValues *ma_values;
} PyDictObject;
PyAPI_FUNC(PyObject *) _PyDict_GetItem_KnownHash(PyObject *mp, PyObject *key,
Py_hash_t hash);
PyAPI_FUNC(PyObject *) _PyDict_GetItemStringWithError(PyObject *, const char *);
PyAPI_FUNC(PyObject *) PyDict_SetDefault(
PyObject *mp, PyObject *key, PyObject *defaultobj);
// Inserts `key` with a value `default_value`, if `key` is not already present
// in the dictionary. If `result` is not NULL, then the value associated
// with `key` is returned in `*result` (either the existing value, or the now
// inserted `default_value`).
// Returns:
// -1 on error
// 0 if `key` was not present and `default_value` was inserted
// 1 if `key` was present and `default_value` was not inserted
PyAPI_FUNC(int) PyDict_SetDefaultRef(PyObject *mp, PyObject *key, PyObject *default_value, PyObject **result);
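
A short sketch of the PyDict_SetDefaultRef() contract described above; the helper name is illustrative.

#include <Python.h>

/* Sketch only: insert default_value if key is absent and return a strong
   reference to whichever value the dict ends up holding. */
static PyObject *
get_or_insert(PyObject *dict, PyObject *key, PyObject *default_value)
{
    PyObject *value = NULL;
    int rc = PyDict_SetDefaultRef(dict, key, default_value, &value);
    if (rc < 0) {
        return NULL;            /* error, exception set */
    }
    /* rc == 0: default_value was inserted; rc == 1: key was already present.
       Either way, value holds a new strong reference. */
    return value;
}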
/* Get the number of items of a dictionary. */
static inline Py_ssize_t PyDict_GET_SIZE(PyObject *op) {
PyDictObject *mp;
assert(PyDict_Check(op));
mp = _Py_CAST(PyDictObject*, op);
#ifdef Py_GIL_DISABLED
return _Py_atomic_load_ssize_relaxed(&mp->ma_used);
#else
return mp->ma_used;
#endif
}
#define PyDict_GET_SIZE(op) PyDict_GET_SIZE(_PyObject_CAST(op))
PyAPI_FUNC(int) PyDict_ContainsString(PyObject *mp, const char *key);
PyAPI_FUNC(PyObject *) _PyDict_NewPresized(Py_ssize_t minused);
PyAPI_FUNC(int) PyDict_Pop(PyObject *dict, PyObject *key, PyObject **result);
PyAPI_FUNC(int) PyDict_PopString(PyObject *dict, const char *key, PyObject **result);
PyAPI_FUNC(PyObject *) _PyDict_Pop(PyObject *dict, PyObject *key, PyObject *default_value);
/* Dictionary watchers */
#define PY_FOREACH_DICT_EVENT(V) \
V(ADDED) \
V(MODIFIED) \
V(DELETED) \
V(CLONED) \
V(CLEARED) \
V(DEALLOCATED)
typedef enum {
#define PY_DEF_EVENT(EVENT) PyDict_EVENT_##EVENT,
PY_FOREACH_DICT_EVENT(PY_DEF_EVENT)
#undef PY_DEF_EVENT
} PyDict_WatchEvent;
// Callback to be invoked when a watched dict is cleared, dealloced, or modified.
// In clear/dealloc case, key and new_value will be NULL. Otherwise, new_value will be the
// new value for key, NULL if key is being deleted.
typedef int(*PyDict_WatchCallback)(PyDict_WatchEvent event, PyObject* dict, PyObject* key, PyObject* new_value);
// Register/unregister a dict-watcher callback
PyAPI_FUNC(int) PyDict_AddWatcher(PyDict_WatchCallback callback);
PyAPI_FUNC(int) PyDict_ClearWatcher(int watcher_id);
// Mark given dictionary as "watched" (callback will be called if it is modified)
PyAPI_FUNC(int) PyDict_Watch(int watcher_id, PyObject* dict);
PyAPI_FUNC(int) PyDict_Unwatch(int watcher_id, PyObject* dict);
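
A hedged sketch of wiring up the dict-watcher API above; the callback and helper names are illustrative.

#include <Python.h>

/* Sketch only: watch a single dict and inspect mutation events. */
static int
on_dict_event(PyDict_WatchEvent event, PyObject *dict,
              PyObject *key, PyObject *new_value)
{
    (void)dict; (void)key; (void)new_value;
    if (event == PyDict_EVENT_CLEARED || event == PyDict_EVENT_DEALLOCATED) {
        /* key and new_value are NULL in these cases */
    }
    return 0;                       /* return -1 only with an exception set */
}

static int
watch_dict(PyObject *some_dict)
{
    int watcher_id = PyDict_AddWatcher(on_dict_event);
    if (watcher_id < 0) {
        return -1;
    }
    return PyDict_Watch(watcher_id, some_dict);
}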

View File

@@ -0,0 +1,16 @@
#ifndef Py_CPYTHON_FILEOBJECT_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(char *) Py_UniversalNewlineFgets(char *, int, FILE*, PyObject *);
/* The std printer acts as a preliminary sys.stderr until the new io
infrastructure is in place. */
PyAPI_FUNC(PyObject *) PyFile_NewStdPrinter(int);
PyAPI_DATA(PyTypeObject) PyStdPrinter_Type;
typedef PyObject * (*Py_OpenCodeHookFunction)(PyObject *, void *);
PyAPI_FUNC(PyObject *) PyFile_OpenCode(const char *utf8path);
PyAPI_FUNC(PyObject *) PyFile_OpenCodeObject(PyObject *path);
PyAPI_FUNC(int) PyFile_SetOpenCodeHook(Py_OpenCodeHookFunction hook, void *userData);

View File

@@ -0,0 +1,8 @@
#ifndef Py_CPYTHON_FILEUTILS_H
# error "this header file must not be included directly"
#endif
// Used by _testcapi which must not use the internal C API
PyAPI_FUNC(FILE*) _Py_fopen_obj(
PyObject *path,
const char *mode);

View File

@@ -0,0 +1,27 @@
#ifndef Py_CPYTHON_FLOATOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_HEAD
double ob_fval;
} PyFloatObject;
#define _PyFloat_CAST(op) \
(assert(PyFloat_Check(op)), _Py_CAST(PyFloatObject*, op))
// Static inline version of PyFloat_AsDouble() trading safety for speed.
// It doesn't check whether op is a float object.
static inline double PyFloat_AS_DOUBLE(PyObject *op) {
return _PyFloat_CAST(op)->ob_fval;
}
#define PyFloat_AS_DOUBLE(op) PyFloat_AS_DOUBLE(_PyObject_CAST(op))
PyAPI_FUNC(int) PyFloat_Pack2(double x, char *p, int le);
PyAPI_FUNC(int) PyFloat_Pack4(double x, char *p, int le);
PyAPI_FUNC(int) PyFloat_Pack8(double x, char *p, int le);
PyAPI_FUNC(double) PyFloat_Unpack2(const char *p, int le);
PyAPI_FUNC(double) PyFloat_Unpack4(const char *p, int le);
PyAPI_FUNC(double) PyFloat_Unpack8(const char *p, int le);

View File

@@ -0,0 +1,35 @@
/* Frame object interface */
#ifndef Py_CPYTHON_FRAMEOBJECT_H
# error "this header file must not be included directly"
#endif
/* Standard object interface */
PyAPI_FUNC(PyFrameObject *) PyFrame_New(PyThreadState *, PyCodeObject *,
PyObject *, PyObject *);
/* The rest of the interface is specific for frame objects */
/* Conversions between "fast locals" and locals in dictionary */
PyAPI_FUNC(void) PyFrame_LocalsToFast(PyFrameObject *, int);
/* -- Caveat emptor --
* The concept of entry frames is an implementation detail of the CPython
* interpreter. This API is considered unstable and is provided for the
* convenience of debuggers, profilers and state-inspecting tools. Notice that
* this API can be changed in future minor versions if the underlying frame
* mechanism changes or the concept of an 'entry frame' or its semantics becomes
* obsolete or outdated. */
PyAPI_FUNC(int) _PyFrame_IsEntryFrame(PyFrameObject *frame);
PyAPI_FUNC(int) PyFrame_FastToLocalsWithError(PyFrameObject *f);
PyAPI_FUNC(void) PyFrame_FastToLocals(PyFrameObject *);
typedef struct {
PyObject_HEAD
PyFrameObject* frame;
} PyFrameLocalsProxyObject;

View File

@@ -0,0 +1,184 @@
/* Function object interface */
#ifndef Py_LIMITED_API
#ifndef Py_FUNCOBJECT_H
#define Py_FUNCOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#define _Py_COMMON_FIELDS(PREFIX) \
PyObject *PREFIX ## globals; \
PyObject *PREFIX ## builtins; \
PyObject *PREFIX ## name; \
PyObject *PREFIX ## qualname; \
PyObject *PREFIX ## code; /* A code object, the __code__ attribute */ \
PyObject *PREFIX ## defaults; /* NULL or a tuple */ \
PyObject *PREFIX ## kwdefaults; /* NULL or a dict */ \
PyObject *PREFIX ## closure; /* NULL or a tuple of cell objects */
typedef struct {
_Py_COMMON_FIELDS(fc_)
} PyFrameConstructor;
/* Function objects and code objects should not be confused with each other:
*
* Function objects are created by the execution of the 'def' statement.
* They reference a code object in their __code__ attribute, which is a
* purely syntactic object, i.e. nothing more than a compiled version of some
* source code lines. There is one code object per source code "fragment",
* but each code object can be referenced by zero or many function objects
* depending only on how many times the 'def' statement in the source was
* executed so far.
*/
typedef struct {
PyObject_HEAD
_Py_COMMON_FIELDS(func_)
PyObject *func_doc; /* The __doc__ attribute, can be anything */
PyObject *func_dict; /* The __dict__ attribute, a dict or NULL */
PyObject *func_weakreflist; /* List of weak references */
PyObject *func_module; /* The __module__ attribute, can be anything */
PyObject *func_annotations; /* Annotations, a dict or NULL */
PyObject *func_typeparams; /* Tuple of active type variables or NULL */
vectorcallfunc vectorcall;
/* Version number for use by specializer.
* Can be set to non-zero when we want to specialize.
* Will be set to zero if any of these change:
* defaults
* kwdefaults (only if the object changes, not the contents of the dict)
* code
* annotations
* vectorcall function pointer */
uint32_t func_version;
/* Invariant:
* func_closure contains the bindings for func_code->co_freevars, so
* PyTuple_Size(func_closure) == PyCode_GetNumFree(func_code)
* (func_closure may be NULL if PyCode_GetNumFree(func_code) == 0).
*/
} PyFunctionObject;
#undef _Py_COMMON_FIELDS
PyAPI_DATA(PyTypeObject) PyFunction_Type;
#define PyFunction_Check(op) Py_IS_TYPE((op), &PyFunction_Type)
PyAPI_FUNC(PyObject *) PyFunction_New(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_NewWithQualName(PyObject *, PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetCode(PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetGlobals(PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetModule(PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetDefaults(PyObject *);
PyAPI_FUNC(int) PyFunction_SetDefaults(PyObject *, PyObject *);
PyAPI_FUNC(void) PyFunction_SetVectorcall(PyFunctionObject *, vectorcallfunc);
PyAPI_FUNC(PyObject *) PyFunction_GetKwDefaults(PyObject *);
PyAPI_FUNC(int) PyFunction_SetKwDefaults(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetClosure(PyObject *);
PyAPI_FUNC(int) PyFunction_SetClosure(PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyFunction_GetAnnotations(PyObject *);
PyAPI_FUNC(int) PyFunction_SetAnnotations(PyObject *, PyObject *);
#define _PyFunction_CAST(func) \
(assert(PyFunction_Check(func)), _Py_CAST(PyFunctionObject*, func))
/* Static inline functions for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyObject* PyFunction_GET_CODE(PyObject *func) {
return _PyFunction_CAST(func)->func_code;
}
#define PyFunction_GET_CODE(func) PyFunction_GET_CODE(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_GLOBALS(PyObject *func) {
return _PyFunction_CAST(func)->func_globals;
}
#define PyFunction_GET_GLOBALS(func) PyFunction_GET_GLOBALS(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_MODULE(PyObject *func) {
return _PyFunction_CAST(func)->func_module;
}
#define PyFunction_GET_MODULE(func) PyFunction_GET_MODULE(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_DEFAULTS(PyObject *func) {
return _PyFunction_CAST(func)->func_defaults;
}
#define PyFunction_GET_DEFAULTS(func) PyFunction_GET_DEFAULTS(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_KW_DEFAULTS(PyObject *func) {
return _PyFunction_CAST(func)->func_kwdefaults;
}
#define PyFunction_GET_KW_DEFAULTS(func) PyFunction_GET_KW_DEFAULTS(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_CLOSURE(PyObject *func) {
return _PyFunction_CAST(func)->func_closure;
}
#define PyFunction_GET_CLOSURE(func) PyFunction_GET_CLOSURE(_PyObject_CAST(func))
static inline PyObject* PyFunction_GET_ANNOTATIONS(PyObject *func) {
return _PyFunction_CAST(func)->func_annotations;
}
#define PyFunction_GET_ANNOTATIONS(func) PyFunction_GET_ANNOTATIONS(_PyObject_CAST(func))
/* The classmethod and staticmethod types live here, too */
PyAPI_DATA(PyTypeObject) PyClassMethod_Type;
PyAPI_DATA(PyTypeObject) PyStaticMethod_Type;
PyAPI_FUNC(PyObject *) PyClassMethod_New(PyObject *);
PyAPI_FUNC(PyObject *) PyStaticMethod_New(PyObject *);
#define PY_FOREACH_FUNC_EVENT(V) \
V(CREATE) \
V(DESTROY) \
V(MODIFY_CODE) \
V(MODIFY_DEFAULTS) \
V(MODIFY_KWDEFAULTS)
typedef enum {
#define PY_DEF_EVENT(EVENT) PyFunction_EVENT_##EVENT,
PY_FOREACH_FUNC_EVENT(PY_DEF_EVENT)
#undef PY_DEF_EVENT
} PyFunction_WatchEvent;
/*
* A callback that is invoked for different events in a function's lifecycle.
*
* The callback is invoked with a borrowed reference to func, after it is
* created and before it is modified or destroyed. The callback should not
* modify func.
*
* When a function's code object, defaults, or kwdefaults are modified the
* callback will be invoked with the respective event and new_value will
* contain a borrowed reference to the new value that is about to be stored in
* the function. Otherwise the third argument is NULL.
*
* If the callback returns with an exception set, it must return -1. Otherwise
* it should return 0.
*/
typedef int (*PyFunction_WatchCallback)(
PyFunction_WatchEvent event,
PyFunctionObject *func,
PyObject *new_value);
/*
* Register a per-interpreter callback that will be invoked for function lifecycle
* events.
*
* Returns a handle that may be passed to PyFunction_ClearWatcher on success,
* or -1 and sets an error if no more handles are available.
*/
PyAPI_FUNC(int) PyFunction_AddWatcher(PyFunction_WatchCallback callback);
/*
* Clear the watcher associated with the watcher_id handle.
*
* Returns 0 on success or -1 if no watcher exists for the supplied id.
*/
PyAPI_FUNC(int) PyFunction_ClearWatcher(int watcher_id);
#ifdef __cplusplus
}
#endif
#endif /* !Py_FUNCOBJECT_H */
#endif /* Py_LIMITED_API */
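
For orientation, a minimal sketch of a function watcher built on the API above; the callback does bookkeeping only, as the comments require, and the names are illustrative.

#include <Python.h>

/* Sketch only: observe function creation/destruction in the current interpreter. */
static int
on_func_event(PyFunction_WatchEvent event, PyFunctionObject *func,
              PyObject *new_value)
{
    (void)func; (void)new_value;        /* borrowed references; do not modify func */
    switch (event) {
        case PyFunction_EVENT_CREATE:
        case PyFunction_EVENT_DESTROY:
            /* bookkeeping only */
            break;
        default:
            break;
    }
    return 0;
}

static int
install_func_watcher(void)
{
    return PyFunction_AddWatcher(on_func_event);   /* handle, or -1 with error set */
}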

View File

@@ -0,0 +1,83 @@
/* Generator object interface */
#ifndef Py_LIMITED_API
#ifndef Py_GENOBJECT_H
#define Py_GENOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* --- Generators --------------------------------------------------------- */
/* _PyGenObject_HEAD defines the initial segment of generator
and coroutine objects. */
#define _PyGenObject_HEAD(prefix) \
PyObject_HEAD \
/* List of weak references. */ \
PyObject *prefix##_weakreflist; \
/* Name of the generator. */ \
PyObject *prefix##_name; \
/* Qualified name of the generator. */ \
PyObject *prefix##_qualname; \
_PyErr_StackItem prefix##_exc_state; \
PyObject *prefix##_origin_or_finalizer; \
char prefix##_hooks_inited; \
char prefix##_closed; \
char prefix##_running_async; \
/* The frame */ \
int8_t prefix##_frame_state; \
PyObject *prefix##_iframe[1]; \
typedef struct {
/* The gi_ prefix is intended to remind of generator-iterator. */
_PyGenObject_HEAD(gi)
} PyGenObject;
PyAPI_DATA(PyTypeObject) PyGen_Type;
#define PyGen_Check(op) PyObject_TypeCheck((op), &PyGen_Type)
#define PyGen_CheckExact(op) Py_IS_TYPE((op), &PyGen_Type)
PyAPI_FUNC(PyObject *) PyGen_New(PyFrameObject *);
PyAPI_FUNC(PyObject *) PyGen_NewWithQualName(PyFrameObject *,
PyObject *name, PyObject *qualname);
PyAPI_FUNC(PyCodeObject *) PyGen_GetCode(PyGenObject *gen);
/* --- PyCoroObject ------------------------------------------------------- */
typedef struct {
_PyGenObject_HEAD(cr)
} PyCoroObject;
PyAPI_DATA(PyTypeObject) PyCoro_Type;
#define PyCoro_CheckExact(op) Py_IS_TYPE((op), &PyCoro_Type)
PyAPI_FUNC(PyObject *) PyCoro_New(PyFrameObject *,
PyObject *name, PyObject *qualname);
/* --- Asynchronous Generators -------------------------------------------- */
typedef struct {
_PyGenObject_HEAD(ag)
} PyAsyncGenObject;
PyAPI_DATA(PyTypeObject) PyAsyncGen_Type;
PyAPI_DATA(PyTypeObject) _PyAsyncGenASend_Type;
PyAPI_FUNC(PyObject *) PyAsyncGen_New(PyFrameObject *,
PyObject *name, PyObject *qualname);
#define PyAsyncGen_CheckExact(op) Py_IS_TYPE((op), &PyAsyncGen_Type)
#define PyAsyncGenASend_CheckExact(op) Py_IS_TYPE((op), &_PyAsyncGenASend_Type)
#undef _PyGenObject_HEAD
#ifdef __cplusplus
}
#endif
#endif /* !Py_GENOBJECT_H */
#endif /* Py_LIMITED_API */

View File

@@ -0,0 +1,25 @@
#ifndef Py_CPYTHON_IMPORT_H
# error "this header file must not be included directly"
#endif
PyMODINIT_FUNC PyInit__imp(void);
struct _inittab {
const char *name; /* ASCII encoded string */
PyObject* (*initfunc)(void);
};
// This is not used after Py_Initialize() is called.
PyAPI_DATA(struct _inittab *) PyImport_Inittab;
PyAPI_FUNC(int) PyImport_ExtendInittab(struct _inittab *newtab);
struct _frozen {
const char *name; /* ASCII encoded string */
const unsigned char *code;
int size;
int is_package;
};
/* Embedding apps may change this pointer to point to their favorite
collection of frozen modules: */
PyAPI_DATA(const struct _frozen *) PyImport_FrozenModules;
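
A hedged sketch of extending the built-in module table for an embedded interpreter; PyImport_AppendInittab(), PyModuleDef, and PyModule_Create() come from the wider C API, and the module name is illustrative.

#include <Python.h>

/* Sketch only: register a built-in extension module before Py_Initialize(). */
static struct PyModuleDef embedded_demo_def = {
    PyModuleDef_HEAD_INIT, "embedded_demo", NULL, -1, NULL
};

static PyObject *
PyInit_embedded_demo(void)
{
    return PyModule_Create(&embedded_demo_def);
}

static int
register_builtin_module(void)
{
    /* Must run before the interpreter is initialized. */
    return PyImport_AppendInittab("embedded_demo", PyInit_embedded_demo);
}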

View File

@@ -0,0 +1,274 @@
#ifndef Py_PYCORECONFIG_H
#define Py_PYCORECONFIG_H
#ifndef Py_LIMITED_API
#ifdef __cplusplus
extern "C" {
#endif
/* --- PyStatus ----------------------------------------------- */
typedef struct {
enum {
_PyStatus_TYPE_OK=0,
_PyStatus_TYPE_ERROR=1,
_PyStatus_TYPE_EXIT=2
} _type;
const char *func;
const char *err_msg;
int exitcode;
} PyStatus;
PyAPI_FUNC(PyStatus) PyStatus_Ok(void);
PyAPI_FUNC(PyStatus) PyStatus_Error(const char *err_msg);
PyAPI_FUNC(PyStatus) PyStatus_NoMemory(void);
PyAPI_FUNC(PyStatus) PyStatus_Exit(int exitcode);
PyAPI_FUNC(int) PyStatus_IsError(PyStatus err);
PyAPI_FUNC(int) PyStatus_IsExit(PyStatus err);
PyAPI_FUNC(int) PyStatus_Exception(PyStatus err);
/* --- PyWideStringList ------------------------------------------------ */
typedef struct {
/* If length is greater than zero, items must be non-NULL
and all of the item strings must be non-NULL */
Py_ssize_t length;
wchar_t **items;
} PyWideStringList;
PyAPI_FUNC(PyStatus) PyWideStringList_Append(PyWideStringList *list,
const wchar_t *item);
PyAPI_FUNC(PyStatus) PyWideStringList_Insert(PyWideStringList *list,
Py_ssize_t index,
const wchar_t *item);
/* --- PyPreConfig ----------------------------------------------- */
typedef struct PyPreConfig {
int _config_init; /* _PyConfigInitEnum value */
/* Parse Py_PreInitializeFromBytesArgs() arguments?
See PyConfig.parse_argv */
int parse_argv;
/* If greater than 0, enable isolated mode: sys.path contains
neither the script's directory nor the user's site-packages directory.
Set to 1 by the -I command line option. If set to -1 (default), inherit
Py_IsolatedFlag value. */
int isolated;
/* If greater than 0: use environment variables.
Set to 0 by -E command line option. If set to -1 (default), it is
set to !Py_IgnoreEnvironmentFlag. */
int use_environment;
/* Set the LC_CTYPE locale to the user's preferred locale? If equal to 0,
set coerce_c_locale and coerce_c_locale_warn to 0. */
int configure_locale;
/* Coerce the LC_CTYPE locale if it's equal to "C"? (PEP 538)
Set to 0 by PYTHONCOERCECLOCALE=0. Set to 1 by PYTHONCOERCECLOCALE=1.
Set to 2 if the user preferred LC_CTYPE locale is "C".
If it is equal to 1, LC_CTYPE locale is read to decide if it should be
coerced or not (ex: PYTHONCOERCECLOCALE=1). Internally, it is set to 2
if the LC_CTYPE locale must be coerced.
Disabled by default (set to 0). Set it to -1 to let Python decide if it
should be enabled or not. */
int coerce_c_locale;
/* Emit a warning if the LC_CTYPE locale is coerced?
Set to 1 by PYTHONCOERCECLOCALE=warn.
Disabled by default (set to 0). Set it to -1 to let Python decide if it
should be enabled or not. */
int coerce_c_locale_warn;
#ifdef MS_WINDOWS
/* If greater than 1, use the "mbcs" encoding instead of the UTF-8
encoding for the filesystem encoding.
Set to 1 if the PYTHONLEGACYWINDOWSFSENCODING environment variable is
set to a non-empty string. If set to -1 (default), inherit
Py_LegacyWindowsFSEncodingFlag value.
See PEP 529 for more details. */
int legacy_windows_fs_encoding;
#endif
/* Enable UTF-8 mode? (PEP 540)
Disabled by default (equals to 0).
Set to 1 by "-X utf8" and "-X utf8=1" command line options.
Set to 1 by PYTHONUTF8=1 environment variable.
Set to 0 by "-X utf8=0" and PYTHONUTF8=0.
If equal to -1, it is set to 1 if the LC_CTYPE locale is "C" or
"POSIX", otherwise it is set to 0. Inherits the Py_UTF8Mode value. */
int utf8_mode;
/* If non-zero, enable the Python Development Mode.
Set to 1 by the -X dev command line option. Set by the PYTHONDEVMODE
environment variable. */
int dev_mode;
/* Memory allocator: PYTHONMALLOC env var.
See PyMemAllocatorName for valid values. */
int allocator;
} PyPreConfig;
PyAPI_FUNC(void) PyPreConfig_InitPythonConfig(PyPreConfig *config);
PyAPI_FUNC(void) PyPreConfig_InitIsolatedConfig(PyPreConfig *config);
/* --- PyConfig ---------------------------------------------- */
/* This structure is best documented in the Doc/c-api/init_config.rst file. */
typedef struct PyConfig {
int _config_init; /* _PyConfigInitEnum value */
int isolated;
int use_environment;
int dev_mode;
int install_signal_handlers;
int use_hash_seed;
unsigned long hash_seed;
int faulthandler;
int tracemalloc;
int perf_profiling;
int import_time;
int code_debug_ranges;
int show_ref_count;
int dump_refs;
wchar_t *dump_refs_file;
int malloc_stats;
wchar_t *filesystem_encoding;
wchar_t *filesystem_errors;
wchar_t *pycache_prefix;
int parse_argv;
PyWideStringList orig_argv;
PyWideStringList argv;
PyWideStringList xoptions;
PyWideStringList warnoptions;
int site_import;
int bytes_warning;
int warn_default_encoding;
int inspect;
int interactive;
int optimization_level;
int parser_debug;
int write_bytecode;
int verbose;
int quiet;
int user_site_directory;
int configure_c_stdio;
int buffered_stdio;
wchar_t *stdio_encoding;
wchar_t *stdio_errors;
#ifdef MS_WINDOWS
int legacy_windows_stdio;
#endif
wchar_t *check_hash_pycs_mode;
int use_frozen_modules;
int safe_path;
int int_max_str_digits;
int cpu_count;
#ifdef Py_GIL_DISABLED
int enable_gil;
#endif
/* --- Path configuration inputs ------------ */
int pathconfig_warnings;
wchar_t *program_name;
wchar_t *pythonpath_env;
wchar_t *home;
wchar_t *platlibdir;
/* --- Path configuration outputs ----------- */
int module_search_paths_set;
PyWideStringList module_search_paths;
wchar_t *stdlib_dir;
wchar_t *executable;
wchar_t *base_executable;
wchar_t *prefix;
wchar_t *base_prefix;
wchar_t *exec_prefix;
wchar_t *base_exec_prefix;
/* --- Parameter only used by Py_Main() ---------- */
int skip_source_first_line;
wchar_t *run_command;
wchar_t *run_module;
wchar_t *run_filename;
/* --- Set by Py_Main() -------------------------- */
wchar_t *sys_path_0;
/* --- Private fields ---------------------------- */
// Install importlib? If equals to 0, importlib is not initialized at all.
// Needed by freeze_importlib.
int _install_importlib;
// If equal to 0, stop Python initialization before the "main" phase.
int _init_main;
// If non-zero, we believe we're running from a source tree.
int _is_python_build;
#ifdef Py_STATS
// If non-zero, turns on statistics gathering.
int _pystats;
#endif
#ifdef Py_DEBUG
// If not empty, import a non-__main__ module before site.py is executed.
// PYTHON_PRESITE=package.module or -X presite=package.module
wchar_t *run_presite;
#endif
} PyConfig;
PyAPI_FUNC(void) PyConfig_InitPythonConfig(PyConfig *config);
PyAPI_FUNC(void) PyConfig_InitIsolatedConfig(PyConfig *config);
PyAPI_FUNC(void) PyConfig_Clear(PyConfig *);
PyAPI_FUNC(PyStatus) PyConfig_SetString(
PyConfig *config,
wchar_t **config_str,
const wchar_t *str);
PyAPI_FUNC(PyStatus) PyConfig_SetBytesString(
PyConfig *config,
wchar_t **config_str,
const char *str);
PyAPI_FUNC(PyStatus) PyConfig_Read(PyConfig *config);
PyAPI_FUNC(PyStatus) PyConfig_SetBytesArgv(
PyConfig *config,
Py_ssize_t argc,
char * const *argv);
PyAPI_FUNC(PyStatus) PyConfig_SetArgv(PyConfig *config,
Py_ssize_t argc,
wchar_t * const *argv);
PyAPI_FUNC(PyStatus) PyConfig_SetWideStringList(PyConfig *config,
PyWideStringList *list,
Py_ssize_t length, wchar_t **items);
/* --- Helper functions --------------------------------------- */
/* Get the original command line arguments, before Python modified them.
See also PyConfig.orig_argv. */
PyAPI_FUNC(void) Py_GetArgcArgv(int *argc, wchar_t ***argv);
#ifdef __cplusplus
}
#endif
#endif /* !Py_LIMITED_API */
#endif /* !Py_PYCORECONFIG_H */
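
A minimal sketch of how these PyConfig entry points are typically combined when embedding the interpreter. Py_InitializeFromConfig, Py_RunMain, PyStatus_Exception and Py_ExitStatusException come from the wider embedding API rather than this header, and run_embedded is a hypothetical wrapper with abbreviated error handling.

static int
run_embedded(int argc, char **argv)
{
    PyConfig config;
    PyConfig_InitPythonConfig(&config);          /* regular (non-isolated) defaults */

    PyStatus status = PyConfig_SetBytesArgv(&config, argc, argv);
    if (PyStatus_Exception(status)) {
        goto fail;
    }
    status = Py_InitializeFromConfig(&config);   /* the interpreter copies the config */
    if (PyStatus_Exception(status)) {
        goto fail;
    }
    PyConfig_Clear(&config);                     /* safe to clear once initialized */
    return Py_RunMain();                         /* runs -c/-m/script per the config */

fail:
    PyConfig_Clear(&config);
    Py_ExitStatusException(status);              /* exits the process; does not return */
}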

View File

@@ -0,0 +1,53 @@
#ifndef Py_CPYTHON_LISTOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_VAR_HEAD
/* Vector of pointers to list elements. list[0] is ob_item[0], etc. */
PyObject **ob_item;
/* ob_item contains space for 'allocated' elements. The number
* currently in use is ob_size.
* Invariants:
* 0 <= ob_size <= allocated
* len(list) == ob_size
* ob_item == NULL implies ob_size == allocated == 0
* list.sort() temporarily sets allocated to -1 to detect mutations.
*
* Items must normally not be NULL, except during construction when
* the list is not yet visible outside the function that builds it.
*/
Py_ssize_t allocated;
} PyListObject;
/* Cast argument to PyListObject* type. */
#define _PyList_CAST(op) \
(assert(PyList_Check(op)), _Py_CAST(PyListObject*, (op)))
// Macros and static inline functions, trading safety for speed
static inline Py_ssize_t PyList_GET_SIZE(PyObject *op) {
PyListObject *list = _PyList_CAST(op);
#ifdef Py_GIL_DISABLED
return _Py_atomic_load_ssize_relaxed(&(_PyVarObject_CAST(list)->ob_size));
#else
return Py_SIZE(list);
#endif
}
#define PyList_GET_SIZE(op) PyList_GET_SIZE(_PyObject_CAST(op))
#define PyList_GET_ITEM(op, index) (_PyList_CAST(op)->ob_item[(index)])
static inline void
PyList_SET_ITEM(PyObject *op, Py_ssize_t index, PyObject *value) {
PyListObject *list = _PyList_CAST(op);
assert(0 <= index);
assert(index < list->allocated);
list->ob_item[index] = value;
}
#define PyList_SET_ITEM(op, index, value) \
PyList_SET_ITEM(_PyObject_CAST(op), (index), _PyObject_CAST(value))
PyAPI_FUNC(int) PyList_Extend(PyObject *self, PyObject *iterable);
PyAPI_FUNC(int) PyList_Clear(PyObject *self);
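
A short sketch of the fill-before-publishing pattern the invariants above describe: a freshly created list may hold NULL slots only while it is still invisible to other code, and PyList_SET_ITEM steals the reference it is given. make_squares is a hypothetical helper.

static PyObject *
make_squares(Py_ssize_t n)
{
    PyObject *list = PyList_New(n);          /* every slot starts out NULL */
    if (list == NULL) {
        return NULL;
    }
    for (Py_ssize_t i = 0; i < n; i++) {
        PyObject *v = PyLong_FromSsize_t(i * i);
        if (v == NULL) {
            Py_DECREF(list);
            return NULL;
        }
        PyList_SET_ITEM(list, i, v);         /* steals the reference to v */
    }
    return list;                             /* now safe to expose to other code */
}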

View File

@@ -0,0 +1,63 @@
#ifndef Py_CPYTHON_LOCK_H
# error "this header file must not be included directly"
#endif
#define _Py_UNLOCKED 0
#define _Py_LOCKED 1
// A mutex that occupies one byte. The lock can be zero initialized to
// represent the unlocked state.
//
// Typical initialization:
// PyMutex m = (PyMutex){0};
//
// Or initialize as global variables:
// static PyMutex m;
//
// Typical usage:
// PyMutex_Lock(&m);
// ...
// PyMutex_Unlock(&m);
//
// The contents of the PyMutex are not part of the public API, but are
// described to aid in understanding the implementation and debugging. Only
// the two least significant bits are used. The remaining bits are always zero:
// 0b00: unlocked
// 0b01: locked
// 0b10: unlocked and has parked threads
// 0b11: locked and has parked threads
typedef struct PyMutex {
uint8_t _bits; // (private)
} PyMutex;
// exported function for locking the mutex
PyAPI_FUNC(void) PyMutex_Lock(PyMutex *m);
// exported function for unlocking the mutex
PyAPI_FUNC(void) PyMutex_Unlock(PyMutex *m);
// Locks the mutex.
//
// If the mutex is currently locked, the calling thread will be parked until
// the mutex is unlocked. If the current thread holds the GIL, then the GIL
// will be released while the thread is parked.
static inline void
_PyMutex_Lock(PyMutex *m)
{
uint8_t expected = _Py_UNLOCKED;
if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
PyMutex_Lock(m);
}
}
#define PyMutex_Lock _PyMutex_Lock
// Unlocks the mutex.
static inline void
_PyMutex_Unlock(PyMutex *m)
{
uint8_t expected = _Py_LOCKED;
if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_UNLOCKED)) {
PyMutex_Unlock(m);
}
}
#define PyMutex_Unlock _PyMutex_Unlock
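
A small sketch of the usage pattern from the comment above, protecting a plain counter; counter_mutex, counter and increment_counter are hypothetical names.

static PyMutex counter_mutex;                /* zero-initialized == unlocked */
static Py_ssize_t counter;

static void
increment_counter(void)
{
    PyMutex_Lock(&counter_mutex);            /* may release the GIL while parked */
    counter++;
    PyMutex_Unlock(&counter_mutex);
}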

View File

@@ -0,0 +1,146 @@
#ifndef Py_LIMITED_API
#ifndef Py_LONGINTREPR_H
#define Py_LONGINTREPR_H
#ifdef __cplusplus
extern "C" {
#endif
/* This is published for the benefit of "friends" marshal.c and _decimal.c. */
/* Parameters of the integer representation. There are two different
sets of parameters: one set for 30-bit digits, stored in an unsigned 32-bit
integer type, and one set for 15-bit digits with each digit stored in an
unsigned short. The value of PYLONG_BITS_IN_DIGIT, defined either at
configure time or in pyport.h, is used to decide which digit size to use.
Type 'digit' should be able to hold 2*PyLong_BASE-1, and type 'twodigits'
should be an unsigned integer type able to hold all integers up to
PyLong_BASE*PyLong_BASE-1. x_sub assumes that 'digit' is an unsigned type,
and that overflow is handled by taking the result modulo 2**N for some N >
PyLong_SHIFT. The majority of the code doesn't care about the precise
value of PyLong_SHIFT, but there are some notable exceptions:
- PyLong_{As,From}ByteArray require that PyLong_SHIFT be at least 8
- long_hash() requires that PyLong_SHIFT is *strictly* less than the number
of bits in an unsigned long, as do the PyLong <-> long (or unsigned long)
conversion functions
- the Python int <-> size_t/Py_ssize_t conversion functions expect that
PyLong_SHIFT is strictly less than the number of bits in a size_t
- the marshal code currently expects that PyLong_SHIFT is a multiple of 15
- NSMALLNEGINTS and NSMALLPOSINTS should be small enough to fit in a single
digit; with the current values this forces PyLong_SHIFT >= 9
The values 15 and 30 should fit all of the above requirements, on any
platform.
*/
#if PYLONG_BITS_IN_DIGIT == 30
typedef uint32_t digit;
typedef int32_t sdigit; /* signed variant of digit */
typedef uint64_t twodigits;
typedef int64_t stwodigits; /* signed variant of twodigits */
#define PyLong_SHIFT 30
#define _PyLong_DECIMAL_SHIFT 9 /* max(e such that 10**e fits in a digit) */
#define _PyLong_DECIMAL_BASE ((digit)1000000000) /* 10 ** DECIMAL_SHIFT */
#elif PYLONG_BITS_IN_DIGIT == 15
typedef unsigned short digit;
typedef short sdigit; /* signed variant of digit */
typedef unsigned long twodigits;
typedef long stwodigits; /* signed variant of twodigits */
#define PyLong_SHIFT 15
#define _PyLong_DECIMAL_SHIFT 4 /* max(e such that 10**e fits in a digit) */
#define _PyLong_DECIMAL_BASE ((digit)10000) /* 10 ** DECIMAL_SHIFT */
#else
#error "PYLONG_BITS_IN_DIGIT should be 15 or 30"
#endif
#define PyLong_BASE ((digit)1 << PyLong_SHIFT)
#define PyLong_MASK ((digit)(PyLong_BASE - 1))
/* Long integer representation.
Long integers are made up of a number of 30- or 15-bit digits, depending on
the platform. The number of digits (ndigits) is stored in the high bits of
   the lv_tag field (lv_tag >> _PyLong_NON_SIZE_BITS).
The absolute value of a number is equal to
SUM(for i=0 through ndigits-1) ob_digit[i] * 2**(PyLong_SHIFT*i)
The sign of the value is stored in the lower 2 bits of lv_tag.
- 0: Positive
- 1: Zero
- 2: Negative
The third lowest bit of lv_tag is reserved for an immortality flag, but is
not currently used.
In a normalized number, ob_digit[ndigits-1] (the most significant
digit) is never zero. Also, in all cases, for all valid i,
0 <= ob_digit[i] <= PyLong_MASK.
The allocation function takes care of allocating extra memory
so that ob_digit[0] ... ob_digit[ndigits-1] are actually available.
We always allocate memory for at least one digit, so accessing ob_digit[0]
is always safe. However, in the case ndigits == 0, the contents of
ob_digit[0] may be undefined.
*/
typedef struct _PyLongValue {
uintptr_t lv_tag; /* Number of digits, sign and flags */
digit ob_digit[1];
} _PyLongValue;
struct _longobject {
PyObject_HEAD
_PyLongValue long_value;
};
PyAPI_FUNC(PyLongObject*) _PyLong_New(Py_ssize_t);
// Return a copy of src.
PyAPI_FUNC(PyObject*) _PyLong_Copy(PyLongObject *src);
PyAPI_FUNC(PyLongObject*) _PyLong_FromDigits(
int negative,
Py_ssize_t digit_count,
digit *digits);
/* Inline some internals for speed. These would live in pycore_long.h
 * if user code did not need them inlined. */
#define _PyLong_SIGN_MASK 3
#define _PyLong_NON_SIZE_BITS 3
static inline int
_PyLong_IsCompact(const PyLongObject* op) {
assert(PyType_HasFeature((op)->ob_base.ob_type, Py_TPFLAGS_LONG_SUBCLASS));
return op->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS);
}
#define PyUnstable_Long_IsCompact _PyLong_IsCompact
static inline Py_ssize_t
_PyLong_CompactValue(const PyLongObject *op)
{
Py_ssize_t sign;
assert(PyType_HasFeature((op)->ob_base.ob_type, Py_TPFLAGS_LONG_SUBCLASS));
assert(PyUnstable_Long_IsCompact(op));
sign = 1 - (op->long_value.lv_tag & _PyLong_SIGN_MASK);
return sign * (Py_ssize_t)op->long_value.ob_digit[0];
}
#define PyUnstable_Long_CompactValue _PyLong_CompactValue
#ifdef __cplusplus
}
#endif
#endif /* !Py_LONGINTREPR_H */
#endif /* Py_LIMITED_API */
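
A sketch of how the compact-value accessors above are typically used as a fast path; small_int_fast_path is a hypothetical helper, and PyLong_CheckExact / PyLong_AsSsize_t come from the public long API rather than this header.

static Py_ssize_t
small_int_fast_path(PyObject *obj)
{
    PyLongObject *lo = (PyLongObject *)obj;
    if (PyLong_CheckExact(obj) && PyUnstable_Long_IsCompact(lo)) {
        /* at most one digit: read the value directly from ob_digit[0] */
        return PyUnstable_Long_CompactValue(lo);
    }
    return PyLong_AsSsize_t(obj);            /* general path; may set an exception */
}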

View File

@@ -0,0 +1,116 @@
#ifndef Py_CPYTHON_LONGOBJECT_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(PyObject*) PyLong_FromUnicodeObject(PyObject *u, int base);
#define Py_ASNATIVEBYTES_DEFAULTS -1
#define Py_ASNATIVEBYTES_BIG_ENDIAN 0
#define Py_ASNATIVEBYTES_LITTLE_ENDIAN 1
#define Py_ASNATIVEBYTES_NATIVE_ENDIAN 3
#define Py_ASNATIVEBYTES_UNSIGNED_BUFFER 4
#define Py_ASNATIVEBYTES_REJECT_NEGATIVE 8
#define Py_ASNATIVEBYTES_ALLOW_INDEX 16
/* PyLong_AsNativeBytes: Copy the integer value to a native variable.
buffer points to the first byte of the variable.
n_bytes is the number of bytes available in the buffer. Pass 0 to request
the required size for the value.
flags is a bitfield of the following flags:
* 1 - little endian
* 2 - native endian
* 4 - unsigned destination (e.g. don't reject copying 255 into one byte)
* 8 - raise an exception for negative inputs
* 16 - call __index__ on non-int types
If flags is -1 (all bits set), native endian is used, value truncation
   behaves most like C (allows negative inputs and allows the MSB to be set), and non-int
objects will raise a TypeError.
Big endian mode will write the most significant byte into the address
directly referenced by buffer; little endian will write the least significant
byte into that address.
If an exception is raised, returns a negative value.
Otherwise, returns the number of bytes that are required to store the value.
   To check that the full value is represented, ensure that the return value is
   less than or equal to n_bytes.
All n_bytes are guaranteed to be written (unless an exception occurs), and
so ignoring a positive return value is the equivalent of a downcast in C.
In cases where the full value could not be represented, the returned value
may be larger than necessary - this function is not an accurate way to
calculate the bit length of an integer object.
*/
PyAPI_FUNC(Py_ssize_t) PyLong_AsNativeBytes(PyObject* v, void* buffer,
Py_ssize_t n_bytes, int flags);
/* PyLong_FromNativeBytes: Create an int value from a native integer
n_bytes is the number of bytes to read from the buffer. Passing 0 will
always produce the zero int.
PyLong_FromUnsignedNativeBytes always produces a non-negative int.
flags is the same as for PyLong_AsNativeBytes, but only supports selecting
the endianness or forcing an unsigned buffer.
Returns the int object, or NULL with an exception set. */
PyAPI_FUNC(PyObject*) PyLong_FromNativeBytes(const void* buffer, size_t n_bytes,
int flags);
PyAPI_FUNC(PyObject*) PyLong_FromUnsignedNativeBytes(const void* buffer,
size_t n_bytes, int flags);
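
A sketch of the truncation check described above: the return value is the number of bytes required, so anything larger than the buffer means the value did not fit. long_to_int64 is a hypothetical helper; PyErr_SetString and PyExc_OverflowError come from the error API rather than this header.

static int
long_to_int64(PyObject *obj, int64_t *out)
{
    Py_ssize_t needed = PyLong_AsNativeBytes(obj, out, (Py_ssize_t)sizeof(*out),
                                             Py_ASNATIVEBYTES_NATIVE_ENDIAN);
    if (needed < 0) {
        return -1;                           /* exception already set */
    }
    if ((size_t)needed > sizeof(*out)) {
        PyErr_SetString(PyExc_OverflowError, "Python int does not fit in 64 bits");
        return -1;                           /* *out holds the truncated low bytes */
    }
    return 0;
}
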
PyAPI_FUNC(int) PyUnstable_Long_IsCompact(const PyLongObject* op);
PyAPI_FUNC(Py_ssize_t) PyUnstable_Long_CompactValue(const PyLongObject* op);
// _PyLong_Sign. Return 0 if v is 0, -1 if v < 0, +1 if v > 0.
// v must not be NULL, and must be a normalized long.
// There are no error cases.
PyAPI_FUNC(int) _PyLong_Sign(PyObject *v);
/* _PyLong_NumBits. Return the number of bits needed to represent the
absolute value of a long. For example, this returns 1 for 1 and -1, 2
for 2 and -2, and 2 for 3 and -3. It returns 0 for 0.
v must not be NULL, and must be a normalized long.
(size_t)-1 is returned and OverflowError set if the true result doesn't
fit in a size_t.
*/
PyAPI_FUNC(size_t) _PyLong_NumBits(PyObject *v);
/* _PyLong_FromByteArray: View the n unsigned bytes as a binary integer in
base 256, and return a Python int with the same numeric value.
If n is 0, the integer is 0. Else:
If little_endian is 1/true, bytes[n-1] is the MSB and bytes[0] the LSB;
else (little_endian is 0/false) bytes[0] is the MSB and bytes[n-1] the
LSB.
If is_signed is 0/false, view the bytes as a non-negative integer.
If is_signed is 1/true, view the bytes as a 2's-complement integer,
non-negative if bit 0x80 of the MSB is clear, negative if set.
Error returns:
+ Return NULL with the appropriate exception set if there's not
enough memory to create the Python int.
*/
PyAPI_FUNC(PyObject *) _PyLong_FromByteArray(
const unsigned char* bytes, size_t n,
int little_endian, int is_signed);
/* _PyLong_AsByteArray: Convert the least-significant 8*n bits of long
v to a base-256 integer, stored in array bytes. Normally return 0,
return -1 on error.
If little_endian is 1/true, store the MSB at bytes[n-1] and the LSB at
bytes[0]; else (little_endian is 0/false) store the MSB at bytes[0] and
the LSB at bytes[n-1].
If is_signed is 0/false, it's an error if v < 0; else (v >= 0) n bytes
are filled and there's nothing special about bit 0x80 of the MSB.
If is_signed is 1/true, bytes is filled with the 2's-complement
representation of v's value. Bit 0x80 of the MSB is the sign bit.
Error returns (-1):
+ is_signed is 0 and v < 0. TypeError is set in this case, and bytes
isn't altered.
+ n isn't big enough to hold the full mathematical value of v. For
example, if is_signed is 0 and there are more digits in the v than
fit in n; or if is_signed is 1, v < 0, and n is just 1 bit shy of
being large enough to hold a sign bit. OverflowError is set in this
case, but bytes holds the least-significant n bytes of the true value.
*/
PyAPI_FUNC(int) _PyLong_AsByteArray(PyLongObject* v,
unsigned char* bytes, size_t n,
int little_endian, int is_signed, int with_exceptions);
/* For use by the gcd function in mathmodule.c */
PyAPI_FUNC(PyObject *) _PyLong_GCD(PyObject *, PyObject *);

View File

@@ -0,0 +1,50 @@
#ifndef Py_CPYTHON_MEMORYOBJECT_H
# error "this header file must not be included directly"
#endif
/* The structs are declared here so that macros can work, but they shouldn't
   be considered public. Don't access their fields directly; use the macros
and functions instead! */
#define _Py_MANAGED_BUFFER_RELEASED 0x001 /* access to exporter blocked */
#define _Py_MANAGED_BUFFER_FREE_FORMAT 0x002 /* free format */
typedef struct {
PyObject_HEAD
int flags; /* state flags */
Py_ssize_t exports; /* number of direct memoryview exports */
Py_buffer master; /* snapshot buffer obtained from the original exporter */
} _PyManagedBufferObject;
/* memoryview state flags */
#define _Py_MEMORYVIEW_RELEASED 0x001 /* access to master buffer blocked */
#define _Py_MEMORYVIEW_C 0x002 /* C-contiguous layout */
#define _Py_MEMORYVIEW_FORTRAN 0x004 /* Fortran contiguous layout */
#define _Py_MEMORYVIEW_SCALAR 0x008 /* scalar: ndim = 0 */
#define _Py_MEMORYVIEW_PIL 0x010 /* PIL-style layout */
#define _Py_MEMORYVIEW_RESTRICTED 0x020 /* Disallow new references to the memoryview's buffer */
typedef struct {
PyObject_VAR_HEAD
_PyManagedBufferObject *mbuf; /* managed buffer */
Py_hash_t hash; /* hash value for read-only views */
int flags; /* state flags */
Py_ssize_t exports; /* number of buffer re-exports */
Py_buffer view; /* private copy of the exporter's view */
PyObject *weakreflist;
Py_ssize_t ob_array[1]; /* shape, strides, suboffsets */
} PyMemoryViewObject;
#define _PyMemoryView_CAST(op) _Py_CAST(PyMemoryViewObject*, op)
/* Get a pointer to the memoryview's private copy of the exporter's buffer. */
static inline Py_buffer* PyMemoryView_GET_BUFFER(PyObject *op) {
return (&_PyMemoryView_CAST(op)->view);
}
#define PyMemoryView_GET_BUFFER(op) PyMemoryView_GET_BUFFER(_PyObject_CAST(op))
/* Get a pointer to the exporting object (this may be NULL!). */
static inline PyObject* PyMemoryView_GET_BASE(PyObject *op) {
return _PyMemoryView_CAST(op)->view.obj;
}
#define PyMemoryView_GET_BASE(op) PyMemoryView_GET_BASE(_PyObject_CAST(op))
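
A sketch using the fast accessor above; the macro performs no type checking, so the caller must guarantee it really has a memoryview. memoryview_nbytes is a hypothetical helper, and PyMemoryView_Check comes from the public memoryview API.

static Py_ssize_t
memoryview_nbytes(PyObject *mv)
{
    assert(PyMemoryView_Check(mv));          /* the macro itself performs no check */
    Py_buffer *view = PyMemoryView_GET_BUFFER(mv);
    return view->len;                        /* total exported size in bytes */
}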

View File

@@ -0,0 +1,66 @@
#ifndef Py_CPYTHON_METHODOBJECT_H
# error "this header file must not be included directly"
#endif
// PyCFunctionObject structure
typedef struct {
PyObject_HEAD
PyMethodDef *m_ml; /* Description of the C function to call */
PyObject *m_self; /* Passed as 'self' arg to the C func, can be NULL */
PyObject *m_module; /* The __module__ attribute, can be anything */
PyObject *m_weakreflist; /* List of weak references */
vectorcallfunc vectorcall;
} PyCFunctionObject;
#define _PyCFunctionObject_CAST(func) \
(assert(PyCFunction_Check(func)), \
_Py_CAST(PyCFunctionObject*, (func)))
// PyCMethodObject structure
typedef struct {
PyCFunctionObject func;
PyTypeObject *mm_class; /* Class that defines this method */
} PyCMethodObject;
#define _PyCMethodObject_CAST(func) \
(assert(PyCMethod_Check(func)), \
_Py_CAST(PyCMethodObject*, (func)))
PyAPI_DATA(PyTypeObject) PyCMethod_Type;
#define PyCMethod_CheckExact(op) Py_IS_TYPE((op), &PyCMethod_Type)
#define PyCMethod_Check(op) PyObject_TypeCheck((op), &PyCMethod_Type)
/* Static inline functions for direct access to these values.
Type checks are *not* done, so use with care. */
static inline PyCFunction PyCFunction_GET_FUNCTION(PyObject *func) {
return _PyCFunctionObject_CAST(func)->m_ml->ml_meth;
}
#define PyCFunction_GET_FUNCTION(func) PyCFunction_GET_FUNCTION(_PyObject_CAST(func))
static inline PyObject* PyCFunction_GET_SELF(PyObject *func_obj) {
PyCFunctionObject *func = _PyCFunctionObject_CAST(func_obj);
if (func->m_ml->ml_flags & METH_STATIC) {
return _Py_NULL;
}
return func->m_self;
}
#define PyCFunction_GET_SELF(func) PyCFunction_GET_SELF(_PyObject_CAST(func))
static inline int PyCFunction_GET_FLAGS(PyObject *func) {
return _PyCFunctionObject_CAST(func)->m_ml->ml_flags;
}
#define PyCFunction_GET_FLAGS(func) PyCFunction_GET_FLAGS(_PyObject_CAST(func))
static inline PyTypeObject* PyCFunction_GET_CLASS(PyObject *func_obj) {
PyCFunctionObject *func = _PyCFunctionObject_CAST(func_obj);
if (func->m_ml->ml_flags & METH_METHOD) {
return _PyCMethodObject_CAST(func)->mm_class;
}
return _Py_NULL;
}
#define PyCFunction_GET_CLASS(func) PyCFunction_GET_CLASS(_PyObject_CAST(func))
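
A sketch of how these unchecked accessors might be combined: dispatch directly for METH_NOARGS built-in functions and fall back to the generic call machinery otherwise. call_if_noargs is a hypothetical helper, and PyObject_CallNoArgs comes from the call API rather than this header.

static PyObject *
call_if_noargs(PyObject *func)
{
    assert(PyCFunction_Check(func));         /* the accessors do no type checking */
    if (PyCFunction_GET_FLAGS(func) & METH_NOARGS) {
        PyCFunction meth = PyCFunction_GET_FUNCTION(func);
        return meth(PyCFunction_GET_SELF(func), NULL);   /* METH_NOARGS passes args == NULL */
    }
    return PyObject_CallNoArgs(func);
}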

View File

@@ -0,0 +1,26 @@
#ifndef Py_CPYTHON_MODSUPPORT_H
# error "this header file must not be included directly"
#endif
// A data structure that can be used to run initialization code once in a
// thread-safe manner. The C++11 equivalent is std::call_once.
typedef struct {
uint8_t v;
} _PyOnceFlag;
typedef struct _PyArg_Parser {
const char *format;
const char * const *keywords;
const char *fname;
const char *custom_msg;
_PyOnceFlag once; /* atomic one-time initialization flag */
int is_kwtuple_owned; /* does this parser own the kwtuple object? */
int pos; /* number of positional-only arguments */
int min; /* minimal number of arguments */
int max; /* maximal number of positional arguments */
PyObject *kwtuple; /* tuple of keyword parameter names */
struct _PyArg_Parser *next;
} _PyArg_Parser;
PyAPI_FUNC(int) _PyArg_ParseTupleAndKeywordsFast(PyObject *, PyObject *,
struct _PyArg_Parser *, ...);

View File

@@ -0,0 +1,250 @@
#ifndef Py_CPYTHON_MONITORING_H
# error "this header file must not be included directly"
#endif
/* Local events.
* These require bytecode instrumentation */
#define PY_MONITORING_EVENT_PY_START 0
#define PY_MONITORING_EVENT_PY_RESUME 1
#define PY_MONITORING_EVENT_PY_RETURN 2
#define PY_MONITORING_EVENT_PY_YIELD 3
#define PY_MONITORING_EVENT_CALL 4
#define PY_MONITORING_EVENT_LINE 5
#define PY_MONITORING_EVENT_INSTRUCTION 6
#define PY_MONITORING_EVENT_JUMP 7
#define PY_MONITORING_EVENT_BRANCH 8
#define PY_MONITORING_EVENT_STOP_ITERATION 9
#define PY_MONITORING_IS_INSTRUMENTED_EVENT(ev) \
((ev) < _PY_MONITORING_LOCAL_EVENTS)
/* Other events, mainly exceptions */
#define PY_MONITORING_EVENT_RAISE 10
#define PY_MONITORING_EVENT_EXCEPTION_HANDLED 11
#define PY_MONITORING_EVENT_PY_UNWIND 12
#define PY_MONITORING_EVENT_PY_THROW 13
#define PY_MONITORING_EVENT_RERAISE 14
/* Ancillary events */
#define PY_MONITORING_EVENT_C_RETURN 15
#define PY_MONITORING_EVENT_C_RAISE 16
typedef struct _PyMonitoringState {
uint8_t active;
uint8_t opaque;
} PyMonitoringState;
PyAPI_FUNC(int)
PyMonitoring_EnterScope(PyMonitoringState *state_array, uint64_t *version,
const uint8_t *event_types, Py_ssize_t length);
PyAPI_FUNC(int)
PyMonitoring_ExitScope(void);
PyAPI_FUNC(int)
_PyMonitoring_FirePyStartEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FirePyResumeEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FirePyReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval);
PyAPI_FUNC(int)
_PyMonitoring_FirePyYieldEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval);
PyAPI_FUNC(int)
_PyMonitoring_FireCallEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject* callable, PyObject *arg0);
PyAPI_FUNC(int)
_PyMonitoring_FireLineEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
int lineno);
PyAPI_FUNC(int)
_PyMonitoring_FireJumpEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset);
PyAPI_FUNC(int)
_PyMonitoring_FireBranchEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset);
PyAPI_FUNC(int)
_PyMonitoring_FireCReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval);
PyAPI_FUNC(int)
_PyMonitoring_FirePyThrowEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireReraiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireExceptionHandledEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireCRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FirePyUnwindEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset);
PyAPI_FUNC(int)
_PyMonitoring_FireStopIterationEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset, PyObject *value);
#define _PYMONITORING_IF_ACTIVE(STATE, X) \
if ((STATE)->active) { \
return (X); \
} \
else { \
return 0; \
}
static inline int
PyMonitoring_FirePyStartEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyStartEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FirePyResumeEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyResumeEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FirePyReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyReturnEvent(state, codelike, offset, retval));
}
static inline int
PyMonitoring_FirePyYieldEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyYieldEvent(state, codelike, offset, retval));
}
static inline int
PyMonitoring_FireCallEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject* callable, PyObject *arg0)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireCallEvent(state, codelike, offset, callable, arg0));
}
static inline int
PyMonitoring_FireLineEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
int lineno)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireLineEvent(state, codelike, offset, lineno));
}
static inline int
PyMonitoring_FireJumpEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireJumpEvent(state, codelike, offset, target_offset));
}
static inline int
PyMonitoring_FireBranchEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *target_offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireBranchEvent(state, codelike, offset, target_offset));
}
static inline int
PyMonitoring_FireCReturnEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset,
PyObject *retval)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireCReturnEvent(state, codelike, offset, retval));
}
static inline int
PyMonitoring_FirePyThrowEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyThrowEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireRaiseEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireReraiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireReraiseEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireExceptionHandledEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireExceptionHandledEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireCRaiseEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireCRaiseEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FirePyUnwindEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FirePyUnwindEvent(state, codelike, offset));
}
static inline int
PyMonitoring_FireStopIterationEvent(PyMonitoringState *state, PyObject *codelike, int32_t offset, PyObject *value)
{
_PYMONITORING_IF_ACTIVE(
state,
_PyMonitoring_FireStopIterationEvent(state, codelike, offset, value));
}
#undef _PYMONITORING_IF_ACTIVE
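
A sketch of the enter-scope / fire / exit-scope sequence these declarations imply, firing a single PY_START event. report_py_start, codelike and offset are hypothetical; the code-like object and offset are whatever the tool uses to identify a location.

static int
report_py_start(PyObject *codelike, int32_t offset)
{
    uint64_t version = 0;                    /* caller-owned version cache */
    uint8_t events[] = { PY_MONITORING_EVENT_PY_START };
    PyMonitoringState state[1];

    if (PyMonitoring_EnterScope(state, &version, events, 1) < 0) {
        return -1;
    }
    int err = PyMonitoring_FirePyStartEvent(&state[0], codelike, offset);
    if (PyMonitoring_ExitScope() < 0) {
        return -1;
    }
    return err;                              /* 0 on success, -1 if the handler failed */
}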

View File

@@ -0,0 +1,525 @@
#ifndef Py_CPYTHON_OBJECT_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(void) _Py_NewReference(PyObject *op);
PyAPI_FUNC(void) _Py_NewReferenceNoTotal(PyObject *op);
PyAPI_FUNC(void) _Py_ResurrectReference(PyObject *op);
#ifdef Py_REF_DEBUG
/* These are useful as debugging aids when chasing down refleaks. */
PyAPI_FUNC(Py_ssize_t) _Py_GetGlobalRefTotal(void);
# define _Py_GetRefTotal() _Py_GetGlobalRefTotal()
PyAPI_FUNC(Py_ssize_t) _Py_GetLegacyRefTotal(void);
PyAPI_FUNC(Py_ssize_t) _PyInterpreterState_GetRefTotal(PyInterpreterState *);
#endif
/********************* String Literals ****************************************/
/* This structure helps manage static strings. The basic usage goes like this:
Instead of doing
r = PyObject_CallMethod(o, "foo", "args", ...);
do
_Py_IDENTIFIER(foo);
...
r = _PyObject_CallMethodId(o, &PyId_foo, "args", ...);
PyId_foo is a static variable, either on block level or file level. On first
usage, the string "foo" is interned, and the structures are linked. On interpreter
shutdown, all strings are released.
Alternatively, _Py_static_string allows choosing the variable name.
_PyUnicode_FromId returns a borrowed reference to the interned string.
_PyObject_{Get,Set,Has}AttrId are __getattr__ versions using _Py_Identifier*.
*/
typedef struct _Py_Identifier {
const char* string;
    // Index in PyInterpreterState.unicode.ids.array. It is unique process-wide
    // and must be initialized to -1.
Py_ssize_t index;
// Hidden PyMutex struct for non free-threaded build.
struct {
uint8_t v;
} mutex;
} _Py_Identifier;
#ifndef Py_BUILD_CORE
// For now we are keeping _Py_IDENTIFIER for continued use
// in non-builtin extensions (and naughty PyPI modules).
#define _Py_static_string_init(value) { .string = (value), .index = -1 }
#define _Py_static_string(varname, value) static _Py_Identifier varname = _Py_static_string_init(value)
#define _Py_IDENTIFIER(varname) _Py_static_string(PyId_##varname, #varname)
#endif /* !Py_BUILD_CORE */
typedef struct {
/* Number implementations must check *both*
arguments for proper type and implement the necessary conversions
in the slot functions themselves. */
binaryfunc nb_add;
binaryfunc nb_subtract;
binaryfunc nb_multiply;
binaryfunc nb_remainder;
binaryfunc nb_divmod;
ternaryfunc nb_power;
unaryfunc nb_negative;
unaryfunc nb_positive;
unaryfunc nb_absolute;
inquiry nb_bool;
unaryfunc nb_invert;
binaryfunc nb_lshift;
binaryfunc nb_rshift;
binaryfunc nb_and;
binaryfunc nb_xor;
binaryfunc nb_or;
unaryfunc nb_int;
void *nb_reserved; /* the slot formerly known as nb_long */
unaryfunc nb_float;
binaryfunc nb_inplace_add;
binaryfunc nb_inplace_subtract;
binaryfunc nb_inplace_multiply;
binaryfunc nb_inplace_remainder;
ternaryfunc nb_inplace_power;
binaryfunc nb_inplace_lshift;
binaryfunc nb_inplace_rshift;
binaryfunc nb_inplace_and;
binaryfunc nb_inplace_xor;
binaryfunc nb_inplace_or;
binaryfunc nb_floor_divide;
binaryfunc nb_true_divide;
binaryfunc nb_inplace_floor_divide;
binaryfunc nb_inplace_true_divide;
unaryfunc nb_index;
binaryfunc nb_matrix_multiply;
binaryfunc nb_inplace_matrix_multiply;
} PyNumberMethods;
typedef struct {
lenfunc sq_length;
binaryfunc sq_concat;
ssizeargfunc sq_repeat;
ssizeargfunc sq_item;
void *was_sq_slice;
ssizeobjargproc sq_ass_item;
void *was_sq_ass_slice;
objobjproc sq_contains;
binaryfunc sq_inplace_concat;
ssizeargfunc sq_inplace_repeat;
} PySequenceMethods;
typedef struct {
lenfunc mp_length;
binaryfunc mp_subscript;
objobjargproc mp_ass_subscript;
} PyMappingMethods;
typedef PySendResult (*sendfunc)(PyObject *iter, PyObject *value, PyObject **result);
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
sendfunc am_send;
} PyAsyncMethods;
typedef struct {
getbufferproc bf_getbuffer;
releasebufferproc bf_releasebuffer;
} PyBufferProcs;
/* Allow printfunc in the tp_vectorcall_offset slot for
* backwards-compatibility */
typedef Py_ssize_t printfunc;
// If this structure is modified, Doc/includes/typestruct.h should be updated
// as well.
struct _typeobject {
PyObject_VAR_HEAD
const char *tp_name; /* For printing, in format "<module>.<name>" */
Py_ssize_t tp_basicsize, tp_itemsize; /* For allocation */
/* Methods to implement standard operations */
destructor tp_dealloc;
Py_ssize_t tp_vectorcall_offset;
getattrfunc tp_getattr;
setattrfunc tp_setattr;
PyAsyncMethods *tp_as_async; /* formerly known as tp_compare (Python 2)
or tp_reserved (Python 3) */
reprfunc tp_repr;
/* Method suites for standard classes */
PyNumberMethods *tp_as_number;
PySequenceMethods *tp_as_sequence;
PyMappingMethods *tp_as_mapping;
/* More standard operations (here for binary compatibility) */
hashfunc tp_hash;
ternaryfunc tp_call;
reprfunc tp_str;
getattrofunc tp_getattro;
setattrofunc tp_setattro;
/* Functions to access object as input/output buffer */
PyBufferProcs *tp_as_buffer;
/* Flags to define presence of optional/expanded features */
unsigned long tp_flags;
const char *tp_doc; /* Documentation string */
/* Assigned meaning in release 2.0 */
/* call function for all accessible objects */
traverseproc tp_traverse;
/* delete references to contained objects */
inquiry tp_clear;
/* Assigned meaning in release 2.1 */
/* rich comparisons */
richcmpfunc tp_richcompare;
/* weak reference enabler */
Py_ssize_t tp_weaklistoffset;
/* Iterators */
getiterfunc tp_iter;
iternextfunc tp_iternext;
/* Attribute descriptor and subclassing stuff */
PyMethodDef *tp_methods;
PyMemberDef *tp_members;
PyGetSetDef *tp_getset;
// Strong reference on a heap type, borrowed reference on a static type
PyTypeObject *tp_base;
PyObject *tp_dict;
descrgetfunc tp_descr_get;
descrsetfunc tp_descr_set;
Py_ssize_t tp_dictoffset;
initproc tp_init;
allocfunc tp_alloc;
newfunc tp_new;
freefunc tp_free; /* Low-level free-memory routine */
inquiry tp_is_gc; /* For PyObject_IS_GC */
PyObject *tp_bases;
PyObject *tp_mro; /* method resolution order */
PyObject *tp_cache; /* no longer used */
void *tp_subclasses; /* for static builtin types this is an index */
PyObject *tp_weaklist; /* not used for static builtin types */
destructor tp_del;
/* Type attribute cache version tag. Added in version 2.6 */
unsigned int tp_version_tag;
destructor tp_finalize;
vectorcallfunc tp_vectorcall;
/* bitset of which type-watchers care about this type */
unsigned char tp_watched;
uint16_t tp_versions_used;
};
/* This struct is used by the specializer
* It should be treated as an opaque blob
* by code other than the specializer and interpreter. */
struct _specialization_cache {
// In order to avoid bloating the bytecode with lots of inline caches, the
// members of this structure have a somewhat unique contract. They are set
// by the specialization machinery, and are invalidated by PyType_Modified.
// The rules for using them are as follows:
// - If getitem is non-NULL, then it is the same Python function that
// PyType_Lookup(cls, "__getitem__") would return.
// - If getitem is NULL, then getitem_version is meaningless.
// - If getitem->func_version == getitem_version, then getitem can be called
// with two positional arguments and no keyword arguments, and has neither
// *args nor **kwargs (as required by BINARY_SUBSCR_GETITEM):
PyObject *getitem;
uint32_t getitem_version;
PyObject *init;
};
/* The *real* layout of a type object when allocated on the heap */
typedef struct _heaptypeobject {
/* Note: there's a dependency on the order of these members
in slotptr() in typeobject.c . */
PyTypeObject ht_type;
PyAsyncMethods as_async;
PyNumberMethods as_number;
PyMappingMethods as_mapping;
PySequenceMethods as_sequence; /* as_sequence comes after as_mapping,
so that the mapping wins when both
the mapping and the sequence define
a given operator (e.g. __getitem__).
see add_operators() in typeobject.c . */
PyBufferProcs as_buffer;
PyObject *ht_name, *ht_slots, *ht_qualname;
struct _dictkeysobject *ht_cached_keys;
PyObject *ht_module;
char *_ht_tpname; // Storage for "tp_name"; see PyType_FromModuleAndSpec
struct _specialization_cache _spec_cache; // For use by the specializer.
/* here are optional user slots, followed by the members. */
} PyHeapTypeObject;
PyAPI_FUNC(const char *) _PyType_Name(PyTypeObject *);
PyAPI_FUNC(PyObject *) _PyType_Lookup(PyTypeObject *, PyObject *);
PyAPI_FUNC(PyObject *) _PyType_LookupRef(PyTypeObject *, PyObject *);
PyAPI_FUNC(PyObject *) PyType_GetDict(PyTypeObject *);
PyAPI_FUNC(int) PyObject_Print(PyObject *, FILE *, int);
PyAPI_FUNC(void) _Py_BreakPoint(void);
PyAPI_FUNC(void) _PyObject_Dump(PyObject *);
PyAPI_FUNC(PyObject*) _PyObject_GetAttrId(PyObject *, _Py_Identifier *);
PyAPI_FUNC(PyObject **) _PyObject_GetDictPtr(PyObject *);
PyAPI_FUNC(void) PyObject_CallFinalizer(PyObject *);
PyAPI_FUNC(int) PyObject_CallFinalizerFromDealloc(PyObject *);
PyAPI_FUNC(void) PyUnstable_Object_ClearWeakRefsNoCallbacks(PyObject *);
/* Same as PyObject_Generic{Get,Set}Attr, but passing the attributes
dict as the last parameter. */
PyAPI_FUNC(PyObject *)
_PyObject_GenericGetAttrWithDict(PyObject *, PyObject *, PyObject *, int);
PyAPI_FUNC(int)
_PyObject_GenericSetAttrWithDict(PyObject *, PyObject *,
PyObject *, PyObject *);
PyAPI_FUNC(PyObject *) _PyObject_FunctionStr(PyObject *);
/* Safely decref `dst` and set `dst` to `src`.
*
 * As in the case of Py_CLEAR, "the obvious" code can be deadly:
*
* Py_DECREF(dst);
* dst = src;
*
* The safe way is:
*
* Py_SETREF(dst, src);
*
* That arranges to set `dst` to `src` _before_ decref'ing, so that any code
* triggered as a side-effect of `dst` getting torn down no longer believes
* `dst` points to a valid object.
*
 * Temporary variables are used to evaluate macro arguments only once, to
 * avoid duplicating side effects. _Py_TYPEOF() or memcpy() is used to
 * avoid a miscompilation caused by type punning. See the Py_CLEAR() comment for
 * implementation details about type punning.
 *
 * The memcpy() implementation does not emit a compiler warning if 'src' does
 * not have the same type as 'dst': any pointer type is accepted for 'src'.
*/
#ifdef _Py_TYPEOF
#define Py_SETREF(dst, src) \
do { \
_Py_TYPEOF(dst)* _tmp_dst_ptr = &(dst); \
_Py_TYPEOF(dst) _tmp_old_dst = (*_tmp_dst_ptr); \
*_tmp_dst_ptr = (src); \
Py_DECREF(_tmp_old_dst); \
} while (0)
#else
#define Py_SETREF(dst, src) \
do { \
PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
PyObject *_tmp_old_dst = (*_tmp_dst_ptr); \
PyObject *_tmp_src = _PyObject_CAST(src); \
memcpy(_tmp_dst_ptr, &_tmp_src, sizeof(PyObject*)); \
Py_DECREF(_tmp_old_dst); \
} while (0)
#endif
/* Py_XSETREF() is a variant of Py_SETREF() that uses Py_XDECREF() instead of
* Py_DECREF().
*/
#ifdef _Py_TYPEOF
#define Py_XSETREF(dst, src) \
do { \
_Py_TYPEOF(dst)* _tmp_dst_ptr = &(dst); \
_Py_TYPEOF(dst) _tmp_old_dst = (*_tmp_dst_ptr); \
*_tmp_dst_ptr = (src); \
Py_XDECREF(_tmp_old_dst); \
} while (0)
#else
#define Py_XSETREF(dst, src) \
do { \
PyObject **_tmp_dst_ptr = _Py_CAST(PyObject**, &(dst)); \
PyObject *_tmp_old_dst = (*_tmp_dst_ptr); \
PyObject *_tmp_src = _PyObject_CAST(src); \
memcpy(_tmp_dst_ptr, &_tmp_src, sizeof(PyObject*)); \
Py_XDECREF(_tmp_old_dst); \
} while (0)
#endif
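
A one-line sketch of the safe replacement pattern described above, for a hypothetical object with a cached attribute; MyObject, cache and cache_set are invented names, and Py_XNewRef comes from the refcount API rather than this header.

static void
cache_set(MyObject *self, PyObject *new_value)
{
    /* take the new strong reference first, then swap and release the old one */
    Py_XSETREF(self->cache, Py_XNewRef(new_value));
}
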
/* Define a pair of assertion macros:
_PyObject_ASSERT_FROM(), _PyObject_ASSERT_WITH_MSG() and _PyObject_ASSERT().
These work like the regular C assert(), in that they will abort the
process with a message on stderr if the given condition fails to hold,
but compile away to nothing if NDEBUG is defined.
However, before aborting, Python will also try to call _PyObject_Dump() on
the given object. This may be of use when investigating bugs in which a
   particular object is corrupt (e.g. a buggy tp_visit method in an extension
module breaking the garbage collector), to help locate the broken objects.
The WITH_MSG variant allows you to supply an additional message that Python
will attempt to print to stderr, after the object dump. */
#ifdef NDEBUG
/* No debugging: compile away the assertions: */
# define _PyObject_ASSERT_FROM(obj, expr, msg, filename, lineno, func) \
((void)0)
#else
/* With debugging: generate checks: */
# define _PyObject_ASSERT_FROM(obj, expr, msg, filename, lineno, func) \
((expr) \
? (void)(0) \
: _PyObject_AssertFailed((obj), Py_STRINGIFY(expr), \
(msg), (filename), (lineno), (func)))
#endif
#define _PyObject_ASSERT_WITH_MSG(obj, expr, msg) \
_PyObject_ASSERT_FROM((obj), expr, (msg), __FILE__, __LINE__, __func__)
#define _PyObject_ASSERT(obj, expr) \
_PyObject_ASSERT_WITH_MSG((obj), expr, NULL)
#define _PyObject_ASSERT_FAILED_MSG(obj, msg) \
_PyObject_AssertFailed((obj), NULL, (msg), __FILE__, __LINE__, __func__)
/* Declare and define _PyObject_AssertFailed() even when NDEBUG is defined,
to avoid causing compiler/linker errors when building extensions without
NDEBUG against a Python built with NDEBUG defined.
msg, expr and function can be NULL. */
PyAPI_FUNC(void) _Py_NO_RETURN _PyObject_AssertFailed(
PyObject *obj,
const char *expr,
const char *msg,
const char *file,
int line,
const char *function);
/* Trashcan mechanism, thanks to Christian Tismer.
When deallocating a container object, it's possible to trigger an unbounded
chain of deallocations, as each Py_DECREF in turn drops the refcount on "the
next" object in the chain to 0. This can easily lead to stack overflows,
especially in threads (which typically have less stack space to work with).
A container object can avoid this by bracketing the body of its tp_dealloc
function with a pair of macros:
static void
mytype_dealloc(mytype *p)
{
... declarations go here ...
PyObject_GC_UnTrack(p); // must untrack first
Py_TRASHCAN_BEGIN(p, mytype_dealloc)
... The body of the deallocator goes here, including all calls ...
... to Py_DECREF on contained objects. ...
Py_TRASHCAN_END // there should be no code after this
}
CAUTION: Never return from the middle of the body! If the body needs to
"get out early", put a label immediately before the Py_TRASHCAN_END
call, and goto it. Else the call-depth counter (see below) will stay
above 0 forever, and the trashcan will never get emptied.
How it works: The BEGIN macro increments a call-depth counter. So long
as this counter is small, the body of the deallocator is run directly without
further ado. But if the counter gets large, it instead adds p to a list of
objects to be deallocated later, skips the body of the deallocator, and
resumes execution after the END macro. The tp_dealloc routine then returns
without deallocating anything (and so unbounded call-stack depth is avoided).
When the call stack finishes unwinding again, code generated by the END macro
notices this, and calls another routine to deallocate all the objects that
may have been added to the list of deferred deallocations. In effect, a
chain of N deallocations is broken into (N-1)/(Py_TRASHCAN_HEADROOM-1) pieces,
with the call stack never exceeding a depth of Py_TRASHCAN_HEADROOM.
Since the tp_dealloc of a subclass typically calls the tp_dealloc of the base
class, we need to ensure that the trashcan is only triggered on the tp_dealloc
of the actual class being deallocated. Otherwise we might end up with a
partially-deallocated object. To check this, the tp_dealloc function must be
passed as second argument to Py_TRASHCAN_BEGIN().
*/
/* Python 3.9 private API, invoked by the macros below. */
PyAPI_FUNC(int) _PyTrash_begin(PyThreadState *tstate, PyObject *op);
PyAPI_FUNC(void) _PyTrash_end(PyThreadState *tstate);
PyAPI_FUNC(void) _PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op);
PyAPI_FUNC(void) _PyTrash_thread_destroy_chain(PyThreadState *tstate);
/* Python 3.10 private API, invoked by the Py_TRASHCAN_BEGIN(). */
/* To avoid raising recursion errors during dealloc, trigger the trashcan before we reach
 * the recursion limit. To avoid thrashing, we don't attempt to empty the trashcan until
 * we have headroom above the trigger limit. */
#define Py_TRASHCAN_HEADROOM 50
#define Py_TRASHCAN_BEGIN(op, dealloc) \
do { \
PyThreadState *tstate = PyThreadState_Get(); \
if (tstate->c_recursion_remaining <= Py_TRASHCAN_HEADROOM && Py_TYPE(op)->tp_dealloc == (destructor)dealloc) { \
_PyTrash_thread_deposit_object(tstate, (PyObject *)op); \
break; \
} \
tstate->c_recursion_remaining--;
/* The body of the deallocator is here. */
#define Py_TRASHCAN_END \
tstate->c_recursion_remaining++; \
if (tstate->delete_later && tstate->c_recursion_remaining > (Py_TRASHCAN_HEADROOM*2)) { \
_PyTrash_thread_destroy_chain(tstate); \
} \
} while (0);
PyAPI_FUNC(void *) PyObject_GetItemData(PyObject *obj);
PyAPI_FUNC(int) PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg);
PyAPI_FUNC(int) _PyObject_SetManagedDict(PyObject *obj, PyObject *new_dict);
PyAPI_FUNC(void) PyObject_ClearManagedDict(PyObject *obj);
#define TYPE_MAX_WATCHERS 8
typedef int(*PyType_WatchCallback)(PyTypeObject *);
PyAPI_FUNC(int) PyType_AddWatcher(PyType_WatchCallback callback);
PyAPI_FUNC(int) PyType_ClearWatcher(int watcher_id);
PyAPI_FUNC(int) PyType_Watch(int watcher_id, PyObject *type);
PyAPI_FUNC(int) PyType_Unwatch(int watcher_id, PyObject *type);
/* Attempt to assign a version tag to the given type.
*
* Returns 1 if the type already had a valid version tag or a new one was
* assigned, or 0 if a new tag could not be assigned.
*/
PyAPI_FUNC(int) PyUnstable_Type_AssignVersionTag(PyTypeObject *type);
typedef enum {
PyRefTracer_CREATE = 0,
PyRefTracer_DESTROY = 1,
} PyRefTracerEvent;
typedef int (*PyRefTracer)(PyObject *, PyRefTracerEvent event, void *);
PyAPI_FUNC(int) PyRefTracer_SetTracer(PyRefTracer tracer, void *data);
PyAPI_FUNC(PyRefTracer) PyRefTracer_GetTracer(void**);

View File

@@ -0,0 +1,104 @@
#ifndef Py_CPYTHON_OBJIMPL_H
# error "this header file must not be included directly"
#endif
static inline size_t _PyObject_SIZE(PyTypeObject *type) {
return _Py_STATIC_CAST(size_t, type->tp_basicsize);
}
/* _PyObject_VAR_SIZE returns the number of bytes (as size_t) allocated for a
   variable-size object with nitems items, exclusive of gc overhead (if any). The
value is rounded up to the closest multiple of sizeof(void *), in order to
ensure that pointer fields at the end of the object are correctly aligned
for the platform (this is of special importance for subclasses of, e.g.,
str or int, so that pointers can be stored after the embedded data).
Note that there's no memory wastage in doing this, as malloc has to
return (at worst) pointer-aligned memory anyway.
*/
#if ((SIZEOF_VOID_P - 1) & SIZEOF_VOID_P) != 0
# error "_PyObject_VAR_SIZE requires SIZEOF_VOID_P be a power of 2"
#endif
static inline size_t _PyObject_VAR_SIZE(PyTypeObject *type, Py_ssize_t nitems) {
size_t size = _Py_STATIC_CAST(size_t, type->tp_basicsize);
size += _Py_STATIC_CAST(size_t, nitems) * _Py_STATIC_CAST(size_t, type->tp_itemsize);
return _Py_SIZE_ROUND_UP(size, SIZEOF_VOID_P);
}
/* This example code implements an object constructor with a custom
allocator, where PyObject_New is inlined, and shows the important
distinction between two steps (at least):
1) the actual allocation of the object storage;
2) the initialization of the Python specific fields
in this storage with PyObject_{Init, InitVar}.
PyObject *
YourObject_New(...)
{
PyObject *op;
op = (PyObject *) Your_Allocator(_PyObject_SIZE(YourTypeStruct));
if (op == NULL) {
return PyErr_NoMemory();
}
PyObject_Init(op, &YourTypeStruct);
op->ob_field = value;
...
return op;
}
Note that in C++, the use of the new operator usually implies that
the 1st step is performed automatically for you, so in a C++ class
constructor you would start directly with PyObject_Init/InitVar. */
typedef struct {
/* user context passed as the first argument to the 2 functions */
void *ctx;
/* allocate an arena of size bytes */
void* (*alloc) (void *ctx, size_t size);
/* free an arena */
void (*free) (void *ctx, void *ptr, size_t size);
} PyObjectArenaAllocator;
/* Get the arena allocator. */
PyAPI_FUNC(void) PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator);
/* Set the arena allocator. */
PyAPI_FUNC(void) PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator);
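
A sketch of wrapping the arena allocator declared above to count outstanding arena bytes; the counting_* names and arena_bytes_outstanding are hypothetical.

static size_t arena_bytes_outstanding;
static PyObjectArenaAllocator previous_allocator;

static void *
counting_arena_alloc(void *ctx, size_t size)
{
    (void)ctx;
    arena_bytes_outstanding += size;
    return previous_allocator.alloc(previous_allocator.ctx, size);
}

static void
counting_arena_free(void *ctx, void *ptr, size_t size)
{
    (void)ctx;
    arena_bytes_outstanding -= size;
    previous_allocator.free(previous_allocator.ctx, ptr, size);
}

static void
install_counting_arena_allocator(void)
{
    PyObject_GetArenaAllocator(&previous_allocator);   /* keep the old hooks to chain to */
    PyObjectArenaAllocator wrapper = {NULL, counting_arena_alloc, counting_arena_free};
    PyObject_SetArenaAllocator(&wrapper);
}
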
/* Test if an object implements the garbage collector protocol */
PyAPI_FUNC(int) PyObject_IS_GC(PyObject *obj);
// Test if a type supports weak references
PyAPI_FUNC(int) PyType_SUPPORTS_WEAKREFS(PyTypeObject *type);
PyAPI_FUNC(PyObject **) PyObject_GET_WEAKREFS_LISTPTR(PyObject *op);
PyAPI_FUNC(PyObject *) PyUnstable_Object_GC_NewWithExtraData(PyTypeObject *,
size_t);
/* Visit all live GC-capable objects, similar to gc.get_objects(None). The
* supplied callback is called on every such object with the void* arg set
 * to the supplied arg. Returning 0 from the callback ends iteration; returning
 * 1 allows iteration to continue. Returning any other value may result in
 * undefined behaviour.
 *
 * If new objects are (de)allocated by the callback, it is undefined whether they
 * will be visited.
* Garbage collection is disabled during operation. Explicitly running a
* collection in the callback may lead to undefined behaviour e.g. visiting the
* same objects multiple times or not at all.
*/
typedef int (*gcvisitobjects_t)(PyObject*, void*);
PyAPI_FUNC(void) PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void* arg);
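
A sketch of a visitor that simply counts live GC-tracked objects using the callback contract described above (return 1 to continue); count_visitor and count_gc_objects are hypothetical names.

static int
count_visitor(PyObject *obj, void *arg)
{
    (void)obj;
    ++*(Py_ssize_t *)arg;
    return 1;                                /* non-zero: keep iterating */
}

static Py_ssize_t
count_gc_objects(void)
{
    Py_ssize_t n = 0;
    PyUnstable_GC_VisitObjects(count_visitor, &n);
    return n;
}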

View File

@@ -0,0 +1,43 @@
#ifndef Py_ODICTOBJECT_H
#define Py_ODICTOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
/* OrderedDict */
/* This API is optional and mostly redundant. */
#ifndef Py_LIMITED_API
typedef struct _odictobject PyODictObject;
PyAPI_DATA(PyTypeObject) PyODict_Type;
PyAPI_DATA(PyTypeObject) PyODictIter_Type;
PyAPI_DATA(PyTypeObject) PyODictKeys_Type;
PyAPI_DATA(PyTypeObject) PyODictItems_Type;
PyAPI_DATA(PyTypeObject) PyODictValues_Type;
#define PyODict_Check(op) PyObject_TypeCheck((op), &PyODict_Type)
#define PyODict_CheckExact(op) Py_IS_TYPE((op), &PyODict_Type)
#define PyODict_SIZE(op) PyDict_GET_SIZE((op))
PyAPI_FUNC(PyObject *) PyODict_New(void);
PyAPI_FUNC(int) PyODict_SetItem(PyObject *od, PyObject *key, PyObject *item);
PyAPI_FUNC(int) PyODict_DelItem(PyObject *od, PyObject *key);
/* wrappers around PyDict* functions */
#define PyODict_GetItem(od, key) PyDict_GetItem(_PyObject_CAST(od), (key))
#define PyODict_GetItemWithError(od, key) \
PyDict_GetItemWithError(_PyObject_CAST(od), (key))
#define PyODict_Contains(od, key) PyDict_Contains(_PyObject_CAST(od), (key))
#define PyODict_Size(od) PyDict_Size(_PyObject_CAST(od))
#define PyODict_GetItemString(od, key) \
PyDict_GetItemString(_PyObject_CAST(od), (key))
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_ODICTOBJECT_H */

View File

@@ -0,0 +1,31 @@
/* PickleBuffer object. This is built-in for ease of use from third-party
* C extensions.
*/
#ifndef Py_PICKLEBUFOBJECT_H
#define Py_PICKLEBUFOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_LIMITED_API
PyAPI_DATA(PyTypeObject) PyPickleBuffer_Type;
#define PyPickleBuffer_Check(op) Py_IS_TYPE((op), &PyPickleBuffer_Type)
/* Create a PickleBuffer redirecting to the given buffer-enabled object */
PyAPI_FUNC(PyObject *) PyPickleBuffer_FromObject(PyObject *);
/* Get the PickleBuffer's underlying view to the original object
* (NULL if released)
*/
PyAPI_FUNC(const Py_buffer *) PyPickleBuffer_GetBuffer(PyObject *);
/* Release the PickleBuffer. Returns 0 on success, -1 on error. */
PyAPI_FUNC(int) PyPickleBuffer_Release(PyObject *);
#endif /* !Py_LIMITED_API */
#ifdef __cplusplus
}
#endif
#endif /* !Py_PICKLEBUFOBJECT_H */

View File

@@ -0,0 +1,105 @@
#ifndef Py_CPYTHON_PTRHEAD_STUBS_H
#define Py_CPYTHON_PTRHEAD_STUBS_H
#if !defined(HAVE_PTHREAD_STUBS)
# error "this header file requires stubbed pthreads."
#endif
#ifndef _POSIX_THREADS
# define _POSIX_THREADS 1
#endif
/* Minimal pthread stubs for CPython.
*
* The stubs implement the minimum pthread API for CPython.
* - pthread_create() fails.
* - pthread_exit() calls exit(0).
* - pthread_key_*() functions implement minimal TSS without destructor.
* - all other functions do nothing and return 0.
*/
#ifdef __wasi__
// WASI's bits/alltypes.h provides type definitions when __NEED_ is set.
// The header file can be included multiple times.
//
// <sys/types.h> may also define these macros.
# ifndef __NEED_pthread_cond_t
# define __NEED_pthread_cond_t 1
# endif
# ifndef __NEED_pthread_condattr_t
# define __NEED_pthread_condattr_t 1
# endif
# ifndef __NEED_pthread_mutex_t
# define __NEED_pthread_mutex_t 1
# endif
# ifndef __NEED_pthread_mutexattr_t
# define __NEED_pthread_mutexattr_t 1
# endif
# ifndef __NEED_pthread_key_t
# define __NEED_pthread_key_t 1
# endif
# ifndef __NEED_pthread_t
# define __NEED_pthread_t 1
# endif
# ifndef __NEED_pthread_attr_t
# define __NEED_pthread_attr_t 1
# endif
# include <bits/alltypes.h>
#else
typedef struct { void *__x; } pthread_cond_t;
typedef struct { unsigned __attr; } pthread_condattr_t;
typedef struct { void *__x; } pthread_mutex_t;
typedef struct { unsigned __attr; } pthread_mutexattr_t;
typedef unsigned pthread_key_t;
typedef unsigned pthread_t;
typedef struct { unsigned __attr; } pthread_attr_t;
#endif
// mutex
PyAPI_FUNC(int) pthread_mutex_init(pthread_mutex_t *restrict mutex,
const pthread_mutexattr_t *restrict attr);
PyAPI_FUNC(int) pthread_mutex_destroy(pthread_mutex_t *mutex);
PyAPI_FUNC(int) pthread_mutex_trylock(pthread_mutex_t *mutex);
PyAPI_FUNC(int) pthread_mutex_lock(pthread_mutex_t *mutex);
PyAPI_FUNC(int) pthread_mutex_unlock(pthread_mutex_t *mutex);
// condition
PyAPI_FUNC(int) pthread_cond_init(pthread_cond_t *restrict cond,
const pthread_condattr_t *restrict attr);
PyAPI_FUNC(int) pthread_cond_destroy(pthread_cond_t *cond);
PyAPI_FUNC(int) pthread_cond_wait(pthread_cond_t *restrict cond,
pthread_mutex_t *restrict mutex);
PyAPI_FUNC(int) pthread_cond_timedwait(pthread_cond_t *restrict cond,
pthread_mutex_t *restrict mutex,
const struct timespec *restrict abstime);
PyAPI_FUNC(int) pthread_cond_signal(pthread_cond_t *cond);
PyAPI_FUNC(int) pthread_condattr_init(pthread_condattr_t *attr);
PyAPI_FUNC(int) pthread_condattr_setclock(
pthread_condattr_t *attr, clockid_t clock_id);
// pthread
PyAPI_FUNC(int) pthread_create(pthread_t *restrict thread,
const pthread_attr_t *restrict attr,
void *(*start_routine)(void *),
void *restrict arg);
PyAPI_FUNC(int) pthread_detach(pthread_t thread);
PyAPI_FUNC(int) pthread_join(pthread_t thread, void** value_ptr);
PyAPI_FUNC(pthread_t) pthread_self(void);
PyAPI_FUNC(int) pthread_exit(void *retval) __attribute__ ((__noreturn__));
PyAPI_FUNC(int) pthread_attr_init(pthread_attr_t *attr);
PyAPI_FUNC(int) pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
PyAPI_FUNC(int) pthread_attr_destroy(pthread_attr_t *attr);
// pthread_key
#ifndef PTHREAD_KEYS_MAX
# define PTHREAD_KEYS_MAX 128
#endif
PyAPI_FUNC(int) pthread_key_create(pthread_key_t *key,
void (*destr_function)(void *));
PyAPI_FUNC(int) pthread_key_delete(pthread_key_t key);
PyAPI_FUNC(void *) pthread_getspecific(pthread_key_t key);
PyAPI_FUNC(int) pthread_setspecific(pthread_key_t key, const void *value);
#endif // Py_CPYTHON_PTRHEAD_STUBS_H

View File

@@ -0,0 +1,569 @@
// This header provides cross-platform low-level atomic operations
// similar to C11 atomics.
//
// Operations are sequentially consistent unless they have a suffix indicating
// otherwise. If in doubt, prefer the sequentially consistent operations.
//
// The "_relaxed" suffix for load and store operations indicates the "relaxed"
// memory order. They don't provide synchronization, but (roughly speaking)
// guarantee somewhat sane behavior for races instead of undefined behavior.
// In practice, they correspond to "normal" hardware load and store
// instructions, so they are almost as inexpensive as plain loads and stores
// in C.
//
// Note that atomic read-modify-write operations like _Py_atomic_add_* return
// the previous value of the atomic variable, not the new value.
//
// See https://en.cppreference.com/w/c/atomic for more information on C11
// atomics.
// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
// "A Relaxed Guide to memory_order_relaxed" for discussion of and common usage
// or relaxed atomics.
//
// Functions with pseudo Python code:
//
// def _Py_atomic_load(obj):
// return obj # sequential consistency
//
// def _Py_atomic_load_relaxed(obj):
// return obj # relaxed consistency
//
// def _Py_atomic_store(obj, value):
// obj = value # sequential consistency
//
// def _Py_atomic_store_relaxed(obj, value):
// obj = value # relaxed consistency
//
// def _Py_atomic_exchange(obj, value):
// # sequential consistency
// old_obj = obj
// obj = value
// return old_obj
//
// def _Py_atomic_compare_exchange(obj, expected, desired):
// # sequential consistency
// if obj == expected:
// obj = desired
// return True
// else:
// expected = obj
// return False
//
// def _Py_atomic_add(obj, value):
// # sequential consistency
// old_obj = obj
// obj += value
// return old_obj
//
// def _Py_atomic_and(obj, value):
// # sequential consistency
// old_obj = obj
// obj &= value
// return old_obj
//
// def _Py_atomic_or(obj, value):
// # sequential consistency
// old_obj = obj
// obj |= value
// return old_obj
//
// Other functions:
//
// def _Py_atomic_load_ptr_acquire(obj):
// return obj # acquire
//
// def _Py_atomic_store_ptr_release(obj, value):
//     obj = value # release
//
// def _Py_atomic_fence_seq_cst():
// # sequential consistency
// ...
//
// def _Py_atomic_fence_release():
// # release
// ...
#ifndef Py_CPYTHON_ATOMIC_H
# error "this header file must not be included directly"
#endif
// --- _Py_atomic_add --------------------------------------------------------
// Atomically adds `value` to `obj` and returns the previous value
static inline int
_Py_atomic_add_int(int *obj, int value);
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value);
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value);
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value);
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value);
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value);
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value);
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value);
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value);
// --- _Py_atomic_compare_exchange -------------------------------------------
// Performs an atomic compare-and-exchange.
//
// - If `*obj` and `*expected` are equal, store `desired` into `*obj`
// and return 1 (success).
// - Otherwise, store the `*obj` current value into `*expected`
// and return 0 (failure).
//
// These correspond to the C11 atomic_compare_exchange_strong() function.
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired);
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired);
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired);
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired);
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired);
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired);
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired);
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired);
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired);
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired);
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired);
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired);
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired);
// NOTE: `obj` and `expected` are logically `void**` types, but we use `void*`
// so that we can pass types like `PyObject**` without a cast.
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *value);
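// Example (illustrative sketch; `maximum` is a hypothetical shared variable).
// On failure the current value is written back into `expected`, so the loop
// does not need to reload it:
//
//     static int maximum;
//
//     void record_max(int candidate)
//     {
//         int expected = _Py_atomic_load_int(&maximum);
//         while (candidate > expected &&
//                !_Py_atomic_compare_exchange_int(&maximum, &expected,
//                                                 candidate)) {
//             // `expected` now holds the latest value; re-check and retry
//         }
//     }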
// --- _Py_atomic_exchange ---------------------------------------------------
// Atomically replaces `*obj` with `value` and returns the previous value of `*obj`.
static inline int
_Py_atomic_exchange_int(int *obj, int value);
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value);
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value);
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value);
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value);
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value);
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value);
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value);
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value);
static inline void *
_Py_atomic_exchange_ptr(void *obj, void *value);
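// Example (illustrative sketch; `pending` is a hypothetical shared pointer).
// Exchanging with NULL lets exactly one thread claim ownership:
//
//     static void *pending;
//
//     void *claimed = _Py_atomic_exchange_ptr(&pending, NULL);
//     if (claimed != NULL) {
//         // only this thread owns `claimed` now and may free it
//     }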
// --- _Py_atomic_and --------------------------------------------------------
// Performs `*obj &= value` atomically and returns the previous value of `*obj`.
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value);
// --- _Py_atomic_or ---------------------------------------------------------
// Performs `*obj |= value` atomically and returns the previous value of `*obj`.
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value);
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value);
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value);
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value);
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value);
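// Example (illustrative sketch; `flags` and FLAG_SCHEDULED are hypothetical).
// The returned previous value tells the caller whether the bit was already set:
//
//     #define FLAG_SCHEDULED 0x01
//     static uint8_t flags;
//
//     if ((_Py_atomic_or_uint8(&flags, FLAG_SCHEDULED) & FLAG_SCHEDULED) == 0) {
//         // this thread set the bit first; schedule the work
//     }
//     _Py_atomic_and_uint8(&flags, (uint8_t)~FLAG_SCHEDULED);  // clear the bit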
// --- _Py_atomic_load -------------------------------------------------------
// Atomically loads `*obj` (sequential consistency)
static inline int
_Py_atomic_load_int(const int *obj);
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj);
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj);
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj);
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj);
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj);
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj);
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj);
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj);
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj);
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj);
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj);
static inline void *
_Py_atomic_load_ptr(const void *obj);
// --- _Py_atomic_load_relaxed -----------------------------------------------
// Loads `*obj` (relaxed consistency, i.e., no ordering)
static inline int
_Py_atomic_load_int_relaxed(const int *obj);
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj);
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj);
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj);
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj);
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj);
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj);
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj);
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj);
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj);
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj);
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj);
static inline void *
_Py_atomic_load_ptr_relaxed(const void *obj);
static inline unsigned long long
_Py_atomic_load_ullong_relaxed(const unsigned long long *obj);
// --- _Py_atomic_store ------------------------------------------------------
// Atomically performs `*obj = value` (sequential consistency)
static inline void
_Py_atomic_store_int(int *obj, int value);
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value);
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value);
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value);
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value);
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value);
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value);
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value);
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value);
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value);
static inline void
_Py_atomic_store_ptr(void *obj, void *value);
static inline void
_Py_atomic_store_ssize(Py_ssize_t* obj, Py_ssize_t value);
// --- _Py_atomic_store_relaxed ----------------------------------------------
// Stores `*obj = value` (relaxed consistency, i.e., no ordering)
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value);
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value);
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value);
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value);
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value);
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value);
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t* obj, uint8_t value);
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value);
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value);
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value);
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value);
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value);
static inline void
_Py_atomic_store_ullong_relaxed(unsigned long long *obj,
unsigned long long value);
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
// Loads `*obj` (acquire operation)
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj);
static inline uintptr_t
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj);
// Stores `*obj = value` (release operation)
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value);
static inline void
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value);
static inline void
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value);
static inline void
_Py_atomic_store_int_release(int *obj, int value);
static inline int
_Py_atomic_load_int_acquire(const int *obj);
static inline void
_Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value);
static inline void
_Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value);
static inline uint64_t
_Py_atomic_load_uint64_acquire(const uint64_t *obj);
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj);
static inline Py_ssize_t
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj);
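// Example (illustrative sketch of the publication pattern; `published` and
// `payload` are hypothetical, and PyMem_RawMalloc() is assumed from the
// public allocator API). The release store makes every prior write visible
// to a reader that observes the pointer with an acquire load:
//
//     typedef struct { int a; int b; } payload;
//     static void *published;
//
//     // producer
//     payload *p = (payload *)PyMem_RawMalloc(sizeof(payload));
//     p->a = 1;
//     p->b = 2;
//     _Py_atomic_store_ptr_release(&published, p);
//
//     // consumer
//     payload *q = (payload *)_Py_atomic_load_ptr_acquire(&published);
//     if (q != NULL) {
//         // q->a and q->b are fully initialized here
//     }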
// --- _Py_atomic_fence ------------------------------------------------------
// Sequential consistency fence. C11 fences have complex semantics. When
// possible, use the atomic operations on variables defined above, which
// generally do not require explicit use of a fence.
// See https://en.cppreference.com/w/cpp/atomic/atomic_thread_fence
static inline void _Py_atomic_fence_seq_cst(void);
// Acquire fence
static inline void _Py_atomic_fence_acquire(void);
// Release fence
static inline void _Py_atomic_fence_release(void);
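// Example (illustrative sketch; `data` and `flag` are hypothetical). A release
// fence before a relaxed store pairs with an acquire fence after a relaxed
// load that observes it:
//
//     static int data;
//     static int flag;
//
//     // writer
//     data = 42;                               // plain write
//     _Py_atomic_fence_release();
//     _Py_atomic_store_int_relaxed(&flag, 1);
//
//     // reader
//     if (_Py_atomic_load_int_relaxed(&flag)) {
//         _Py_atomic_fence_acquire();
//         // `data` is guaranteed to read 42 here
//     }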
#ifndef _Py_USE_GCC_BUILTIN_ATOMICS
# if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
# define _Py_USE_GCC_BUILTIN_ATOMICS 1
# elif defined(__clang__)
# if __has_builtin(__atomic_load)
# define _Py_USE_GCC_BUILTIN_ATOMICS 1
# endif
# endif
#endif
#if _Py_USE_GCC_BUILTIN_ATOMICS
# define Py_ATOMIC_GCC_H
# include "pyatomic_gcc.h"
# undef Py_ATOMIC_GCC_H
#elif __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
# define Py_ATOMIC_STD_H
# include "pyatomic_std.h"
# undef Py_ATOMIC_STD_H
#elif defined(_MSC_VER)
# define Py_ATOMIC_MSC_H
# include "pyatomic_msc.h"
# undef Py_ATOMIC_MSC_H
#else
# error "no available pyatomic implementation for this platform/compiler"
#endif
// --- aliases ---------------------------------------------------------------
#if SIZEOF_LONG == 8
# define _Py_atomic_load_ulong(p) \
_Py_atomic_load_uint64((uint64_t *)p)
# define _Py_atomic_load_ulong_relaxed(p) \
_Py_atomic_load_uint64_relaxed((uint64_t *)p)
# define _Py_atomic_store_ulong(p, v) \
_Py_atomic_store_uint64((uint64_t *)p, v)
# define _Py_atomic_store_ulong_relaxed(p, v) \
_Py_atomic_store_uint64_relaxed((uint64_t *)p, v)
#elif SIZEOF_LONG == 4
# define _Py_atomic_load_ulong(p) \
_Py_atomic_load_uint32((uint32_t *)p)
# define _Py_atomic_load_ulong_relaxed(p) \
_Py_atomic_load_uint32_relaxed((uint32_t *)p)
# define _Py_atomic_store_ulong(p, v) \
_Py_atomic_store_uint32((uint32_t *)p, v)
# define _Py_atomic_store_ulong_relaxed(p, v) \
_Py_atomic_store_uint32_relaxed((uint32_t *)p, v)
#else
# error "long must be 4 or 8 bytes in size"
#endif // SIZEOF_LONG

View File

@@ -0,0 +1,551 @@
// This is the implementation of Python atomic operations using GCC's built-in
// functions that match the C11 memory model. This implementation is preferred
// for GCC-compatible compilers, such as Clang. These functions are available
// in GCC 4.8+ without needing to compile with --std=c11 or --std=gnu11.
#ifndef Py_ATOMIC_GCC_H
# error "this header file must not be included directly"
#endif
// --- _Py_atomic_add --------------------------------------------------------
static inline int
_Py_atomic_add_int(int *obj, int value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ return __atomic_fetch_add(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_compare_exchange -------------------------------------------
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired)
{ return __atomic_compare_exchange_n(obj, expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired)
{ return __atomic_compare_exchange_n((void **)obj, (void **)expected, desired, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_exchange ---------------------------------------------------
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ return __atomic_exchange_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void *
_Py_atomic_exchange_ptr(void *obj, void *value)
{ return __atomic_exchange_n((void **)obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_and --------------------------------------------------------
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_and(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_or ---------------------------------------------------------
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{ return __atomic_fetch_or(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_load -------------------------------------------------------
static inline int
_Py_atomic_load_int(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_SEQ_CST); }
static inline void *
_Py_atomic_load_ptr(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_load_relaxed -----------------------------------------------
static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
static inline void *
_Py_atomic_load_ptr_relaxed(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_RELAXED); }
static inline unsigned long long
_Py_atomic_load_ullong_relaxed(const unsigned long long *obj)
{ return __atomic_load_n(obj, __ATOMIC_RELAXED); }
// --- _Py_atomic_store ------------------------------------------------------
static inline void
_Py_atomic_store_int(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); }
// --- _Py_atomic_store_relaxed ----------------------------------------------
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
static inline void
_Py_atomic_store_ullong_relaxed(unsigned long long *obj,
unsigned long long value)
{ __atomic_store_n(obj, value, __ATOMIC_RELAXED); }
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{ return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_ACQUIRE); }
static inline uintptr_t
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
{ return (uintptr_t)__atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_int_release(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline int
_Py_atomic_load_int_acquire(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline void
_Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline void
_Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }
static inline uint64_t
_Py_atomic_load_uint64_acquire(const uint64_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
static inline Py_ssize_t
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }
// --- _Py_atomic_fence ------------------------------------------------------
static inline void
_Py_atomic_fence_seq_cst(void)
{ __atomic_thread_fence(__ATOMIC_SEQ_CST); }
static inline void
_Py_atomic_fence_acquire(void)
{ __atomic_thread_fence(__ATOMIC_ACQUIRE); }
static inline void
_Py_atomic_fence_release(void)
{ __atomic_thread_fence(__ATOMIC_RELEASE); }

File diff suppressed because it is too large

View File

@@ -0,0 +1,976 @@
// This is the implementation of Python atomic operations using C++11 or C11
// atomics. Note that the pyatomic_gcc.h implementation is preferred for
// GCC-compatible compilers, even if they support C++11 atomics.
#ifndef Py_ATOMIC_STD_H
# error "this header file must not be included directly"
#endif
#ifdef __cplusplus
extern "C++" {
# include <atomic>
}
# define _Py_USING_STD using namespace std
# define _Atomic(tp) atomic<tp>
#else
# define _Py_USING_STD
# include <stdatomic.h>
#endif
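// For example, `atomic_fetch_add((_Atomic(int)*)obj, value)` below is plain
// C11 <stdatomic.h> when compiled as C; under C++ the same spelling resolves
// to std::atomic_fetch_add on a std::atomic<int> via the definitions above.
// (Explanatory note; not additional API.)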
// --- _Py_atomic_add --------------------------------------------------------
static inline int
_Py_atomic_add_int(int *obj, int value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int)*)obj, value);
}
static inline int8_t
_Py_atomic_add_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int8_t)*)obj, value);
}
static inline int16_t
_Py_atomic_add_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int16_t)*)obj, value);
}
static inline int32_t
_Py_atomic_add_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int32_t)*)obj, value);
}
static inline int64_t
_Py_atomic_add_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(int64_t)*)obj, value);
}
static inline intptr_t
_Py_atomic_add_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(intptr_t)*)obj, value);
}
static inline unsigned int
_Py_atomic_add_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(unsigned int)*)obj, value);
}
static inline uint8_t
_Py_atomic_add_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_add_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_add_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_add_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_add_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(uintptr_t)*)obj, value);
}
static inline Py_ssize_t
_Py_atomic_add_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
return atomic_fetch_add((_Atomic(Py_ssize_t)*)obj, value);
}
// --- _Py_atomic_compare_exchange -------------------------------------------
static inline int
_Py_atomic_compare_exchange_int(int *obj, int *expected, int desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int8(int8_t *obj, int8_t *expected, int8_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int8_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int16(int16_t *obj, int16_t *expected, int16_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int16_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int32(int32_t *obj, int32_t *expected, int32_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int32_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_int64(int64_t *obj, int64_t *expected, int64_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(int64_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_intptr(intptr_t *obj, intptr_t *expected, intptr_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(intptr_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint(unsigned int *obj, unsigned int *expected, unsigned int desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(unsigned int)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint8(uint8_t *obj, uint8_t *expected, uint8_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint8_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint16(uint16_t *obj, uint16_t *expected, uint16_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint16_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint32(uint32_t *obj, uint32_t *expected, uint32_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint32_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uint64(uint64_t *obj, uint64_t *expected, uint64_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uint64_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_uintptr(uintptr_t *obj, uintptr_t *expected, uintptr_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(uintptr_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_ssize(Py_ssize_t *obj, Py_ssize_t *expected, Py_ssize_t desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(Py_ssize_t)*)obj,
expected, desired);
}
static inline int
_Py_atomic_compare_exchange_ptr(void *obj, void *expected, void *desired)
{
_Py_USING_STD;
return atomic_compare_exchange_strong((_Atomic(void *)*)obj,
(void **)expected, desired);
}
// --- _Py_atomic_exchange ---------------------------------------------------
static inline int
_Py_atomic_exchange_int(int *obj, int value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int)*)obj, value);
}
static inline int8_t
_Py_atomic_exchange_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int8_t)*)obj, value);
}
static inline int16_t
_Py_atomic_exchange_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int16_t)*)obj, value);
}
static inline int32_t
_Py_atomic_exchange_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int32_t)*)obj, value);
}
static inline int64_t
_Py_atomic_exchange_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(int64_t)*)obj, value);
}
static inline intptr_t
_Py_atomic_exchange_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(intptr_t)*)obj, value);
}
static inline unsigned int
_Py_atomic_exchange_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(unsigned int)*)obj, value);
}
static inline uint8_t
_Py_atomic_exchange_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_exchange_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_exchange_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_exchange_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_exchange_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(uintptr_t)*)obj, value);
}
static inline Py_ssize_t
_Py_atomic_exchange_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(Py_ssize_t)*)obj, value);
}
static inline void*
_Py_atomic_exchange_ptr(void *obj, void *value)
{
_Py_USING_STD;
return atomic_exchange((_Atomic(void *)*)obj, value);
}
// --- _Py_atomic_and --------------------------------------------------------
static inline uint8_t
_Py_atomic_and_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_and_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_and_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_and_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_and_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_and((_Atomic(uintptr_t)*)obj, value);
}
// --- _Py_atomic_or ---------------------------------------------------------
static inline uint8_t
_Py_atomic_or_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint8_t)*)obj, value);
}
static inline uint16_t
_Py_atomic_or_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint16_t)*)obj, value);
}
static inline uint32_t
_Py_atomic_or_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint32_t)*)obj, value);
}
static inline uint64_t
_Py_atomic_or_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uint64_t)*)obj, value);
}
static inline uintptr_t
_Py_atomic_or_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
return atomic_fetch_or((_Atomic(uintptr_t)*)obj, value);
}
// --- _Py_atomic_load -------------------------------------------------------
static inline int
_Py_atomic_load_int(const int *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int)*)obj);
}
static inline int8_t
_Py_atomic_load_int8(const int8_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int8_t)*)obj);
}
static inline int16_t
_Py_atomic_load_int16(const int16_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int16_t)*)obj);
}
static inline int32_t
_Py_atomic_load_int32(const int32_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int32_t)*)obj);
}
static inline int64_t
_Py_atomic_load_int64(const int64_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(int64_t)*)obj);
}
static inline intptr_t
_Py_atomic_load_intptr(const intptr_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(intptr_t)*)obj);
}
static inline uint8_t
_Py_atomic_load_uint8(const uint8_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint8_t)*)obj);
}
static inline uint16_t
_Py_atomic_load_uint16(const uint16_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint16_t)*)obj);
}
static inline uint32_t
_Py_atomic_load_uint32(const uint32_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint32_t)*)obj);
}
static inline uint64_t
_Py_atomic_load_uint64(const uint64_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uint64_t)*)obj);
}
static inline uintptr_t
_Py_atomic_load_uintptr(const uintptr_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(uintptr_t)*)obj);
}
static inline unsigned int
_Py_atomic_load_uint(const unsigned int *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(unsigned int)*)obj);
}
static inline Py_ssize_t
_Py_atomic_load_ssize(const Py_ssize_t *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(Py_ssize_t)*)obj);
}
static inline void*
_Py_atomic_load_ptr(const void *obj)
{
_Py_USING_STD;
return atomic_load((const _Atomic(void*)*)obj);
}
// --- _Py_atomic_load_relaxed -----------------------------------------------
static inline int
_Py_atomic_load_int_relaxed(const int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int)*)obj,
memory_order_relaxed);
}
static inline int8_t
_Py_atomic_load_int8_relaxed(const int8_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int8_t)*)obj,
memory_order_relaxed);
}
static inline int16_t
_Py_atomic_load_int16_relaxed(const int16_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int16_t)*)obj,
memory_order_relaxed);
}
static inline int32_t
_Py_atomic_load_int32_relaxed(const int32_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int32_t)*)obj,
memory_order_relaxed);
}
static inline int64_t
_Py_atomic_load_int64_relaxed(const int64_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int64_t)*)obj,
memory_order_relaxed);
}
static inline intptr_t
_Py_atomic_load_intptr_relaxed(const intptr_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(intptr_t)*)obj,
memory_order_relaxed);
}
static inline uint8_t
_Py_atomic_load_uint8_relaxed(const uint8_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint8_t)*)obj,
memory_order_relaxed);
}
static inline uint16_t
_Py_atomic_load_uint16_relaxed(const uint16_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint16_t)*)obj,
memory_order_relaxed);
}
static inline uint32_t
_Py_atomic_load_uint32_relaxed(const uint32_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint32_t)*)obj,
memory_order_relaxed);
}
static inline uint64_t
_Py_atomic_load_uint64_relaxed(const uint64_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint64_t)*)obj,
memory_order_relaxed);
}
static inline uintptr_t
_Py_atomic_load_uintptr_relaxed(const uintptr_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
memory_order_relaxed);
}
static inline unsigned int
_Py_atomic_load_uint_relaxed(const unsigned int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(unsigned int)*)obj,
memory_order_relaxed);
}
static inline Py_ssize_t
_Py_atomic_load_ssize_relaxed(const Py_ssize_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj,
memory_order_relaxed);
}
static inline void*
_Py_atomic_load_ptr_relaxed(const void *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(void*)*)obj,
memory_order_relaxed);
}
static inline unsigned long long
_Py_atomic_load_ullong_relaxed(const unsigned long long *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(unsigned long long)*)obj,
memory_order_relaxed);
}
// --- _Py_atomic_store ------------------------------------------------------
static inline void
_Py_atomic_store_int(int *obj, int value)
{
_Py_USING_STD;
atomic_store((_Atomic(int)*)obj, value);
}
static inline void
_Py_atomic_store_int8(int8_t *obj, int8_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int8_t)*)obj, value);
}
static inline void
_Py_atomic_store_int16(int16_t *obj, int16_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int16_t)*)obj, value);
}
static inline void
_Py_atomic_store_int32(int32_t *obj, int32_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int32_t)*)obj, value);
}
static inline void
_Py_atomic_store_int64(int64_t *obj, int64_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(int64_t)*)obj, value);
}
static inline void
_Py_atomic_store_intptr(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(intptr_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint8(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint8_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint16(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint16_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint32(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint32_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint64(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uint64_t)*)obj, value);
}
static inline void
_Py_atomic_store_uintptr(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(uintptr_t)*)obj, value);
}
static inline void
_Py_atomic_store_uint(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
atomic_store((_Atomic(unsigned int)*)obj, value);
}
static inline void
_Py_atomic_store_ptr(void *obj, void *value)
{
_Py_USING_STD;
atomic_store((_Atomic(void*)*)obj, value);
}
static inline void
_Py_atomic_store_ssize(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
atomic_store((_Atomic(Py_ssize_t)*)obj, value);
}
// --- _Py_atomic_store_relaxed ----------------------------------------------
static inline void
_Py_atomic_store_int_relaxed(int *obj, int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int8_relaxed(int8_t *obj, int8_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int8_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int16_relaxed(int16_t *obj, int16_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int16_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int32_relaxed(int32_t *obj, int32_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int32_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_int64_relaxed(int64_t *obj, int64_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int64_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_intptr_relaxed(intptr_t *obj, intptr_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(intptr_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint8_relaxed(uint8_t *obj, uint8_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint8_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint16_relaxed(uint16_t *obj, uint16_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint16_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint32_relaxed(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint32_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint64_relaxed(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint64_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uintptr_relaxed(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uintptr_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_uint_relaxed(unsigned int *obj, unsigned int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(unsigned int)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_ptr_relaxed(void *obj, void *value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(void*)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_ssize_relaxed(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value,
memory_order_relaxed);
}
static inline void
_Py_atomic_store_ullong_relaxed(unsigned long long *obj,
unsigned long long value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(unsigned long long)*)obj, value,
memory_order_relaxed);
}
// --- _Py_atomic_load_ptr_acquire / _Py_atomic_store_ptr_release ------------
static inline void *
_Py_atomic_load_ptr_acquire(const void *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(void*)*)obj,
memory_order_acquire);
}
static inline uintptr_t
_Py_atomic_load_uintptr_acquire(const uintptr_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uintptr_t)*)obj,
memory_order_acquire);
}
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(void*)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_uintptr_release(uintptr_t *obj, uintptr_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uintptr_t)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_int_release(int *obj, int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_ssize_release(Py_ssize_t *obj, Py_ssize_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(Py_ssize_t)*)obj, value,
memory_order_release);
}
static inline int
_Py_atomic_load_int_acquire(const int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int)*)obj,
memory_order_acquire);
}
static inline void
_Py_atomic_store_uint32_release(uint32_t *obj, uint32_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint32_t)*)obj, value,
memory_order_release);
}
static inline void
_Py_atomic_store_uint64_release(uint64_t *obj, uint64_t value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(uint64_t)*)obj, value,
memory_order_release);
}
static inline uint64_t
_Py_atomic_load_uint64_acquire(const uint64_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint64_t)*)obj,
memory_order_acquire);
}
static inline uint32_t
_Py_atomic_load_uint32_acquire(const uint32_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(uint32_t)*)obj,
memory_order_acquire);
}
static inline Py_ssize_t
_Py_atomic_load_ssize_acquire(const Py_ssize_t *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(Py_ssize_t)*)obj,
memory_order_acquire);
}
// --- _Py_atomic_fence ------------------------------------------------------
static inline void
_Py_atomic_fence_seq_cst(void)
{
_Py_USING_STD;
atomic_thread_fence(memory_order_seq_cst);
}
static inline void
_Py_atomic_fence_acquire(void)
{
_Py_USING_STD;
atomic_thread_fence(memory_order_acquire);
}
static inline void
_Py_atomic_fence_release(void)
{
_Py_USING_STD;
atomic_thread_fence(memory_order_release);
}

View File

@@ -0,0 +1,39 @@
#ifndef Py_LIMITED_API
#ifndef PYCTYPE_H
#define PYCTYPE_H
#ifdef __cplusplus
extern "C" {
#endif
#define PY_CTF_LOWER 0x01
#define PY_CTF_UPPER 0x02
#define PY_CTF_ALPHA (PY_CTF_LOWER|PY_CTF_UPPER)
#define PY_CTF_DIGIT 0x04
#define PY_CTF_ALNUM (PY_CTF_ALPHA|PY_CTF_DIGIT)
#define PY_CTF_SPACE 0x08
#define PY_CTF_XDIGIT 0x10
PyAPI_DATA(const unsigned int) _Py_ctype_table[256];
/* Unlike their C counterparts, the following macros are not meant to
* handle an int with any of the values [EOF, 0-UCHAR_MAX]. The argument
* must be a signed/unsigned char. */
#define Py_ISLOWER(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_LOWER)
#define Py_ISUPPER(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_UPPER)
#define Py_ISALPHA(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_ALPHA)
#define Py_ISDIGIT(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_DIGIT)
#define Py_ISXDIGIT(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_XDIGIT)
#define Py_ISALNUM(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_ALNUM)
#define Py_ISSPACE(c) (_Py_ctype_table[Py_CHARMASK(c)] & PY_CTF_SPACE)
PyAPI_DATA(const unsigned char) _Py_ctype_tolower[256];
PyAPI_DATA(const unsigned char) _Py_ctype_toupper[256];
#define Py_TOLOWER(c) (_Py_ctype_tolower[Py_CHARMASK(c)])
#define Py_TOUPPER(c) (_Py_ctype_toupper[Py_CHARMASK(c)])
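/* Example (illustrative sketch; `c` is a hypothetical char value):
 *
 *     char c = '7';
 *     if (Py_ISDIGIT(c)) {
 *         int digit = c - '0';             // digit == 7
 *     }
 *     char lower = Py_TOLOWER('A');        // lower == 'a'
 */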
#ifdef __cplusplus
}
#endif
#endif /* !PYCTYPE_H */
#endif /* !Py_LIMITED_API */

View File

@@ -0,0 +1,38 @@
#ifndef Py_LIMITED_API
#ifndef Py_PYDEBUG_H
#define Py_PYDEBUG_H
#ifdef __cplusplus
extern "C" {
#endif
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_DebugFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_VerboseFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_QuietFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_InteractiveFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_InspectFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_OptimizeFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_NoSiteFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_BytesWarningFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_FrozenFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_IgnoreEnvironmentFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_DontWriteBytecodeFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_NoUserSiteDirectory;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_UnbufferedStdioFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_HashRandomizationFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_IsolatedFlag;
#ifdef MS_WINDOWS
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_LegacyWindowsFSEncodingFlag;
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_LegacyWindowsStdioFlag;
#endif
/* this is a wrapper around getenv() that pays attention to
Py_IgnoreEnvironmentFlag. It should be used for getting variables like
PYTHONPATH and PYTHONHOME from the environment */
PyAPI_FUNC(char*) Py_GETENV(const char *name);
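/* Example (illustrative sketch): Py_GETENV() returns NULL when the variable
   is unset or when the environment is being ignored (e.g. "python -E"):

       const char *home = Py_GETENV("PYTHONHOME");
       if (home != NULL && home[0] != '\0') {
           // use home instead of the compiled-in default (hypothetical)
       }
*/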
#ifdef __cplusplus
}
#endif
#endif /* !Py_PYDEBUG_H */
#endif /* Py_LIMITED_API */

View File

@@ -0,0 +1,131 @@
#ifndef Py_CPYTHON_ERRORS_H
# error "this header file must not be included directly"
#endif
/* Error objects */
/* PyException_HEAD defines the initial segment of every exception class. */
#define PyException_HEAD PyObject_HEAD PyObject *dict;\
PyObject *args; PyObject *notes; PyObject *traceback;\
PyObject *context; PyObject *cause;\
char suppress_context;
typedef struct {
PyException_HEAD
} PyBaseExceptionObject;
typedef struct {
PyException_HEAD
PyObject *msg;
PyObject *excs;
} PyBaseExceptionGroupObject;
typedef struct {
PyException_HEAD
PyObject *msg;
PyObject *filename;
PyObject *lineno;
PyObject *offset;
PyObject *end_lineno;
PyObject *end_offset;
PyObject *text;
PyObject *print_file_and_line;
} PySyntaxErrorObject;
typedef struct {
PyException_HEAD
PyObject *msg;
PyObject *name;
PyObject *path;
PyObject *name_from;
} PyImportErrorObject;
typedef struct {
PyException_HEAD
PyObject *encoding;
PyObject *object;
Py_ssize_t start;
Py_ssize_t end;
PyObject *reason;
} PyUnicodeErrorObject;
typedef struct {
PyException_HEAD
PyObject *code;
} PySystemExitObject;
typedef struct {
PyException_HEAD
PyObject *myerrno;
PyObject *strerror;
PyObject *filename;
PyObject *filename2;
#ifdef MS_WINDOWS
PyObject *winerror;
#endif
Py_ssize_t written; /* only for BlockingIOError, -1 otherwise */
} PyOSErrorObject;
typedef struct {
PyException_HEAD
PyObject *value;
} PyStopIterationObject;
typedef struct {
PyException_HEAD
PyObject *name;
} PyNameErrorObject;
typedef struct {
PyException_HEAD
PyObject *obj;
PyObject *name;
} PyAttributeErrorObject;
/* Compatibility typedefs */
typedef PyOSErrorObject PyEnvironmentErrorObject;
#ifdef MS_WINDOWS
typedef PyOSErrorObject PyWindowsErrorObject;
#endif
/* Context manipulation (PEP 3134) */
PyAPI_FUNC(void) _PyErr_ChainExceptions1(PyObject *);
/* In exceptions.c */
PyAPI_FUNC(PyObject*) PyUnstable_Exc_PrepReraiseStar(
PyObject *orig,
PyObject *excs);
/* In signalmodule.c */
PyAPI_FUNC(int) PySignal_SetWakeupFd(int fd);
/* Support for adding program text to SyntaxErrors */
PyAPI_FUNC(void) PyErr_SyntaxLocationObject(
PyObject *filename,
int lineno,
int col_offset);
PyAPI_FUNC(void) PyErr_RangedSyntaxLocationObject(
PyObject *filename,
int lineno,
int col_offset,
int end_lineno,
int end_col_offset);
PyAPI_FUNC(PyObject *) PyErr_ProgramTextObject(
PyObject *filename,
int lineno);
PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalErrorFunc(
const char *func,
const char *message);
PyAPI_FUNC(void) PyErr_FormatUnraisable(const char *, ...);
PyAPI_DATA(PyObject *) PyExc_PythonFinalizationError;
#define Py_FatalError(message) _Py_FatalErrorFunc(__func__, (message))
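
One common consumer of this header is callback code that has no way to propagate an error to its caller. A minimal sketch using PyErr_FormatUnraisable() (added in 3.13); do_work() is a hypothetical helper that returns -1 with a Python exception set:

static void
on_event(void)
{
    if (do_work() < 0) {   /* hypothetical helper: -1 means an exception is pending */
        /* Report the pending exception via sys.unraisablehook instead of
           silently discarding it. */
        PyErr_FormatUnraisable("Exception ignored in on_event() callback");
    }
}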

View File

@@ -0,0 +1,15 @@
#ifndef Py_PYFPE_H
#define Py_PYFPE_H
/* Header excluded from the stable API */
#ifndef Py_LIMITED_API
/* These macros used to do something when Python was built with --with-fpectl,
* but support for that was dropped in 3.7. We continue to define them though,
* to avoid breaking API users.
*/
#define PyFPE_START_PROTECT(err_string, leave_stmt)
#define PyFPE_END_PROTECT(v)
#endif /* !defined(Py_LIMITED_API) */
#endif /* !Py_PYFPE_H */

View File

@@ -0,0 +1,45 @@
#ifndef Py_CPYTHON_PYFRAME_H
# error "this header file must not be included directly"
#endif
PyAPI_DATA(PyTypeObject) PyFrame_Type;
PyAPI_DATA(PyTypeObject) PyFrameLocalsProxy_Type;
#define PyFrame_Check(op) Py_IS_TYPE((op), &PyFrame_Type)
#define PyFrameLocalsProxy_Check(op) Py_IS_TYPE((op), &PyFrameLocalsProxy_Type)
PyAPI_FUNC(PyFrameObject *) PyFrame_GetBack(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetLocals(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetGlobals(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetBuiltins(PyFrameObject *frame);
PyAPI_FUNC(PyObject *) PyFrame_GetGenerator(PyFrameObject *frame);
PyAPI_FUNC(int) PyFrame_GetLasti(PyFrameObject *frame);
PyAPI_FUNC(PyObject*) PyFrame_GetVar(PyFrameObject *frame, PyObject *name);
PyAPI_FUNC(PyObject*) PyFrame_GetVarString(PyFrameObject *frame, const char *name);
/* The following functions are for use by debuggers and other tools
* implementing custom frame evaluators with PEP 523. */
struct _PyInterpreterFrame;
/* Returns the code object of the frame (strong reference).
* Does not raise an exception. */
PyAPI_FUNC(PyObject *) PyUnstable_InterpreterFrame_GetCode(struct _PyInterpreterFrame *frame);
/* Returns a byte offset into the last executed instruction.
* Does not raise an exception. */
PyAPI_FUNC(int) PyUnstable_InterpreterFrame_GetLasti(struct _PyInterpreterFrame *frame);
/* Returns the currently executing line number, or -1 if there is no line number.
* Does not raise an exception. */
PyAPI_FUNC(int) PyUnstable_InterpreterFrame_GetLine(struct _PyInterpreterFrame *frame);
#define PyUnstable_EXECUTABLE_KIND_SKIP 0
#define PyUnstable_EXECUTABLE_KIND_PY_FUNCTION 1
#define PyUnstable_EXECUTABLE_KIND_BUILTIN_FUNCTION 3
#define PyUnstable_EXECUTABLE_KIND_METHOD_DESCRIPTOR 4
#define PyUnstable_EXECUTABLE_KINDS 5
PyAPI_DATA(const PyTypeObject *) const PyUnstable_ExecutableKinds[PyUnstable_EXECUTABLE_KINDS+1];
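
A rough sketch of walking the current thread's frame stack with the accessors above. It assumes PyEval_GetFrame() (which returns a borrowed reference to the executing frame, or NULL) is available and that the GIL is held:

#include <Python.h>
#include <stdio.h>

static void
dump_frame_stack(void)
{
    PyFrameObject *frame = PyEval_GetFrame();   /* borrowed reference, may be NULL */
    Py_XINCREF(frame);
    while (frame != NULL) {
        printf("lasti=%d\n", PyFrame_GetLasti(frame));
        PyFrameObject *back = PyFrame_GetBack(frame);   /* strong reference or NULL */
        Py_DECREF(frame);
        frame = back;
    }
}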

View File

@@ -0,0 +1,47 @@
#ifndef Py_CPYTHON_HASH_H
# error "this header file must not be included directly"
#endif
/* Prime multiplier used in string and various other hashes. */
#define PyHASH_MULTIPLIER 1000003UL /* 0xf4243 */
/* Parameters used for the numeric hash implementation. See notes for
_Py_HashDouble in Python/pyhash.c. Numeric hashes are based on
reduction modulo the prime 2**_PyHASH_BITS - 1. */
#if SIZEOF_VOID_P >= 8
# define PyHASH_BITS 61
#else
# define PyHASH_BITS 31
#endif
#define PyHASH_MODULUS (((size_t)1 << _PyHASH_BITS) - 1)
#define PyHASH_INF 314159
#define PyHASH_IMAG PyHASH_MULTIPLIER
/* Aliases kept for backward compatibility with Python 3.12 */
#define _PyHASH_MULTIPLIER PyHASH_MULTIPLIER
#define _PyHASH_BITS PyHASH_BITS
#define _PyHASH_MODULUS PyHASH_MODULUS
#define _PyHASH_INF PyHASH_INF
#define _PyHASH_IMAG PyHASH_IMAG
/* Helpers for hash functions */
PyAPI_FUNC(Py_hash_t) _Py_HashDouble(PyObject *, double);
// Kept for backward compatibility
#define _Py_HashPointer Py_HashPointer
/* hash function definition */
typedef struct {
Py_hash_t (*const hash)(const void *, Py_ssize_t);
const char *name;
const int hash_bits;
const int seed_bits;
} PyHash_FuncDef;
PyAPI_FUNC(PyHash_FuncDef*) PyHash_GetFuncDef(void);
PyAPI_FUNC(Py_hash_t) Py_HashPointer(const void *ptr);
PyAPI_FUNC(Py_hash_t) PyObject_GenericHash(PyObject *);
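
A small sketch querying the configured string hash algorithm and hashing a pointer with the functions above (assumes the interpreter is initialized):

#include <Python.h>
#include <stdio.h>

static void
show_hash_setup(void)
{
    PyHash_FuncDef *def = PyHash_GetFuncDef();
    printf("string hash: %s (%d-bit hash, %d-bit seed)\n",
           def->name, def->hash_bits, def->seed_bits);

    static int anchor;
    /* Py_hash_t is a signed size-like integer, so %zd is adequate here. */
    printf("Py_HashPointer(&anchor) = %zd\n", (Py_ssize_t)Py_HashPointer(&anchor));
}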

View File

@@ -0,0 +1,92 @@
#ifndef Py_CPYTHON_PYLIFECYCLE_H
# error "this header file must not be included directly"
#endif
/* Py_FrozenMain is kept out of the Limited API until documented and present
in all builds of Python */
PyAPI_FUNC(int) Py_FrozenMain(int argc, char **argv);
/* PEP 432 Multi-phase initialization API (Private while provisional!) */
PyAPI_FUNC(PyStatus) Py_PreInitialize(
const PyPreConfig *src_config);
PyAPI_FUNC(PyStatus) Py_PreInitializeFromBytesArgs(
const PyPreConfig *src_config,
Py_ssize_t argc,
char **argv);
PyAPI_FUNC(PyStatus) Py_PreInitializeFromArgs(
const PyPreConfig *src_config,
Py_ssize_t argc,
wchar_t **argv);
/* Initialization and finalization */
PyAPI_FUNC(PyStatus) Py_InitializeFromConfig(
const PyConfig *config);
// Python 3.8 provisional API (PEP 587)
PyAPI_FUNC(PyStatus) _Py_InitializeMain(void);
PyAPI_FUNC(int) Py_RunMain(void);
PyAPI_FUNC(void) _Py_NO_RETURN Py_ExitStatusException(PyStatus err);
PyAPI_FUNC(int) Py_FdIsInteractive(FILE *, const char *);
/* --- PyInterpreterConfig ------------------------------------ */
#define PyInterpreterConfig_DEFAULT_GIL (0)
#define PyInterpreterConfig_SHARED_GIL (1)
#define PyInterpreterConfig_OWN_GIL (2)
typedef struct {
// XXX "allow_object_sharing"? "own_objects"?
int use_main_obmalloc;
int allow_fork;
int allow_exec;
int allow_threads;
int allow_daemon_threads;
int check_multi_interp_extensions;
int gil;
} PyInterpreterConfig;
#define _PyInterpreterConfig_INIT \
{ \
.use_main_obmalloc = 0, \
.allow_fork = 0, \
.allow_exec = 0, \
.allow_threads = 1, \
.allow_daemon_threads = 0, \
.check_multi_interp_extensions = 1, \
.gil = PyInterpreterConfig_OWN_GIL, \
}
// gh-117649: The free-threaded build does not currently support single-phase
// init extensions in subinterpreters. For now, we ensure that
// `check_multi_interp_extensions` is always `1`, even in the legacy config.
#ifdef Py_GIL_DISABLED
# define _PyInterpreterConfig_LEGACY_CHECK_MULTI_INTERP_EXTENSIONS 1
#else
# define _PyInterpreterConfig_LEGACY_CHECK_MULTI_INTERP_EXTENSIONS 0
#endif
#define _PyInterpreterConfig_LEGACY_INIT \
{ \
.use_main_obmalloc = 1, \
.allow_fork = 1, \
.allow_exec = 1, \
.allow_threads = 1, \
.allow_daemon_threads = 1, \
.check_multi_interp_extensions = _PyInterpreterConfig_LEGACY_CHECK_MULTI_INTERP_EXTENSIONS, \
.gil = PyInterpreterConfig_SHARED_GIL, \
}
PyAPI_FUNC(PyStatus) Py_NewInterpreterFromConfig(
PyThreadState **tstate_p,
const PyInterpreterConfig *config);
typedef void (*atexit_datacallbackfunc)(void *);
PyAPI_FUNC(int) PyUnstable_AtExit(
PyInterpreterState *, atexit_datacallbackfunc, void *);
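
A sketch of creating an isolated sub-interpreter with its own GIL via Py_NewInterpreterFromConfig(). It assumes the calling thread already holds a valid thread state (for example the main interpreter's) and restores it afterwards with PyThreadState_Swap():

static int
run_in_own_gil_interp(void)
{
    PyThreadState *main_tstate = PyThreadState_Get();

    PyInterpreterConfig config = {
        .use_main_obmalloc = 0,
        .allow_fork = 0,
        .allow_exec = 0,
        .allow_threads = 1,
        .allow_daemon_threads = 0,
        .check_multi_interp_extensions = 1,
        .gil = PyInterpreterConfig_OWN_GIL,
    };

    PyThreadState *tstate = NULL;
    PyStatus status = Py_NewInterpreterFromConfig(&tstate, &config);
    if (PyStatus_Exception(status)) {
        return -1;   /* the caller's thread state should still be current */
    }

    /* ... run code here; the new interpreter's tstate is now current ... */

    Py_EndInterpreter(tstate);
    PyThreadState_Swap(main_tstate);   /* restore the previous thread state */
    return 0;
}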

View File

@@ -0,0 +1,84 @@
#ifndef Py_CPYTHON_PYMEM_H
# error "this header file must not be included directly"
#endif
typedef enum {
/* PyMem_RawMalloc(), PyMem_RawRealloc() and PyMem_RawFree() */
PYMEM_DOMAIN_RAW,
/* PyMem_Malloc(), PyMem_Realloc() and PyMem_Free() */
PYMEM_DOMAIN_MEM,
/* PyObject_Malloc(), PyObject_Realloc() and PyObject_Free() */
PYMEM_DOMAIN_OBJ
} PyMemAllocatorDomain;
typedef enum {
PYMEM_ALLOCATOR_NOT_SET = 0,
PYMEM_ALLOCATOR_DEFAULT = 1,
PYMEM_ALLOCATOR_DEBUG = 2,
PYMEM_ALLOCATOR_MALLOC = 3,
PYMEM_ALLOCATOR_MALLOC_DEBUG = 4,
#ifdef WITH_PYMALLOC
PYMEM_ALLOCATOR_PYMALLOC = 5,
PYMEM_ALLOCATOR_PYMALLOC_DEBUG = 6,
#endif
#ifdef WITH_MIMALLOC
PYMEM_ALLOCATOR_MIMALLOC = 7,
PYMEM_ALLOCATOR_MIMALLOC_DEBUG = 8,
#endif
} PyMemAllocatorName;
typedef struct {
/* user context passed as the first argument to the 4 functions */
void *ctx;
/* allocate a memory block */
void* (*malloc) (void *ctx, size_t size);
/* allocate a memory block initialized by zeros */
void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
/* allocate or resize a memory block */
void* (*realloc) (void *ctx, void *ptr, size_t new_size);
/* release a memory block */
void (*free) (void *ctx, void *ptr);
} PyMemAllocatorEx;
/* Get the memory block allocator of the specified domain. */
PyAPI_FUNC(void) PyMem_GetAllocator(PyMemAllocatorDomain domain,
PyMemAllocatorEx *allocator);
/* Set the memory block allocator of the specified domain.
The new allocator must return a distinct non-NULL pointer when requesting
zero bytes.
For the PYMEM_DOMAIN_RAW domain, the allocator must be thread-safe: the GIL
is not held when the allocator is called.
If the new allocator is not a hook (i.e. it does not call the previous
allocator), the PyMem_SetupDebugHooks() function must be called to reinstall
the debug hooks on top of the new allocator. */
PyAPI_FUNC(void) PyMem_SetAllocator(PyMemAllocatorDomain domain,
PyMemAllocatorEx *allocator);
/* Setup hooks to detect bugs in the following Python memory allocator
functions:
- PyMem_RawMalloc(), PyMem_RawRealloc(), PyMem_RawFree()
- PyMem_Malloc(), PyMem_Realloc(), PyMem_Free()
- PyObject_Malloc(), PyObject_Realloc() and PyObject_Free()
Newly allocated memory is filled with the byte 0xCB, freed memory is filled
with the byte 0xDB. Additional checks:
- detect API violations, ex: PyObject_Free() called on a buffer allocated
by PyMem_Malloc()
- detect write before the start of the buffer (buffer underflow)
- detect write after the end of the buffer (buffer overflow)
The function does nothing if Python is not compiled in debug mode. */
PyAPI_FUNC(void) PyMem_SetupDebugHooks(void);
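
A minimal allocation-counting hook over the raw domain, following the Get/Set pattern described above. It must be installed before Py_Initialize(); the counter is not atomic here, so this is a simplification (a real PYMEM_DOMAIN_RAW hook must be thread-safe):

#include <Python.h>

static PyMemAllocatorEx orig_raw;
static size_t alloc_count = 0;

static void *
counting_malloc(void *ctx, size_t size)
{
    (void)ctx;
    alloc_count++;
    return orig_raw.malloc(orig_raw.ctx, size);
}

static void *
counting_calloc(void *ctx, size_t nelem, size_t elsize)
{
    (void)ctx;
    alloc_count++;
    return orig_raw.calloc(orig_raw.ctx, nelem, elsize);
}

static void *
counting_realloc(void *ctx, void *ptr, size_t new_size)
{
    (void)ctx;
    return orig_raw.realloc(orig_raw.ctx, ptr, new_size);
}

static void
counting_free(void *ctx, void *ptr)
{
    (void)ctx;
    orig_raw.free(orig_raw.ctx, ptr);
}

static void
install_counting_hook(void)   /* call before Py_Initialize() */
{
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &orig_raw);
    PyMemAllocatorEx hook = {
        .ctx = NULL,
        .malloc = counting_malloc,
        .calloc = counting_calloc,
        .realloc = counting_realloc,
        .free = counting_free,
    };
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &hook);
}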

View File

@@ -0,0 +1,277 @@
#ifndef Py_CPYTHON_PYSTATE_H
# error "this header file must not be included directly"
#endif
/* private interpreter helpers */
PyAPI_FUNC(int) _PyInterpreterState_RequiresIDRef(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_RequireIDRef(PyInterpreterState *, int);
PyAPI_FUNC(PyObject *) PyUnstable_InterpreterState_GetMainModule(PyInterpreterState *);
/* State unique per thread */
/* Py_tracefunc return -1 when raising an exception, or 0 for success. */
typedef int (*Py_tracefunc)(PyObject *, PyFrameObject *, int, PyObject *);
/* The following values are used for 'what' for tracefunc functions
*
* To add a new kind of trace event, also update "trace_init" in
* Python/sysmodule.c to define the Python level event name
*/
#define PyTrace_CALL 0
#define PyTrace_EXCEPTION 1
#define PyTrace_LINE 2
#define PyTrace_RETURN 3
#define PyTrace_C_CALL 4
#define PyTrace_C_EXCEPTION 5
#define PyTrace_C_RETURN 6
#define PyTrace_OPCODE 7
typedef struct _err_stackitem {
/* This struct represents a single execution context where we might
* be currently handling an exception. It is a per-coroutine state
* (coroutine in the computer science sense, including the thread
* and generators).
*
* This is used as an entry on the exception stack, where each
* entry indicates if it is currently handling an exception.
* This ensures that the exception state is not impacted
* by "yields" from an except handler. The thread
* always has an entry (the bottom-most one).
*/
/* The exception currently being handled in this context, if any. */
PyObject *exc_value;
struct _err_stackitem *previous_item;
} _PyErr_StackItem;
typedef struct _stack_chunk {
struct _stack_chunk *previous;
size_t size;
size_t top;
PyObject * data[1]; /* Variable sized */
} _PyStackChunk;
struct _ts {
/* See Python/ceval.c for comments explaining most fields */
PyThreadState *prev;
PyThreadState *next;
PyInterpreterState *interp;
/* The global instrumentation version in high bits, plus flags indicating
when to break out of the interpreter loop in lower bits. See details in
pycore_ceval.h. */
uintptr_t eval_breaker;
struct {
/* Has been initialized to a safe state.
In order to be effective, this must be set to 0 during or right
after allocation. */
unsigned int initialized:1;
/* Has been bound to an OS thread. */
unsigned int bound:1;
/* Has been unbound from its OS thread. */
unsigned int unbound:1;
/* Has been bound as current for the GILState API. */
unsigned int bound_gilstate:1;
/* Currently in use (maybe holds the GIL). */
unsigned int active:1;
/* Currently holds the GIL. */
unsigned int holds_gil:1;
/* various stages of finalization */
unsigned int finalizing:1;
unsigned int cleared:1;
unsigned int finalized:1;
/* padding to align to 4 bytes */
unsigned int :23;
} _status;
#ifdef Py_BUILD_CORE
# define _PyThreadState_WHENCE_NOTSET -1
# define _PyThreadState_WHENCE_UNKNOWN 0
# define _PyThreadState_WHENCE_INIT 1
# define _PyThreadState_WHENCE_FINI 2
# define _PyThreadState_WHENCE_THREADING 3
# define _PyThreadState_WHENCE_GILSTATE 4
# define _PyThreadState_WHENCE_EXEC 5
#endif
int _whence;
/* Thread state (_Py_THREAD_ATTACHED, _Py_THREAD_DETACHED, _Py_THREAD_SUSPENDED).
See Include/internal/pycore_pystate.h for more details. */
int state;
int py_recursion_remaining;
int py_recursion_limit;
int c_recursion_remaining;
int recursion_headroom; /* Allow 50 more calls to handle any errors. */
/* 'tracing' keeps track of the execution depth when tracing/profiling.
This is to prevent the actual trace/profile code from being recorded in
the trace/profile. */
int tracing;
int what_event; /* The event currently being monitored, if any. */
/* Pointer to currently executing frame. */
struct _PyInterpreterFrame *current_frame;
Py_tracefunc c_profilefunc;
Py_tracefunc c_tracefunc;
PyObject *c_profileobj;
PyObject *c_traceobj;
/* The exception currently being raised */
PyObject *current_exception;
/* Pointer to the top of the exception stack for the exceptions
* we may be currently handling. (See _PyErr_StackItem above.)
* This is never NULL. */
_PyErr_StackItem *exc_info;
PyObject *dict; /* Stores per-thread state */
int gilstate_counter;
PyObject *async_exc; /* Asynchronous exception to raise */
unsigned long thread_id; /* Thread id where this tstate was created */
/* Native thread id where this tstate was created. This will be 0 except on
* those platforms that have the notion of native thread id, for which the
* macro PY_HAVE_THREAD_NATIVE_ID is then defined.
*/
unsigned long native_thread_id;
PyObject *delete_later;
/* Tagged pointer to top-most critical section, or zero if there is no
* active critical section. Critical sections are only used in
* `--disable-gil` builds (i.e., when Py_GIL_DISABLED is defined to 1). In the
* default build, this field is always zero.
*/
uintptr_t critical_section;
int coroutine_origin_tracking_depth;
PyObject *async_gen_firstiter;
PyObject *async_gen_finalizer;
PyObject *context;
uint64_t context_ver;
/* Unique thread state id. */
uint64_t id;
_PyStackChunk *datastack_chunk;
PyObject **datastack_top;
PyObject **datastack_limit;
/* XXX signal handlers should also be here */
/* The following fields are here to avoid allocation during init.
The data is exposed through PyThreadState pointer fields.
These fields should not be accessed directly outside of init.
This is indicated by an underscore prefix on the field names.
All other PyInterpreterState pointer fields are populated when
needed and default to NULL.
*/
// Note some fields do not have a leading underscore for backward
// compatibility. See https://bugs.python.org/issue45953#msg412046.
/* The thread's exception stack entry. (Always the last entry.) */
_PyErr_StackItem exc_state;
PyObject *previous_executor;
uint64_t dict_global_version;
/* Used to store/retrieve `threading.local` keys/values for this thread */
PyObject *threading_local_key;
/* Used by `threading.local`s to remove keys/values for dying threads.
The PyThreadObject must hold the only reference to this value.
*/
PyObject *threading_local_sentinel;
};
#ifdef Py_DEBUG
// A debug build is likely built with low optimization level which implies
// higher stack memory usage than a release build: use a lower limit.
# define Py_C_RECURSION_LIMIT 500
#elif defined(__s390x__)
# define Py_C_RECURSION_LIMIT 800
#elif defined(_WIN32) && defined(_M_ARM64)
# define Py_C_RECURSION_LIMIT 1000
#elif defined(_WIN32)
# define Py_C_RECURSION_LIMIT 3000
#elif defined(__ANDROID__)
// On an ARM64 emulator, API level 34 was OK with 10000, but API level 21
// crashed in test_compiler_recursion_limit.
# define Py_C_RECURSION_LIMIT 3000
#elif defined(_Py_ADDRESS_SANITIZER)
# define Py_C_RECURSION_LIMIT 4000
#elif defined(__wasi__)
// Based on wasmtime 16.
# define Py_C_RECURSION_LIMIT 5000
#else
// This value is duplicated in Lib/test/support/__init__.py
# define Py_C_RECURSION_LIMIT 10000
#endif
/* other API */
/* Similar to PyThreadState_Get(), but do not issue a fatal error
 * if it is NULL. */
PyAPI_FUNC(PyThreadState *) PyThreadState_GetUnchecked(void);
// Alias kept for backward compatibility
#define _PyThreadState_UncheckedGet PyThreadState_GetUnchecked
// Disable tracing and profiling.
PyAPI_FUNC(void) PyThreadState_EnterTracing(PyThreadState *tstate);
// Reset tracing and profiling: enable them if a trace function or a profile
// function is set, otherwise disable them.
PyAPI_FUNC(void) PyThreadState_LeaveTracing(PyThreadState *tstate);
/* PyGILState */
/* Helper/diagnostic function - return 1 if the current thread
currently holds the GIL, 0 otherwise.
The function returns 1 if _PyGILState_check_enabled is non-zero. */
PyAPI_FUNC(int) PyGILState_Check(void);
/* The implementation of sys._current_frames(). Returns a dict mapping
   thread id to that thread's current frame.
*/
PyAPI_FUNC(PyObject*) _PyThread_CurrentFrames(void);
/* Routines for advanced debuggers, requested by David Beazley.
Don't use unless you know what you are doing! */
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Main(void);
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Head(void);
PyAPI_FUNC(PyInterpreterState *) PyInterpreterState_Next(PyInterpreterState *);
PyAPI_FUNC(PyThreadState *) PyInterpreterState_ThreadHead(PyInterpreterState *);
PyAPI_FUNC(PyThreadState *) PyThreadState_Next(PyThreadState *);
PyAPI_FUNC(void) PyThreadState_DeleteCurrent(void);
/* Frame evaluation API */
typedef PyObject* (*_PyFrameEvalFunction)(PyThreadState *tstate, struct _PyInterpreterFrame *, int);
PyAPI_FUNC(_PyFrameEvalFunction) _PyInterpreterState_GetEvalFrameFunc(
PyInterpreterState *interp);
PyAPI_FUNC(void) _PyInterpreterState_SetEvalFrameFunc(
PyInterpreterState *interp,
_PyFrameEvalFunction eval_frame);
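
A small sketch of a Py_tracefunc that counts PyTrace_CALL events; it assumes it is installed with PyEval_SetTrace() while the GIL is held:

#include <Python.h>

static unsigned long call_events = 0;

static int
count_calls(PyObject *obj, PyFrameObject *frame, int what, PyObject *arg)
{
    (void)obj; (void)frame; (void)arg;
    if (what == PyTrace_CALL) {
        call_events++;
    }
    return 0;   /* return -1 with an exception set to report an error */
}

/* Installation (GIL held):  PyEval_SetTrace(count_calls, NULL); */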

View File

@@ -0,0 +1,175 @@
// Statistics on Python performance.
//
// API:
//
// - _Py_INCREF_STAT_INC() and _Py_DECREF_STAT_INC() used by Py_INCREF()
// and Py_DECREF().
// - _Py_stats variable
//
// Functions of the sys module:
//
// - sys._stats_on()
// - sys._stats_off()
// - sys._stats_clear()
// - sys._stats_dump()
//
// Python must be built with ./configure --enable-pystats to define the
// Py_STATS macro.
//
// Define _PY_INTERPRETER macro to increment interpreter_increfs and
// interpreter_decrefs. Otherwise, increment increfs and decrefs.
//
// The number of incref operations counted by `incref` and
// `interpreter_incref` is the number of increment operations, which is
// not equal to the total of all reference counts. A single increment
// operation may increase the reference count of an object by more than
// one. For example, see `_Py_RefcntAdd`.
#ifndef Py_CPYTHON_PYSTATS_H
# error "this header file must not be included directly"
#endif
#define PYSTATS_MAX_UOP_ID 512
#define SPECIALIZATION_FAILURE_KINDS 36
/* Stats for determining who is calling PyEval_EvalFrame */
#define EVAL_CALL_TOTAL 0
#define EVAL_CALL_VECTOR 1
#define EVAL_CALL_GENERATOR 2
#define EVAL_CALL_LEGACY 3
#define EVAL_CALL_FUNCTION_VECTORCALL 4
#define EVAL_CALL_BUILD_CLASS 5
#define EVAL_CALL_SLOT 6
#define EVAL_CALL_FUNCTION_EX 7
#define EVAL_CALL_API 8
#define EVAL_CALL_METHOD 9
#define EVAL_CALL_KINDS 10
typedef struct _specialization_stats {
uint64_t success;
uint64_t failure;
uint64_t hit;
uint64_t deferred;
uint64_t miss;
uint64_t deopt;
uint64_t failure_kinds[SPECIALIZATION_FAILURE_KINDS];
} SpecializationStats;
typedef struct _opcode_stats {
SpecializationStats specialization;
uint64_t execution_count;
uint64_t pair_count[256];
} OpcodeStats;
typedef struct _call_stats {
uint64_t inlined_py_calls;
uint64_t pyeval_calls;
uint64_t frames_pushed;
uint64_t frame_objects_created;
uint64_t eval_calls[EVAL_CALL_KINDS];
} CallStats;
typedef struct _object_stats {
uint64_t increfs;
uint64_t decrefs;
uint64_t interpreter_increfs;
uint64_t interpreter_decrefs;
uint64_t allocations;
uint64_t allocations512;
uint64_t allocations4k;
uint64_t allocations_big;
uint64_t frees;
uint64_t to_freelist;
uint64_t from_freelist;
uint64_t inline_values;
uint64_t dict_materialized_on_request;
uint64_t dict_materialized_new_key;
uint64_t dict_materialized_too_big;
uint64_t dict_materialized_str_subclass;
uint64_t type_cache_hits;
uint64_t type_cache_misses;
uint64_t type_cache_dunder_hits;
uint64_t type_cache_dunder_misses;
uint64_t type_cache_collisions;
/* Temporary value used during GC */
uint64_t object_visits;
} ObjectStats;
typedef struct _gc_stats {
uint64_t collections;
uint64_t object_visits;
uint64_t objects_collected;
} GCStats;
typedef struct _uop_stats {
uint64_t execution_count;
uint64_t miss;
uint64_t pair_count[PYSTATS_MAX_UOP_ID + 1];
} UOpStats;
#define _Py_UOP_HIST_SIZE 32
typedef struct _optimization_stats {
uint64_t attempts;
uint64_t traces_created;
uint64_t traces_executed;
uint64_t uops_executed;
uint64_t trace_stack_overflow;
uint64_t trace_stack_underflow;
uint64_t trace_too_long;
uint64_t trace_too_short;
uint64_t inner_loop;
uint64_t recursive_call;
uint64_t low_confidence;
uint64_t executors_invalidated;
UOpStats opcode[PYSTATS_MAX_UOP_ID + 1];
uint64_t unsupported_opcode[256];
uint64_t trace_length_hist[_Py_UOP_HIST_SIZE];
uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE];
uint64_t optimized_trace_length_hist[_Py_UOP_HIST_SIZE];
uint64_t optimizer_attempts;
uint64_t optimizer_successes;
uint64_t optimizer_failure_reason_no_memory;
uint64_t remove_globals_builtins_changed;
uint64_t remove_globals_incorrect_keys;
uint64_t error_in_opcode[PYSTATS_MAX_UOP_ID + 1];
} OptimizationStats;
typedef struct _rare_event_stats {
/* Setting an object's class, obj.__class__ = ... */
uint64_t set_class;
/* Setting the bases of a class, cls.__bases__ = ... */
uint64_t set_bases;
/* Setting the PEP 523 frame eval function, _PyInterpreterState_SetFrameEvalFunc() */
uint64_t set_eval_frame_func;
/* Modifying the builtins, __builtins__.__dict__[var] = ... */
uint64_t builtin_dict;
/* Modifying a function, e.g. func.__defaults__ = ..., etc. */
uint64_t func_modification;
/* Modifying a dict that is being watched */
uint64_t watched_dict_modification;
uint64_t watched_globals_modification;
} RareEventStats;
typedef struct _stats {
OpcodeStats opcode_stats[256];
CallStats call_stats;
ObjectStats object_stats;
OptimizationStats optimization_stats;
RareEventStats rare_event_stats;
GCStats *gc_stats;
} PyStats;
// Export for shared extensions like 'math'
PyAPI_DATA(PyStats*) _Py_stats;
#ifdef _PY_INTERPRETER
# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_increfs++; } while (0)
# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.interpreter_decrefs++; } while (0)
#else
# define _Py_INCREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.increfs++; } while (0)
# define _Py_DECREF_STAT_INC() do { if (_Py_stats) _Py_stats->object_stats.decrefs++; } while (0)
#endif

View File

@@ -0,0 +1,96 @@
#ifndef Py_CPYTHON_PYTHONRUN_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(int) PyRun_SimpleStringFlags(const char *, PyCompilerFlags *);
PyAPI_FUNC(int) PyRun_AnyFileExFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
int closeit,
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_SimpleFileExFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
int closeit,
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_InteractiveOneFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_InteractiveOneObject(
FILE *fp,
PyObject *filename,
PyCompilerFlags *flags);
PyAPI_FUNC(int) PyRun_InteractiveLoopFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
PyCompilerFlags *flags);
PyAPI_FUNC(PyObject *) PyRun_StringFlags(const char *, int, PyObject *,
PyObject *, PyCompilerFlags *);
PyAPI_FUNC(PyObject *) PyRun_FileExFlags(
FILE *fp,
const char *filename, /* decoded from the filesystem encoding */
int start,
PyObject *globals,
PyObject *locals,
int closeit,
PyCompilerFlags *flags);
PyAPI_FUNC(PyObject *) Py_CompileStringExFlags(
const char *str,
const char *filename, /* decoded from the filesystem encoding */
int start,
PyCompilerFlags *flags,
int optimize);
PyAPI_FUNC(PyObject *) Py_CompileStringObject(
const char *str,
PyObject *filename, int start,
PyCompilerFlags *flags,
int optimize);
#define Py_CompileString(str, p, s) Py_CompileStringExFlags((str), (p), (s), NULL, -1)
#define Py_CompileStringFlags(str, p, s, f) Py_CompileStringExFlags((str), (p), (s), (f), -1)
/* A function flavor is also exported by libpython. It is required when
   libpython is accessed directly rather than using header files which define
   the macros below. On Windows, for example, PyAPI_FUNC() uses dllexport to
export functions in pythonXX.dll. */
PyAPI_FUNC(PyObject *) PyRun_String(const char *str, int s, PyObject *g, PyObject *l);
PyAPI_FUNC(int) PyRun_AnyFile(FILE *fp, const char *name);
PyAPI_FUNC(int) PyRun_AnyFileEx(FILE *fp, const char *name, int closeit);
PyAPI_FUNC(int) PyRun_AnyFileFlags(FILE *, const char *, PyCompilerFlags *);
PyAPI_FUNC(int) PyRun_SimpleString(const char *s);
PyAPI_FUNC(int) PyRun_SimpleFile(FILE *f, const char *p);
PyAPI_FUNC(int) PyRun_SimpleFileEx(FILE *f, const char *p, int c);
PyAPI_FUNC(int) PyRun_InteractiveOne(FILE *f, const char *p);
PyAPI_FUNC(int) PyRun_InteractiveLoop(FILE *f, const char *p);
PyAPI_FUNC(PyObject *) PyRun_File(FILE *fp, const char *p, int s, PyObject *g, PyObject *l);
PyAPI_FUNC(PyObject *) PyRun_FileEx(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, int c);
PyAPI_FUNC(PyObject *) PyRun_FileFlags(FILE *fp, const char *p, int s, PyObject *g, PyObject *l, PyCompilerFlags *flags);
/* Use macros for a bunch of old variants */
#define PyRun_String(str, s, g, l) PyRun_StringFlags((str), (s), (g), (l), NULL)
#define PyRun_AnyFile(fp, name) PyRun_AnyFileExFlags((fp), (name), 0, NULL)
#define PyRun_AnyFileEx(fp, name, closeit) \
PyRun_AnyFileExFlags((fp), (name), (closeit), NULL)
#define PyRun_AnyFileFlags(fp, name, flags) \
PyRun_AnyFileExFlags((fp), (name), 0, (flags))
#define PyRun_SimpleString(s) PyRun_SimpleStringFlags((s), NULL)
#define PyRun_SimpleFile(f, p) PyRun_SimpleFileExFlags((f), (p), 0, NULL)
#define PyRun_SimpleFileEx(f, p, c) PyRun_SimpleFileExFlags((f), (p), (c), NULL)
#define PyRun_InteractiveOne(f, p) PyRun_InteractiveOneFlags((f), (p), NULL)
#define PyRun_InteractiveLoop(f, p) PyRun_InteractiveLoopFlags((f), (p), NULL)
#define PyRun_File(fp, p, s, g, l) \
PyRun_FileExFlags((fp), (p), (s), (g), (l), 0, NULL)
#define PyRun_FileEx(fp, p, s, g, l, c) \
PyRun_FileExFlags((fp), (p), (s), (g), (l), (c), NULL)
#define PyRun_FileFlags(fp, p, s, g, l, flags) \
PyRun_FileExFlags((fp), (p), (s), (g), (l), 0, (flags))
/* Stuff with no proper home (yet) */
PyAPI_FUNC(char *) PyOS_Readline(FILE *, FILE *, const char *);
PyAPI_DATA(char) *(*PyOS_ReadlineFunctionPointer)(FILE *, FILE *, const char *);
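
A minimal embedding sketch built on the PyRun_* convenience layer above (the macro variants simply expand to the *Flags functions with flags=NULL):

#include <Python.h>

int
main(void)
{
    Py_Initialize();
    if (PyRun_SimpleString("print('hello from embedded Python')") != 0) {
        /* the traceback has already been printed to stderr */
    }
    return (Py_FinalizeEx() < 0) ? 1 : 0;
}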

View File

@@ -0,0 +1,43 @@
#ifndef Py_CPYTHON_PYTHREAD_H
# error "this header file must not be included directly"
#endif
// PY_TIMEOUT_MAX is the highest usable value (in microseconds) of PY_TIMEOUT_T
// type, and depends on the system threading API.
//
// NOTE: this isn't the same value as `_thread.TIMEOUT_MAX`. The _thread module
// exposes a higher-level API, with timeouts expressed in seconds and
// floating-point numbers allowed.
PyAPI_DATA(const long long) PY_TIMEOUT_MAX;
#define PYTHREAD_INVALID_THREAD_ID ((unsigned long)-1)
#ifdef HAVE_PTHREAD_H
/* Darwin needs pthread.h to know the type name pthread_key_t. */
# include <pthread.h>
# define NATIVE_TSS_KEY_T pthread_key_t
#elif defined(NT_THREADS)
/* On Windows, the native TSS key type is DWORD, but unsigned long is
   hardcoded here to avoid requiring an extra include directive.
*/
# define NATIVE_TSS_KEY_T unsigned long
#elif defined(HAVE_PTHREAD_STUBS)
# include "pthread_stubs.h"
# define NATIVE_TSS_KEY_T pthread_key_t
#else
# error "Require native threads. See https://bugs.python.org/issue31370"
#endif
/* When Py_LIMITED_API is not defined, the type layout of Py_tss_t is
exposed to allow static allocation in the API clients. Even in this case,
you must handle TSS keys through API functions due to compatibility.
*/
struct _Py_tss_t {
int _is_initialized;
NATIVE_TSS_KEY_T _key;
};
#undef NATIVE_TSS_KEY_T
/* When statically allocated, a Py_tss_t must be initialized with Py_tss_NEEDS_INIT. */
#define Py_tss_NEEDS_INIT {0}
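
A sketch of the statically allocated Py_tss_t pattern, using the PyThread_tss_* functions declared in the public part of pythread.h:

#include <Python.h>

static Py_tss_t my_key = Py_tss_NEEDS_INIT;   /* required for static allocation */

static int
stash_value(void *value)
{
    if (PyThread_tss_create(&my_key) != 0) {   /* cheap to call again once created */
        return -1;
    }
    return PyThread_tss_set(&my_key, value);
}

static void *
fetch_value(void)
{
    return PyThread_tss_get(&my_key);   /* NULL if nothing was stored in this thread */
}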

View File

@@ -0,0 +1,27 @@
// PyTime_t C API: see Doc/c-api/time.rst for the documentation.
#ifndef Py_LIMITED_API
#ifndef Py_PYTIME_H
#define Py_PYTIME_H
#ifdef __cplusplus
extern "C" {
#endif
typedef int64_t PyTime_t;
#define PyTime_MIN INT64_MIN
#define PyTime_MAX INT64_MAX
PyAPI_FUNC(double) PyTime_AsSecondsDouble(PyTime_t t);
PyAPI_FUNC(int) PyTime_Monotonic(PyTime_t *result);
PyAPI_FUNC(int) PyTime_PerfCounter(PyTime_t *result);
PyAPI_FUNC(int) PyTime_Time(PyTime_t *result);
PyAPI_FUNC(int) PyTime_MonotonicRaw(PyTime_t *result);
PyAPI_FUNC(int) PyTime_PerfCounterRaw(PyTime_t *result);
PyAPI_FUNC(int) PyTime_TimeRaw(PyTime_t *result);
#ifdef __cplusplus
}
#endif
#endif /* Py_PYTIME_H */
#endif /* Py_LIMITED_API */
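
A short sketch timing a block of work with the clock functions above (each returns 0 on success and -1 with an exception set on error):

#include <Python.h>
#include <stdio.h>

static int
time_some_work(void)
{
    PyTime_t t0, t1;
    if (PyTime_PerfCounter(&t0) < 0) {
        return -1;
    }
    /* ... the work being measured ... */
    if (PyTime_PerfCounter(&t1) < 0) {
        return -1;
    }
    printf("elapsed: %.6f s\n", PyTime_AsSecondsDouble(t1 - t0));
    return 0;
}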

View File

@@ -0,0 +1,71 @@
#ifndef Py_CPYTHON_SETOBJECT_H
# error "this header file must not be included directly"
#endif
/* There are three kinds of entries in the table:
1. Unused: key == NULL and hash == 0
2. Dummy: key == dummy and hash == -1
3. Active: key != NULL and key != dummy and hash != -1
The hash field of Unused slots is always zero.
The hash field of Dummy slots is set to -1,
meaning that dummy entries can be detected by
either entry->key==dummy or by entry->hash==-1.
*/
#define PySet_MINSIZE 8
typedef struct {
PyObject *key;
Py_hash_t hash; /* Cached hash code of the key */
} setentry;
/* The SetObject data structure is shared by set and frozenset objects.
Invariant for sets:
- hash is -1
Invariants for frozensets:
- data is immutable.
- hash is the hash of the frozenset or -1 if not computed yet.
*/
typedef struct {
PyObject_HEAD
Py_ssize_t fill; /* Number of active and dummy entries */
Py_ssize_t used; /* Number of active entries */
/* The table contains mask + 1 slots, and that's a power of 2.
* We store the mask instead of the size because the mask is more
* frequently needed.
*/
Py_ssize_t mask;
/* The table points to a fixed-size smalltable for small tables
* or to additional malloc'ed memory for bigger tables.
* The table pointer is never NULL which saves us from repeated
* runtime null-tests.
*/
setentry *table;
Py_hash_t hash; /* Only used by frozenset objects */
Py_ssize_t finger; /* Search finger for pop() */
setentry smalltable[PySet_MINSIZE];
PyObject *weakreflist; /* List of weak references */
} PySetObject;
#define _PySet_CAST(so) \
(assert(PyAnySet_Check(so)), _Py_CAST(PySetObject*, so))
static inline Py_ssize_t PySet_GET_SIZE(PyObject *so) {
#ifdef Py_GIL_DISABLED
return _Py_atomic_load_ssize_relaxed(&(_PySet_CAST(so)->used));
#else
return _PySet_CAST(so)->used;
#endif
}
#define PySet_GET_SIZE(so) PySet_GET_SIZE(_PyObject_CAST(so))

View File

@@ -0,0 +1,22 @@
#ifndef Py_CPYTHON_SYSMODULE_H
# error "this header file must not be included directly"
#endif
typedef int(*Py_AuditHookFunction)(const char *, PyObject *, void *);
PyAPI_FUNC(int) PySys_AddAuditHook(Py_AuditHookFunction, void*);
typedef struct {
FILE* perf_map;
PyThread_type_lock map_lock;
} PerfMapState;
PyAPI_FUNC(int) PyUnstable_PerfMapState_Init(void);
PyAPI_FUNC(int) PyUnstable_WritePerfMapEntry(
const void *code_addr,
unsigned int code_size,
const char *entry_name);
PyAPI_FUNC(void) PyUnstable_PerfMapState_Fini(void);
PyAPI_FUNC(int) PyUnstable_CopyPerfMapFile(const char* parent_filename);
PyAPI_FUNC(int) PyUnstable_PerfTrampoline_CompileCode(PyCodeObject *);
PyAPI_FUNC(int) PyUnstable_PerfTrampoline_SetPersistAfterFork(int enable);
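
A sketch of an audit hook that logs every event name; it can be registered before or after Py_Initialize() with PySys_AddAuditHook():

#include <Python.h>
#include <stdio.h>

static int
log_audit_event(const char *event, PyObject *args, void *userData)
{
    (void)args; (void)userData;
    fprintf(stderr, "audit: %s\n", event);
    return 0;   /* non-zero (with an exception set) aborts the audited operation */
}

/* Registration:  PySys_AddAuditHook(log_audit_event, NULL); */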

View File

@@ -0,0 +1,13 @@
#ifndef Py_CPYTHON_TRACEBACK_H
# error "this header file must not be included directly"
#endif
typedef struct _traceback PyTracebackObject;
struct _traceback {
PyObject_HEAD
PyTracebackObject *tb_next;
PyFrameObject *tb_frame;
int tb_lasti;
int tb_lineno;
};
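
A short sketch walking the tb_next chain of a traceback object using the struct above:

#include <Python.h>
#include <assert.h>
#include <stdio.h>

static void
print_traceback_lines(PyObject *tb_obj)
{
    assert(tb_obj == NULL || PyTraceBack_Check(tb_obj));
    PyTracebackObject *tb = (PyTracebackObject *)tb_obj;
    while (tb != NULL) {
        printf("line %d (lasti=%d)\n", tb->tb_lineno, tb->tb_lasti);
        tb = tb->tb_next;
    }
}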

View File

@@ -0,0 +1,32 @@
#ifndef Py_LIMITED_API
#ifndef Py_TRACEMALLOC_H
#define Py_TRACEMALLOC_H
#ifdef __cplusplus
extern "C" {
#endif
/* Track an allocated memory block in the tracemalloc module.
Return 0 on success, return -1 on error (failed to allocate memory to store
the trace).
Return -2 if tracemalloc is disabled.
If the memory block is already tracked, update the existing trace. */
PyAPI_FUNC(int) PyTraceMalloc_Track(
unsigned int domain,
uintptr_t ptr,
size_t size);
/* Untrack an allocated memory block in the tracemalloc module.
Do nothing if the block was not tracked.
Return -2 if tracemalloc is disabled, otherwise return 0. */
PyAPI_FUNC(int) PyTraceMalloc_Untrack(
unsigned int domain,
uintptr_t ptr);
#ifdef __cplusplus
}
#endif
#endif // !Py_TRACEMALLOC_H
#endif // !Py_LIMITED_API
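
A sketch of tracking a custom allocator's blocks in tracemalloc; the domain id 0x4242 is an arbitrary example value:

#include <Python.h>
#include <stdint.h>
#include <stdlib.h>

#define MY_DOMAIN 0x4242   /* arbitrary domain id chosen for this example */

static void *
traced_alloc(size_t size)
{
    void *ptr = malloc(size);
    if (ptr != NULL) {
        (void)PyTraceMalloc_Track(MY_DOMAIN, (uintptr_t)ptr, size);
    }
    return ptr;
}

static void
traced_free(void *ptr)
{
    if (ptr != NULL) {
        (void)PyTraceMalloc_Untrack(MY_DOMAIN, (uintptr_t)ptr);
        free(ptr);
    }
}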

View File

@@ -0,0 +1,38 @@
#ifndef Py_CPYTHON_TUPLEOBJECT_H
# error "this header file must not be included directly"
#endif
typedef struct {
PyObject_VAR_HEAD
/* ob_item contains space for 'ob_size' elements.
Items must normally not be NULL, except during construction when
the tuple is not yet visible outside the function that builds it. */
PyObject *ob_item[1];
} PyTupleObject;
PyAPI_FUNC(int) _PyTuple_Resize(PyObject **, Py_ssize_t);
/* Cast argument to PyTupleObject* type. */
#define _PyTuple_CAST(op) \
(assert(PyTuple_Check(op)), _Py_CAST(PyTupleObject*, (op)))
// Macros and static inline functions, trading safety for speed
static inline Py_ssize_t PyTuple_GET_SIZE(PyObject *op) {
PyTupleObject *tuple = _PyTuple_CAST(op);
return Py_SIZE(tuple);
}
#define PyTuple_GET_SIZE(op) PyTuple_GET_SIZE(_PyObject_CAST(op))
#define PyTuple_GET_ITEM(op, index) (_PyTuple_CAST(op)->ob_item[(index)])
/* Function *only* to be used to fill in brand new tuples */
static inline void
PyTuple_SET_ITEM(PyObject *op, Py_ssize_t index, PyObject *value) {
PyTupleObject *tuple = _PyTuple_CAST(op);
assert(0 <= index);
assert(index < Py_SIZE(tuple));
tuple->ob_item[index] = value;
}
#define PyTuple_SET_ITEM(op, index, value) \
PyTuple_SET_ITEM(_PyObject_CAST(op), (index), _PyObject_CAST(value))
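
A sketch of the intended PyTuple_SET_ITEM() usage: filling a brand-new tuple that is not yet visible to other code:

static PyObject *
make_triple(void)
{
    PyObject *t = PyTuple_New(3);
    if (t == NULL) {
        return NULL;
    }
    for (Py_ssize_t i = 0; i < 3; i++) {
        PyObject *v = PyLong_FromSsize_t(i);
        if (v == NULL) {
            Py_DECREF(t);
            return NULL;
        }
        PyTuple_SET_ITEM(t, i, v);   /* steals the reference to v */
    }
    return t;
}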

View File

@@ -0,0 +1,703 @@
#ifndef Py_CPYTHON_UNICODEOBJECT_H
# error "this header file must not be included directly"
#endif
/* Py_UNICODE was the native Unicode storage format (code unit) used by
Python and represents a single Unicode element in the Unicode type.
With PEP 393, Py_UNICODE is deprecated and replaced with a
typedef to wchar_t. */
Py_DEPRECATED(3.13) typedef wchar_t PY_UNICODE_TYPE;
Py_DEPRECATED(3.13) typedef wchar_t Py_UNICODE;
/* --- Internal Unicode Operations ---------------------------------------- */
// Static inline functions to work with surrogates
static inline int Py_UNICODE_IS_SURROGATE(Py_UCS4 ch) {
return (0xD800 <= ch && ch <= 0xDFFF);
}
static inline int Py_UNICODE_IS_HIGH_SURROGATE(Py_UCS4 ch) {
return (0xD800 <= ch && ch <= 0xDBFF);
}
static inline int Py_UNICODE_IS_LOW_SURROGATE(Py_UCS4 ch) {
return (0xDC00 <= ch && ch <= 0xDFFF);
}
// Join two surrogate characters and return a single Py_UCS4 value.
static inline Py_UCS4 Py_UNICODE_JOIN_SURROGATES(Py_UCS4 high, Py_UCS4 low) {
assert(Py_UNICODE_IS_HIGH_SURROGATE(high));
assert(Py_UNICODE_IS_LOW_SURROGATE(low));
return 0x10000 + (((high & 0x03FF) << 10) | (low & 0x03FF));
}
// High surrogate = top 10 bits added to 0xD800.
// The character must be in the range [U+10000; U+10ffff].
static inline Py_UCS4 Py_UNICODE_HIGH_SURROGATE(Py_UCS4 ch) {
assert(0x10000 <= ch && ch <= 0x10ffff);
return (0xD800 - (0x10000 >> 10) + (ch >> 10));
}
// Low surrogate = bottom 10 bits added to 0xDC00.
// The character must be in the range [U+10000; U+10ffff].
static inline Py_UCS4 Py_UNICODE_LOW_SURROGATE(Py_UCS4 ch) {
assert(0x10000 <= ch && ch <= 0x10ffff);
return (0xDC00 + (ch & 0x3FF));
}
/* --- Unicode Type ------------------------------------------------------- */
/* ASCII-only strings created through PyUnicode_New use the PyASCIIObject
structure. state.ascii and state.compact are set, and the data
immediately follow the structure. utf8_length can be found
in the length field; the utf8 pointer is equal to the data pointer. */
typedef struct {
/* There are 4 forms of Unicode strings:
- compact ascii:
* structure = PyASCIIObject
* test: PyUnicode_IS_COMPACT_ASCII(op)
* kind = PyUnicode_1BYTE_KIND
* compact = 1
* ascii = 1
* (length is the length of the utf8)
* (data starts just after the structure)
* (since ASCII is decoded from UTF-8, the utf8 string is the data)
- compact:
* structure = PyCompactUnicodeObject
* test: PyUnicode_IS_COMPACT(op) && !PyUnicode_IS_ASCII(op)
* kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND or
PyUnicode_4BYTE_KIND
* compact = 1
* ascii = 0
* utf8 is not shared with data
* utf8_length = 0 if utf8 is NULL
* (data starts just after the structure)
- legacy string:
* structure = PyUnicodeObject structure
* test: !PyUnicode_IS_COMPACT(op)
* kind = PyUnicode_1BYTE_KIND, PyUnicode_2BYTE_KIND or
PyUnicode_4BYTE_KIND
* compact = 0
* data.any is not NULL
* utf8 is shared and utf8_length = length with data.any if ascii = 1
* utf8_length = 0 if utf8 is NULL
Compact strings use only one memory block (structure + characters),
whereas legacy strings use one block for the structure and one block
for characters.
Legacy strings are created by subclasses of Unicode.
See also _PyUnicode_CheckConsistency().
*/
PyObject_HEAD
Py_ssize_t length; /* Number of code points in the string */
Py_hash_t hash; /* Hash value; -1 if not set */
struct {
/* If interned is non-zero, the two references from the
dictionary to this object are *not* counted in ob_refcnt.
The possible values here are:
0: Not Interned
1: Interned
2: Interned and Immortal
3: Interned, Immortal, and Static
This categorization allows the runtime to determine the right
cleanup mechanism at runtime shutdown. */
unsigned int interned:2;
/* Character size:
- PyUnicode_1BYTE_KIND (1):
* character type = Py_UCS1 (8 bits, unsigned)
* all characters are in the range U+0000-U+00FF (latin1)
* if ascii is set, all characters are in the range U+0000-U+007F
(ASCII), otherwise at least one character is in the range
U+0080-U+00FF
- PyUnicode_2BYTE_KIND (2):
* character type = Py_UCS2 (16 bits, unsigned)
* all characters are in the range U+0000-U+FFFF (BMP)
* at least one character is in the range U+0100-U+FFFF
- PyUnicode_4BYTE_KIND (4):
* character type = Py_UCS4 (32 bits, unsigned)
* all characters are in the range U+0000-U+10FFFF
* at least one character is in the range U+10000-U+10FFFF
*/
unsigned int kind:3;
/* Compact is with respect to the allocation scheme. Compact unicode
objects only require one memory block while non-compact objects use
one block for the PyUnicodeObject struct and another for its data
buffer. */
unsigned int compact:1;
/* The string only contains characters in the range U+0000-U+007F (ASCII)
and the kind is PyUnicode_1BYTE_KIND. If ascii is set and compact is
set, use the PyASCIIObject structure. */
unsigned int ascii:1;
/* The object is statically allocated. */
unsigned int statically_allocated:1;
/* Padding to ensure that PyUnicode_DATA() is always aligned to
4 bytes (see issue #19537 on m68k). */
unsigned int :24;
} state;
} PyASCIIObject;
/* Non-ASCII strings allocated through PyUnicode_New use the
PyCompactUnicodeObject structure. state.compact is set, and the data
immediately follow the structure. */
typedef struct {
PyASCIIObject _base;
Py_ssize_t utf8_length; /* Number of bytes in utf8, excluding the
* terminating \0. */
char *utf8; /* UTF-8 representation (null-terminated) */
} PyCompactUnicodeObject;
/* Object format for Unicode subclasses. */
typedef struct {
PyCompactUnicodeObject _base;
union {
void *any;
Py_UCS1 *latin1;
Py_UCS2 *ucs2;
Py_UCS4 *ucs4;
} data; /* Canonical, smallest-form Unicode buffer */
} PyUnicodeObject;
#define _PyASCIIObject_CAST(op) \
(assert(PyUnicode_Check(op)), \
_Py_CAST(PyASCIIObject*, (op)))
#define _PyCompactUnicodeObject_CAST(op) \
(assert(PyUnicode_Check(op)), \
_Py_CAST(PyCompactUnicodeObject*, (op)))
#define _PyUnicodeObject_CAST(op) \
(assert(PyUnicode_Check(op)), \
_Py_CAST(PyUnicodeObject*, (op)))
/* --- Flexible String Representation Helper Macros (PEP 393) -------------- */
/* Values for PyASCIIObject.state: */
/* Interning state. */
#define SSTATE_NOT_INTERNED 0
#define SSTATE_INTERNED_MORTAL 1
#define SSTATE_INTERNED_IMMORTAL 2
#define SSTATE_INTERNED_IMMORTAL_STATIC 3
/* Use only if you know it's a string */
static inline unsigned int PyUnicode_CHECK_INTERNED(PyObject *op) {
return _PyASCIIObject_CAST(op)->state.interned;
}
#define PyUnicode_CHECK_INTERNED(op) PyUnicode_CHECK_INTERNED(_PyObject_CAST(op))
/* For backward compatibility */
static inline unsigned int PyUnicode_IS_READY(PyObject* Py_UNUSED(op)) {
return 1;
}
#define PyUnicode_IS_READY(op) PyUnicode_IS_READY(_PyObject_CAST(op))
/* Return true if the string contains only ASCII characters, or 0 if not. The
string may be compact (PyUnicode_IS_COMPACT_ASCII) or not, but must be
ready. */
static inline unsigned int PyUnicode_IS_ASCII(PyObject *op) {
return _PyASCIIObject_CAST(op)->state.ascii;
}
#define PyUnicode_IS_ASCII(op) PyUnicode_IS_ASCII(_PyObject_CAST(op))
/* Return true if the string is compact or 0 if not.
No type checks or Ready calls are performed. */
static inline unsigned int PyUnicode_IS_COMPACT(PyObject *op) {
return _PyASCIIObject_CAST(op)->state.compact;
}
#define PyUnicode_IS_COMPACT(op) PyUnicode_IS_COMPACT(_PyObject_CAST(op))
/* Return true if the string is a compact ASCII string (use PyASCIIObject
structure), or 0 if not. No type checks or Ready calls are performed. */
static inline int PyUnicode_IS_COMPACT_ASCII(PyObject *op) {
return (_PyASCIIObject_CAST(op)->state.ascii && PyUnicode_IS_COMPACT(op));
}
#define PyUnicode_IS_COMPACT_ASCII(op) PyUnicode_IS_COMPACT_ASCII(_PyObject_CAST(op))
enum PyUnicode_Kind {
/* Return values of the PyUnicode_KIND() function: */
PyUnicode_1BYTE_KIND = 1,
PyUnicode_2BYTE_KIND = 2,
PyUnicode_4BYTE_KIND = 4
};
// PyUnicode_KIND(): Return one of the PyUnicode_*_KIND values defined above.
//
// gh-89653: Converting this macro to a static inline function would introduce
// new compiler warnings on "kind < PyUnicode_KIND(str)" (compare signed and
// unsigned numbers) where kind type is an int or on
// "unsigned int kind = PyUnicode_KIND(str)" (cast signed to unsigned).
#define PyUnicode_KIND(op) _Py_RVALUE(_PyASCIIObject_CAST(op)->state.kind)
/* Return a void pointer to the raw unicode buffer. */
static inline void* _PyUnicode_COMPACT_DATA(PyObject *op) {
if (PyUnicode_IS_ASCII(op)) {
return _Py_STATIC_CAST(void*, (_PyASCIIObject_CAST(op) + 1));
}
return _Py_STATIC_CAST(void*, (_PyCompactUnicodeObject_CAST(op) + 1));
}
static inline void* _PyUnicode_NONCOMPACT_DATA(PyObject *op) {
void *data;
assert(!PyUnicode_IS_COMPACT(op));
data = _PyUnicodeObject_CAST(op)->data.any;
assert(data != NULL);
return data;
}
static inline void* PyUnicode_DATA(PyObject *op) {
if (PyUnicode_IS_COMPACT(op)) {
return _PyUnicode_COMPACT_DATA(op);
}
return _PyUnicode_NONCOMPACT_DATA(op);
}
#define PyUnicode_DATA(op) PyUnicode_DATA(_PyObject_CAST(op))
/* Return pointers to the canonical representation cast to unsigned char,
Py_UCS2, or Py_UCS4 for direct character access.
No checks are performed, use PyUnicode_KIND() before to ensure
these will work correctly. */
#define PyUnicode_1BYTE_DATA(op) _Py_STATIC_CAST(Py_UCS1*, PyUnicode_DATA(op))
#define PyUnicode_2BYTE_DATA(op) _Py_STATIC_CAST(Py_UCS2*, PyUnicode_DATA(op))
#define PyUnicode_4BYTE_DATA(op) _Py_STATIC_CAST(Py_UCS4*, PyUnicode_DATA(op))
/* Returns the length of the unicode string. */
static inline Py_ssize_t PyUnicode_GET_LENGTH(PyObject *op) {
return _PyASCIIObject_CAST(op)->length;
}
#define PyUnicode_GET_LENGTH(op) PyUnicode_GET_LENGTH(_PyObject_CAST(op))
/* Write into the canonical representation. This function does not do any sanity
   checks and is intended for use in loops. The caller should cache the
   kind and data pointers obtained from other function calls.
   index is the index in the string (starting at 0) and value is the new
   code point value which should be written to that location. */
static inline void PyUnicode_WRITE(int kind, void *data,
Py_ssize_t index, Py_UCS4 value)
{
assert(index >= 0);
if (kind == PyUnicode_1BYTE_KIND) {
assert(value <= 0xffU);
_Py_STATIC_CAST(Py_UCS1*, data)[index] = _Py_STATIC_CAST(Py_UCS1, value);
}
else if (kind == PyUnicode_2BYTE_KIND) {
assert(value <= 0xffffU);
_Py_STATIC_CAST(Py_UCS2*, data)[index] = _Py_STATIC_CAST(Py_UCS2, value);
}
else {
assert(kind == PyUnicode_4BYTE_KIND);
assert(value <= 0x10ffffU);
_Py_STATIC_CAST(Py_UCS4*, data)[index] = value;
}
}
#define PyUnicode_WRITE(kind, data, index, value) \
PyUnicode_WRITE(_Py_STATIC_CAST(int, kind), _Py_CAST(void*, data), \
(index), _Py_STATIC_CAST(Py_UCS4, value))
/* Read a code point from the string's canonical representation. No checks
or ready calls are performed. */
static inline Py_UCS4 PyUnicode_READ(int kind,
const void *data, Py_ssize_t index)
{
assert(index >= 0);
if (kind == PyUnicode_1BYTE_KIND) {
return _Py_STATIC_CAST(const Py_UCS1*, data)[index];
}
if (kind == PyUnicode_2BYTE_KIND) {
return _Py_STATIC_CAST(const Py_UCS2*, data)[index];
}
assert(kind == PyUnicode_4BYTE_KIND);
return _Py_STATIC_CAST(const Py_UCS4*, data)[index];
}
#define PyUnicode_READ(kind, data, index) \
PyUnicode_READ(_Py_STATIC_CAST(int, kind), \
_Py_STATIC_CAST(const void*, data), \
(index))
/* PyUnicode_READ_CHAR() is less efficient than PyUnicode_READ() because it
   calls PyUnicode_KIND() and might call it twice. For single reads, use
   PyUnicode_READ_CHAR; for multiple consecutive reads, callers should
   cache the kind and use PyUnicode_READ instead. */
static inline Py_UCS4 PyUnicode_READ_CHAR(PyObject *unicode, Py_ssize_t index)
{
int kind;
assert(index >= 0);
// Tolerate reading the NUL character at str[len(str)]
assert(index <= PyUnicode_GET_LENGTH(unicode));
kind = PyUnicode_KIND(unicode);
if (kind == PyUnicode_1BYTE_KIND) {
return PyUnicode_1BYTE_DATA(unicode)[index];
}
if (kind == PyUnicode_2BYTE_KIND) {
return PyUnicode_2BYTE_DATA(unicode)[index];
}
assert(kind == PyUnicode_4BYTE_KIND);
return PyUnicode_4BYTE_DATA(unicode)[index];
}
#define PyUnicode_READ_CHAR(unicode, index) \
PyUnicode_READ_CHAR(_PyObject_CAST(unicode), (index))
/* Return a maximum character value which is suitable for creating another
string based on op. This is always an approximation but more efficient
than iterating over the string. */
static inline Py_UCS4 PyUnicode_MAX_CHAR_VALUE(PyObject *op)
{
int kind;
if (PyUnicode_IS_ASCII(op)) {
return 0x7fU;
}
kind = PyUnicode_KIND(op);
if (kind == PyUnicode_1BYTE_KIND) {
return 0xffU;
}
if (kind == PyUnicode_2BYTE_KIND) {
return 0xffffU;
}
assert(kind == PyUnicode_4BYTE_KIND);
return 0x10ffffU;
}
#define PyUnicode_MAX_CHAR_VALUE(op) \
PyUnicode_MAX_CHAR_VALUE(_PyObject_CAST(op))
/* === Public API ========================================================= */
/* With PEP 393, this is the recommended way to allocate a new unicode object.
This function will allocate the object and its buffer in a single memory
block. Objects created using this function are not resizable. */
PyAPI_FUNC(PyObject*) PyUnicode_New(
Py_ssize_t size, /* Number of code points in the new string */
Py_UCS4 maxchar /* maximum code point value in the string */
);
/* For backward compatibility */
static inline int PyUnicode_READY(PyObject* Py_UNUSED(op))
{
return 0;
}
#define PyUnicode_READY(op) PyUnicode_READY(_PyObject_CAST(op))
/* Copy character from one unicode object into another, this function performs
character conversion when necessary and falls back to memcpy() if possible.
Fail if to is too small (smaller than *how_many* or smaller than
len(from)-from_start), or if kind(from[from_start:from_start+how_many]) >
kind(to), or if *to* has more than 1 reference.
Return the number of written characters, or return -1 and raise an exception
on error.
Pseudo-code:
how_many = min(how_many, len(from) - from_start)
to[to_start:to_start+how_many] = from[from_start:from_start+how_many]
return how_many
Note: The function doesn't write a terminating null character.
*/
PyAPI_FUNC(Py_ssize_t) PyUnicode_CopyCharacters(
PyObject *to,
Py_ssize_t to_start,
PyObject *from,
Py_ssize_t from_start,
Py_ssize_t how_many
);
/* Fill a string with a character: write fill_char into
unicode[start:start+length].
Fail if fill_char is bigger than the string maximum character, or if the
string has more than 1 reference.
Return the number of written characters, or return -1 and raise an exception
on error. */
PyAPI_FUNC(Py_ssize_t) PyUnicode_Fill(
PyObject *unicode,
Py_ssize_t start,
Py_ssize_t length,
Py_UCS4 fill_char
);
/* Create a new string from a buffer of Py_UCS1, Py_UCS2 or Py_UCS4 characters.
Scan the string to find the maximum character. */
PyAPI_FUNC(PyObject*) PyUnicode_FromKindAndData(
int kind,
const void *buffer,
Py_ssize_t size);
/* --- _PyUnicodeWriter API ----------------------------------------------- */
typedef struct {
PyObject *buffer;
void *data;
int kind;
Py_UCS4 maxchar;
Py_ssize_t size;
Py_ssize_t pos;
/* minimum number of allocated characters (default: 0) */
Py_ssize_t min_length;
/* minimum character (default: 127, ASCII) */
Py_UCS4 min_char;
/* If non-zero, overallocate the buffer (default: 0). */
unsigned char overallocate;
/* If readonly is 1, buffer is a shared string (cannot be modified)
and size is set to 0. */
unsigned char readonly;
} _PyUnicodeWriter;
// Initialize a Unicode writer.
//
// By default, the minimum buffer size is 0 characters and overallocation is
// disabled. Set min_length, min_char and overallocate attributes to control
// the allocation of the buffer.
PyAPI_FUNC(void)
_PyUnicodeWriter_Init(_PyUnicodeWriter *writer);
/* Prepare the buffer to write 'length' characters
with the specified maximum character.
Return 0 on success, raise an exception and return -1 on error. */
#define _PyUnicodeWriter_Prepare(WRITER, LENGTH, MAXCHAR) \
(((MAXCHAR) <= (WRITER)->maxchar \
&& (LENGTH) <= (WRITER)->size - (WRITER)->pos) \
? 0 \
: (((LENGTH) == 0) \
? 0 \
: _PyUnicodeWriter_PrepareInternal((WRITER), (LENGTH), (MAXCHAR))))
/* Don't call this function directly, use the _PyUnicodeWriter_Prepare() macro
instead. */
PyAPI_FUNC(int)
_PyUnicodeWriter_PrepareInternal(_PyUnicodeWriter *writer,
Py_ssize_t length, Py_UCS4 maxchar);
/* Prepare the buffer to have at least the kind KIND.
For example, kind=PyUnicode_2BYTE_KIND ensures that the writer will
support characters in the range U+0000-U+FFFF.
Return 0 on success, raise an exception and return -1 on error. */
#define _PyUnicodeWriter_PrepareKind(WRITER, KIND) \
((KIND) <= (WRITER)->kind \
? 0 \
: _PyUnicodeWriter_PrepareKindInternal((WRITER), (KIND)))
/* Don't call this function directly, use the _PyUnicodeWriter_PrepareKind()
macro instead. */
PyAPI_FUNC(int)
_PyUnicodeWriter_PrepareKindInternal(_PyUnicodeWriter *writer,
int kind);
/* Append a Unicode character.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteChar(_PyUnicodeWriter *writer,
Py_UCS4 ch
);
/* Append a Unicode string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteStr(_PyUnicodeWriter *writer,
PyObject *str /* Unicode string */
);
/* Append a substring of a Unicode string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteSubstring(_PyUnicodeWriter *writer,
PyObject *str, /* Unicode string */
Py_ssize_t start,
Py_ssize_t end
);
/* Append an ASCII-encoded byte string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteASCIIString(_PyUnicodeWriter *writer,
const char *str, /* ASCII-encoded byte string */
Py_ssize_t len /* number of bytes, or -1 if unknown */
);
/* Append a latin1-encoded byte string.
Return 0 on success, raise an exception and return -1 on error. */
PyAPI_FUNC(int)
_PyUnicodeWriter_WriteLatin1String(_PyUnicodeWriter *writer,
const char *str, /* latin1-encoded byte string */
Py_ssize_t len /* length in bytes */
);
/* Get the value of the writer as a Unicode string. Clear the
buffer of the writer. Raise an exception and return NULL
on error. */
PyAPI_FUNC(PyObject *)
_PyUnicodeWriter_Finish(_PyUnicodeWriter *writer);
/* Deallocate memory of a writer (clear its internal buffer). */
PyAPI_FUNC(void)
_PyUnicodeWriter_Dealloc(_PyUnicodeWriter *writer);
/* --- Manage the default encoding ---------------------------------------- */
/* Returns a pointer to the default encoding (UTF-8) of the
Unicode object unicode.
Like PyUnicode_AsUTF8AndSize(), this also caches the UTF-8 representation
in the unicodeobject.
_PyUnicode_AsString is a #define for PyUnicode_AsUTF8 to
support the previous internal function with the same behaviour.
Use of this API is DEPRECATED since no size information can be
extracted from the returned data.
*/
PyAPI_FUNC(const char *) PyUnicode_AsUTF8(PyObject *unicode);
// Alias kept for backward compatibility
#define _PyUnicode_AsString PyUnicode_AsUTF8
/* === Characters Type APIs =============================================== */
/* These should not be used directly. Use the Py_UNICODE_IS* and
Py_UNICODE_TO* macros instead.
These APIs are implemented in Objects/unicodectype.c.
*/
PyAPI_FUNC(int) _PyUnicode_IsLowercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsUppercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsTitlecase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsWhitespace(
const Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsLinebreak(
const Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(Py_UCS4) _PyUnicode_ToLowercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(Py_UCS4) _PyUnicode_ToUppercase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(Py_UCS4) _PyUnicode_ToTitlecase(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_ToDecimalDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_ToDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(double) _PyUnicode_ToNumeric(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsDecimalDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsDigit(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsNumeric(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsPrintable(
Py_UCS4 ch /* Unicode character */
);
PyAPI_FUNC(int) _PyUnicode_IsAlpha(
Py_UCS4 ch /* Unicode character */
);
// Helper array used by Py_UNICODE_ISSPACE().
PyAPI_DATA(const unsigned char) _Py_ascii_whitespace[];
// Since splitting on whitespace is an important use case, and
// whitespace in most situations is solely ASCII whitespace, we
// optimize for the common case by using a quick look-up table
// _Py_ascii_whitespace (see below) with an inlined check.
static inline int Py_UNICODE_ISSPACE(Py_UCS4 ch) {
if (ch < 128) {
return _Py_ascii_whitespace[ch];
}
return _PyUnicode_IsWhitespace(ch);
}
#define Py_UNICODE_ISLOWER(ch) _PyUnicode_IsLowercase(ch)
#define Py_UNICODE_ISUPPER(ch) _PyUnicode_IsUppercase(ch)
#define Py_UNICODE_ISTITLE(ch) _PyUnicode_IsTitlecase(ch)
#define Py_UNICODE_ISLINEBREAK(ch) _PyUnicode_IsLinebreak(ch)
#define Py_UNICODE_TOLOWER(ch) _PyUnicode_ToLowercase(ch)
#define Py_UNICODE_TOUPPER(ch) _PyUnicode_ToUppercase(ch)
#define Py_UNICODE_TOTITLE(ch) _PyUnicode_ToTitlecase(ch)
#define Py_UNICODE_ISDECIMAL(ch) _PyUnicode_IsDecimalDigit(ch)
#define Py_UNICODE_ISDIGIT(ch) _PyUnicode_IsDigit(ch)
#define Py_UNICODE_ISNUMERIC(ch) _PyUnicode_IsNumeric(ch)
#define Py_UNICODE_ISPRINTABLE(ch) _PyUnicode_IsPrintable(ch)
#define Py_UNICODE_TODECIMAL(ch) _PyUnicode_ToDecimalDigit(ch)
#define Py_UNICODE_TODIGIT(ch) _PyUnicode_ToDigit(ch)
#define Py_UNICODE_TONUMERIC(ch) _PyUnicode_ToNumeric(ch)
#define Py_UNICODE_ISALPHA(ch) _PyUnicode_IsAlpha(ch)
static inline int Py_UNICODE_ISALNUM(Py_UCS4 ch) {
return (Py_UNICODE_ISALPHA(ch)
|| Py_UNICODE_ISDECIMAL(ch)
|| Py_UNICODE_ISDIGIT(ch)
|| Py_UNICODE_ISNUMERIC(ch));
}
/* === Misc functions ===================================================== */
// Return an interned Unicode object for an Identifier; may fail if there is no
// memory.
PyAPI_FUNC(PyObject*) _PyUnicode_FromId(_Py_Identifier*);
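
A sketch of the PEP 393 creation pattern described above: allocate with PyUnicode_New() for a known length and maximum character, then fill it with PyUnicode_WRITE() using cached kind and data pointers:

#include <Python.h>
#include <assert.h>

static PyObject *
make_abc(void)
{
    PyObject *str = PyUnicode_New(3, 127);   /* 3 code points, max char U+007F */
    if (str == NULL) {
        return NULL;
    }
    int kind = PyUnicode_KIND(str);
    void *data = PyUnicode_DATA(str);
    for (Py_ssize_t i = 0; i < 3; i++) {
        PyUnicode_WRITE(kind, data, i, (Py_UCS4)('a' + i));
    }
    assert(PyUnicode_READ(kind, data, 0) == 'a');
    return str;   /* the string "abc" */
}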

View File

@@ -0,0 +1,20 @@
#ifndef Py_CPYTHON_WARNINGS_H
# error "this header file must not be included directly"
#endif
PyAPI_FUNC(int) PyErr_WarnExplicitObject(
PyObject *category,
PyObject *message,
PyObject *filename,
int lineno,
PyObject *module,
PyObject *registry);
PyAPI_FUNC(int) PyErr_WarnExplicitFormat(
PyObject *category,
const char *filename, int lineno,
const char *module, PyObject *registry,
const char *format, ...);
// DEPRECATED: Use PyErr_WarnEx() instead.
#define PyErr_Warn(category, msg) PyErr_WarnEx((category), (msg), 1)
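
As the deprecation note says, new code should call PyErr_WarnEx() directly. A minimal sketch (frob/frobnicate are hypothetical names used only for illustration):

static PyObject *
frob(PyObject *self, PyObject *args)
{
    (void)self; (void)args;
    if (PyErr_WarnEx(PyExc_DeprecationWarning,
                     "frob() is deprecated, use frobnicate() instead", 1) < 0) {
        return NULL;   /* the warning was turned into an exception */
    }
    /* ... */
    Py_RETURN_NONE;
}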

View File

@@ -0,0 +1,63 @@
#ifndef Py_CPYTHON_WEAKREFOBJECT_H
# error "this header file must not be included directly"
#endif
/* PyWeakReference is the base struct for the Python ReferenceType, ProxyType,
* and CallableProxyType.
*/
struct _PyWeakReference {
PyObject_HEAD
/* The object to which this is a weak reference, or Py_None if none.
* Note that this is a stealth reference: wr_object's refcount is
* not incremented to reflect this pointer.
*/
PyObject *wr_object;
/* A callable to invoke when wr_object dies, or NULL if none. */
PyObject *wr_callback;
/* A cache for wr_object's hash code. As usual for hashes, this is -1
* if the hash code isn't known yet.
*/
Py_hash_t hash;
/* If wr_object is weakly referenced, wr_object has a doubly-linked NULL-
* terminated list of weak references to it. These are the list pointers.
* If wr_object goes away, wr_object is set to Py_None, and these pointers
* have no meaning then.
*/
PyWeakReference *wr_prev;
PyWeakReference *wr_next;
vectorcallfunc vectorcall;
#ifdef Py_GIL_DISABLED
/* Pointer to the lock used when clearing in free-threaded builds.
* Normally this can be derived from wr_object, but in some cases we need
* to lock after wr_object has been set to Py_None.
*/
PyMutex *weakrefs_lock;
#endif
};
PyAPI_FUNC(void) _PyWeakref_ClearRef(PyWeakReference *self);
Py_DEPRECATED(3.13) static inline PyObject* PyWeakref_GET_OBJECT(PyObject *ref_obj)
{
PyWeakReference *ref;
PyObject *obj;
assert(PyWeakref_Check(ref_obj));
ref = _Py_CAST(PyWeakReference*, ref_obj);
obj = ref->wr_object;
// Explanation for the Py_REFCNT() check: when a weakref's target is part
// of a long chain of deallocations which triggers the trashcan mechanism,
// clearing the weakrefs can be delayed long after the target's refcount
// has dropped to zero. In the meantime, code accessing the weakref will
// be able to "see" the target object even though it is supposed to be
// unreachable. See issue gh-60806.
if (Py_REFCNT(obj) > 0) {
return obj;
}
return Py_None;
}
#define PyWeakref_GET_OBJECT(ref) PyWeakref_GET_OBJECT(_PyObject_CAST(ref))
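
Since PyWeakref_GET_OBJECT() is deprecated (it returns a borrowed reference that can disappear at any moment), a sketch of the replacement pattern with PyWeakref_GetRef(), which hands out a strong reference (available since Python 3.13):

static void
use_weakref_target(PyObject *ref)
{
    PyObject *obj = NULL;
    int rc = PyWeakref_GetRef(ref, &obj);
    if (rc < 0) {
        /* 'ref' is not a weak reference object: an exception is set */
        return;
    }
    if (rc == 0) {
        /* the referent has already been collected */
        return;
    }
    /* ... use obj, which is a strong reference ... */
    Py_DECREF(obj);
}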