Mirror of https://github.com/bunny-lab-io/Borealis.git, synced 2025-10-27 12:21:57 -06:00

Commit: Removed the Requirement to Install Python and NodeJS (Now Bundled with Borealis)
Dependencies/Python/include/internal/mimalloc/mimalloc.h (vendored, new file, 565 lines)
@@ -0,0 +1,565 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_H
#define MIMALLOC_H

#define MI_MALLOC_VERSION 212   // major + 2 digits minor

// ------------------------------------------------------
// Compiler specific attributes
// ------------------------------------------------------

#ifdef __cplusplus
  #if (__cplusplus >= 201103L) || (_MSC_VER > 1900)  // C++11
    #define mi_attr_noexcept   noexcept
  #else
    #define mi_attr_noexcept   throw()
  #endif
#else
  #define mi_attr_noexcept
#endif

#if defined(__cplusplus) && (__cplusplus >= 201703)
  #define mi_decl_nodiscard  [[nodiscard]]
#elif (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)  // includes clang, icc, and clang-cl
  #define mi_decl_nodiscard  __attribute__((warn_unused_result))
#elif defined(_HAS_NODISCARD)
  #define mi_decl_nodiscard  _NODISCARD
#elif (_MSC_VER >= 1700)
  #define mi_decl_nodiscard  _Check_return_
#else
  #define mi_decl_nodiscard
#endif

#if defined(_MSC_VER) || defined(__MINGW32__)
  #if !defined(MI_SHARED_LIB)
    #define mi_decl_export
  #elif defined(MI_SHARED_LIB_EXPORT)
    #define mi_decl_export  __declspec(dllexport)
  #else
    #define mi_decl_export  __declspec(dllimport)
  #endif
  #if defined(__MINGW32__)
    #define mi_decl_restrict
    #define mi_attr_malloc  __attribute__((malloc))
  #else
    #if (_MSC_VER >= 1900) && !defined(__EDG__)
      #define mi_decl_restrict  __declspec(allocator) __declspec(restrict)
    #else
      #define mi_decl_restrict  __declspec(restrict)
    #endif
    #define mi_attr_malloc
  #endif
  #define mi_cdecl  __cdecl
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
#elif defined(__GNUC__)  // includes clang and icc
  #if defined(MI_SHARED_LIB) && defined(MI_SHARED_LIB_EXPORT)
    #define mi_decl_export  __attribute__((visibility("default")))
  #else
    #define mi_decl_export
  #endif
  #define mi_cdecl  // leads to warnings... __attribute__((cdecl))
  #define mi_decl_restrict
  #define mi_attr_malloc  __attribute__((malloc))
  #if (defined(__clang_major__) && (__clang_major__ < 4)) || (__GNUC__ < 5)
    #define mi_attr_alloc_size(s)
    #define mi_attr_alloc_size2(s1,s2)
    #define mi_attr_alloc_align(p)
  #elif defined(__INTEL_COMPILER)
    #define mi_attr_alloc_size(s)       __attribute__((alloc_size(s)))
    #define mi_attr_alloc_size2(s1,s2)  __attribute__((alloc_size(s1,s2)))
    #define mi_attr_alloc_align(p)
  #else
    #define mi_attr_alloc_size(s)       __attribute__((alloc_size(s)))
    #define mi_attr_alloc_size2(s1,s2)  __attribute__((alloc_size(s1,s2)))
    #define mi_attr_alloc_align(p)      __attribute__((alloc_align(p)))
  #endif
#else
  #define mi_cdecl
  #define mi_decl_export
  #define mi_decl_restrict
  #define mi_attr_malloc
  #define mi_attr_alloc_size(s)
  #define mi_attr_alloc_size2(s1,s2)
  #define mi_attr_alloc_align(p)
#endif

// ------------------------------------------------------
// Includes
// ------------------------------------------------------

#include <stddef.h>   // size_t
#include <stdbool.h>  // bool
#include <stdint.h>   // INTPTR_MAX

#ifdef __cplusplus
extern "C" {
#endif

// ------------------------------------------------------
// Standard malloc interface
// ------------------------------------------------------

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_export void* mi_expand(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);

mi_decl_export void mi_free(void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;
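
// [Editor's note] Illustrative sketch, not part of the upstream header: a
// minimal use of the standard interface above, assuming a program that
// includes <mimalloc.h> and links against the mimalloc library.
//
//   #include <mimalloc.h>
//
//   void example_basic(void) {
//     void* p = mi_malloc(64);             // uninitialized 64-byte block
//     void* q = mi_calloc(8, 16);          // zero-initialized 8*16 bytes
//     p = mi_realloc(p, 256);              // grow the first block
//     char* s = mi_strdup("borealis");     // heap copy of a string
//     mi_free(s); mi_free(q); mi_free(p);  // every block is released with mi_free
//   }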

// ------------------------------------------------------
// Extended functionality
// ------------------------------------------------------
#define MI_SMALL_WSIZE_MAX  (128)
#define MI_SMALL_SIZE_MAX   (MI_SMALL_WSIZE_MAX*sizeof(void*))

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);

mi_decl_nodiscard mi_decl_export size_t mi_usable_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_good_size(size_t size) mi_attr_noexcept;


// ------------------------------------------------------
// Internals
// ------------------------------------------------------

typedef void (mi_cdecl mi_deferred_free_fun)(bool force, unsigned long long heartbeat, void* arg);
mi_decl_export void mi_register_deferred_free(mi_deferred_free_fun* deferred_free, void* arg) mi_attr_noexcept;

typedef void (mi_cdecl mi_output_fun)(const char* msg, void* arg);
mi_decl_export void mi_register_output(mi_output_fun* out, void* arg) mi_attr_noexcept;

typedef void (mi_cdecl mi_error_fun)(int err, void* arg);
mi_decl_export void mi_register_error(mi_error_fun* fun, void* arg);

mi_decl_export void mi_collect(bool force) mi_attr_noexcept;
mi_decl_export int  mi_version(void) mi_attr_noexcept;
mi_decl_export void mi_stats_reset(void) mi_attr_noexcept;
mi_decl_export void mi_stats_merge(void) mi_attr_noexcept;
mi_decl_export void mi_stats_print(void* out) mi_attr_noexcept;  // backward compatibility: `out` is ignored and should be NULL
mi_decl_export void mi_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;

mi_decl_export void mi_process_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_init(void) mi_attr_noexcept;
mi_decl_export void mi_thread_done(void) mi_attr_noexcept;
mi_decl_export void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept;

mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs,
                                    size_t* current_rss, size_t* peak_rss,
                                    size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept;
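
// [Editor's note] Hedged example (not upstream): routing mimalloc's messages
// and statistics to a custom sink with mi_register_output, then printing the
// statistics gathered so far to the same sink.
//
//   #include <mimalloc.h>
//   #include <stdio.h>
//
//   static void log_to_stderr(const char* msg, void* arg) {
//     (void)arg;
//     fputs(msg, stderr);   // receives every message mimalloc would print
//   }
//
//   void example_stats(void) {
//     mi_register_output(&log_to_stderr, NULL);
//     mi_stats_print_out(&log_to_stderr, NULL);
//   }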

// -------------------------------------------------------------------------------------
// Aligned allocation
// Note that `alignment` always follows `size` for consistency with unaligned
// allocation, but unfortunately this differs from `posix_memalign` and `aligned_alloc`.
// -------------------------------------------------------------------------------------

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(1,2);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
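
// [Editor's note] Illustrative sketch (not upstream): note the argument order,
// size first and alignment second, unlike aligned_alloc/posix_memalign.
//
//   void* buf = mi_malloc_aligned(1024, 64);  // 1 KiB at a 64-byte boundary
//   buf = mi_realloc_aligned(buf, 4096, 64);  // reallocation keeps the alignment
//   mi_free(buf);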


// -------------------------------------------------------------------------------------
// Heaps: first-class, but can only allocate from the same thread that created them.
// -------------------------------------------------------------------------------------

struct mi_heap_s;
typedef struct mi_heap_s mi_heap_t;

mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new(void);
mi_decl_export void       mi_heap_delete(mi_heap_t* heap);
mi_decl_export void       mi_heap_destroy(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_set_default(mi_heap_t* heap);
mi_decl_export mi_heap_t* mi_heap_get_default(void);
mi_decl_export mi_heap_t* mi_heap_get_backing(void);
mi_decl_export void       mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept;

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap, size_t count, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_small(mi_heap_t* heap, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);

mi_decl_nodiscard mi_decl_export void* mi_heap_realloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocn(mi_heap_t* heap, void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);
mi_decl_nodiscard mi_decl_export void* mi_heap_reallocf(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);

mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const char* s, size_t n) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name) mi_attr_noexcept mi_attr_malloc;

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_zalloc_aligned_at(mi_heap_t* heap, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap, size_t count, size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_calloc_aligned_at(mi_heap_t* heap, size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size2(2, 3);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_realloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
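
// [Editor's note] Hedged example (not upstream): a short-lived per-task heap.
// mi_heap_destroy frees every block in the heap at once, so individual
// mi_free calls can be skipped; use mi_heap_delete instead if the blocks
// should migrate to the default heap. Allocate only from the thread that
// created the heap.
//
//   void example_heap(void) {
//     mi_heap_t* heap = mi_heap_new();
//     void* a = mi_heap_malloc(heap, 128);
//     char* s = mi_heap_strdup(heap, "scratch");
//     (void)a; (void)s;
//     mi_heap_destroy(heap);  // releases a, s, and everything else in one go
//   }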


// --------------------------------------------------------------------------------
// Zero initialized re-allocation.
// Only valid on memory that was originally allocated with zero initialization too.
// e.g. `mi_calloc`, `mi_zalloc`, `mi_zalloc_aligned` etc.
// see <https://github.com/microsoft/mimalloc/issues/63#issuecomment-508272992>
// --------------------------------------------------------------------------------

mi_decl_nodiscard mi_decl_export void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc(void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);

mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(2) mi_attr_alloc_align(3);
mi_decl_nodiscard mi_decl_export void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(2,3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(2,3);

mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc(mi_heap_t* heap, void* p, size_t newsize) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t newcount, size_t size) mi_attr_noexcept mi_attr_alloc_size2(3,4);

mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned(mi_heap_t* heap, void* p, size_t newsize, size_t alignment) mi_attr_noexcept mi_attr_alloc_size(3) mi_attr_alloc_align(4);
mi_decl_nodiscard mi_decl_export void* mi_heap_rezalloc_aligned_at(mi_heap_t* heap, void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size(3);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept mi_attr_alloc_size2(3,4) mi_attr_alloc_align(5);
mi_decl_nodiscard mi_decl_export void* mi_heap_recalloc_aligned_at(mi_heap_t* heap, void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept mi_attr_alloc_size2(3,4);
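
// [Editor's note] Illustrative sketch (not upstream): growing a buffer so the
// newly added bytes are guaranteed to be zero as well. This is only valid
// because the block started life zero-initialized (here via mi_zalloc).
//
//   size_t n = 100;
//   int* counts = (int*)mi_zalloc(n * sizeof(int));
//   counts = (int*)mi_recalloc(counts, 2 * n, sizeof(int));  // new half is zeroed
//   mi_free(counts);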


// ------------------------------------------------------
// Analysis
// ------------------------------------------------------

mi_decl_export bool mi_heap_contains_block(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_heap_check_owned(mi_heap_t* heap, const void* p);
mi_decl_export bool mi_check_owned(const void* p);

// An area of heap space contains blocks of a single size.
typedef struct mi_heap_area_s {
  void*  blocks;           // start of the area containing heap blocks
  size_t reserved;         // bytes reserved for this area (virtual)
  size_t committed;        // current available bytes for this area
  size_t used;             // number of allocated blocks
  size_t block_size;       // size in bytes of each block
  size_t full_block_size;  // size in bytes of a full block including padding and metadata.
} mi_heap_area_t;

typedef bool (mi_cdecl mi_block_visit_fun)(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg);

mi_decl_export bool mi_heap_visit_blocks(const mi_heap_t* heap, bool visit_all_blocks, mi_block_visit_fun* visitor, void* arg);
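
// [Editor's note] Hedged example (not upstream): counting live blocks in a
// heap with mi_heap_visit_blocks. The visitor is also invoked once per area
// with a NULL block, and returning false stops the walk early.
//
//   static bool count_block(const mi_heap_t* heap, const mi_heap_area_t* area,
//                           void* block, size_t block_size, void* arg) {
//     (void)heap; (void)area; (void)block_size;
//     if (block != NULL) { (*(size_t*)arg)++; }  // skip the per-area callbacks
//     return true;                               // keep visiting
//   }
//
//   size_t live_blocks(mi_heap_t* heap) {
//     size_t count = 0;
//     mi_heap_visit_blocks(heap, true /* visit all blocks */, &count_block, &count);
//     return count;
//   }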

// Experimental
mi_decl_nodiscard mi_decl_export bool mi_is_in_heap_region(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export bool mi_is_redirected(void) mi_attr_noexcept;

mi_decl_export int mi_reserve_huge_os_pages_interleave(size_t pages, size_t numa_nodes, size_t timeout_msecs) mi_attr_noexcept;
mi_decl_export int mi_reserve_huge_os_pages_at(size_t pages, int numa_node, size_t timeout_msecs) mi_attr_noexcept;

mi_decl_export int  mi_reserve_os_memory(size_t size, bool commit, bool allow_large) mi_attr_noexcept;
mi_decl_export bool mi_manage_os_memory(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node) mi_attr_noexcept;

mi_decl_export void mi_debug_show_arenas(void) mi_attr_noexcept;

// Experimental: heaps associated with specific memory arenas
typedef int mi_arena_id_t;
mi_decl_export void* mi_arena_area(mi_arena_id_t arena_id, size_t* size);
mi_decl_export int   mi_reserve_huge_os_pages_at_ex(size_t pages, int numa_node, size_t timeout_msecs, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export int   mi_reserve_os_memory_ex(size_t size, bool commit, bool allow_large, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;
mi_decl_export bool  mi_manage_os_memory_ex(void* start, size_t size, bool is_committed, bool is_large, bool is_zero, int numa_node, bool exclusive, mi_arena_id_t* arena_id) mi_attr_noexcept;

#if MI_MALLOC_VERSION >= 182
// Create a heap that only allocates in the specified arena
mi_decl_nodiscard mi_decl_export mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id);
#endif

// deprecated
mi_decl_export int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept;


// ------------------------------------------------------
// Convenience
// ------------------------------------------------------

#define mi_malloc_tp(tp)        ((tp*)mi_malloc(sizeof(tp)))
#define mi_zalloc_tp(tp)        ((tp*)mi_zalloc(sizeof(tp)))
#define mi_calloc_tp(tp,n)      ((tp*)mi_calloc(n,sizeof(tp)))
#define mi_mallocn_tp(tp,n)     ((tp*)mi_mallocn(n,sizeof(tp)))
#define mi_reallocn_tp(p,tp,n)  ((tp*)mi_reallocn(p,n,sizeof(tp)))
#define mi_recalloc_tp(p,tp,n)  ((tp*)mi_recalloc(p,n,sizeof(tp)))

#define mi_heap_malloc_tp(hp,tp)        ((tp*)mi_heap_malloc(hp,sizeof(tp)))
#define mi_heap_zalloc_tp(hp,tp)        ((tp*)mi_heap_zalloc(hp,sizeof(tp)))
#define mi_heap_calloc_tp(hp,tp,n)      ((tp*)mi_heap_calloc(hp,n,sizeof(tp)))
#define mi_heap_mallocn_tp(hp,tp,n)     ((tp*)mi_heap_mallocn(hp,n,sizeof(tp)))
#define mi_heap_reallocn_tp(hp,p,tp,n)  ((tp*)mi_heap_reallocn(hp,p,n,sizeof(tp)))
#define mi_heap_recalloc_tp(hp,p,tp,n)  ((tp*)mi_heap_recalloc(hp,p,n,sizeof(tp)))
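
// [Editor's note] Illustrative sketch (not upstream): the *_tp convenience
// macros simply wrap the allocation calls with a cast to the requested type.
//
//   typedef struct point_s { int x, y; } point_t;
//   point_t* p  = mi_malloc_tp(point_t);      // one uninitialized point_t
//   point_t* ps = mi_calloc_tp(point_t, 16);  // sixteen zeroed point_t values
//   mi_free(ps); mi_free(p);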


// ------------------------------------------------------
// Options
// ------------------------------------------------------

typedef enum mi_option_e {
  // stable options
  mi_option_show_errors,               // print error messages
  mi_option_show_stats,                // print statistics on termination
  mi_option_verbose,                   // print verbose messages
  // the following options are experimental (see src/options.h)
  mi_option_eager_commit,              // eager commit segments? (after `eager_commit_delay` segments) (=1)
  mi_option_arena_eager_commit,        // eager commit arenas? Use 2 to enable just on overcommit systems (=2)
  mi_option_purge_decommits,           // should a memory purge decommit (or only reset) (=1)
  mi_option_allow_large_os_pages,      // allow large (2MiB) OS pages, implies eager commit
  mi_option_reserve_huge_os_pages,     // reserve N huge OS pages (1GiB/page) at startup
  mi_option_reserve_huge_os_pages_at,  // reserve huge OS pages at a specific NUMA node
  mi_option_reserve_os_memory,         // reserve specified amount of OS memory in an arena at startup
  mi_option_deprecated_segment_cache,
  mi_option_deprecated_page_reset,
  mi_option_abandoned_page_purge,      // immediately purge delayed purges on thread termination
  mi_option_deprecated_segment_reset,
  mi_option_eager_commit_delay,
  mi_option_purge_delay,               // memory purging is delayed by N milliseconds; use 0 for immediate purging or -1 for no purging at all.
  mi_option_use_numa_nodes,            // 0 = use all available numa nodes, otherwise use at most N nodes.
  mi_option_limit_os_alloc,            // 1 = do not use OS memory for allocation (but only programmatically reserved arenas)
  mi_option_os_tag,                    // tag used for OS logging (macOS only for now)
  mi_option_max_errors,                // issue at most N error messages
  mi_option_max_warnings,              // issue at most N warning messages
  mi_option_max_segment_reclaim,
  mi_option_destroy_on_exit,           // if set, release all memory on exit; sometimes used for dynamic unloading but can be unsafe.
  mi_option_arena_reserve,             // initial memory size in KiB for arena reservation (1GiB on 64-bit)
  mi_option_arena_purge_mult,
  mi_option_purge_extend_delay,
  _mi_option_last,
  // legacy option names
  mi_option_large_os_pages = mi_option_allow_large_os_pages,
  mi_option_eager_region_commit = mi_option_arena_eager_commit,
  mi_option_reset_decommits = mi_option_purge_decommits,
  mi_option_reset_delay = mi_option_purge_delay,
  mi_option_abandoned_page_reset = mi_option_abandoned_page_purge
} mi_option_t;


mi_decl_nodiscard mi_decl_export bool mi_option_is_enabled(mi_option_t option);
mi_decl_export void mi_option_enable(mi_option_t option);
mi_decl_export void mi_option_disable(mi_option_t option);
mi_decl_export void mi_option_set_enabled(mi_option_t option, bool enable);
mi_decl_export void mi_option_set_enabled_default(mi_option_t option, bool enable);

mi_decl_nodiscard mi_decl_export long   mi_option_get(mi_option_t option);
mi_decl_nodiscard mi_decl_export long   mi_option_get_clamp(mi_option_t option, long min, long max);
mi_decl_nodiscard mi_decl_export size_t mi_option_get_size(mi_option_t option);
mi_decl_export void mi_option_set(mi_option_t option, long value);
mi_decl_export void mi_option_set_default(mi_option_t option, long value);
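
// [Editor's note] Hedged example (not upstream): options are best set early,
// before the first allocation, or via environment variables such as
// MIMALLOC_SHOW_STATS.
//
//   mi_option_enable(mi_option_show_errors);    // print error messages
//   mi_option_set(mi_option_purge_delay, 100);  // purge after 100 ms
//   long delay = mi_option_get(mi_option_purge_delay);
//   (void)delay;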


// -------------------------------------------------------------------------------------------------------
// "mi" prefixed implementations of various posix, Unix, Windows, and C++ allocation functions.
// (This can be convenient when providing overrides of these functions as done in `mimalloc-override.h`.)
// note: we use `mi_cfree` as "checked free" and it checks if the pointer is in our heap before free-ing.
// -------------------------------------------------------------------------------------------------------

mi_decl_export void mi_cfree(void* p) mi_attr_noexcept;
mi_decl_export void* mi__expand(void* p, size_t newsize) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_size(const void* p) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_good_size(size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export size_t mi_malloc_usable_size(const void *p) mi_attr_noexcept;

mi_decl_export int mi_posix_memalign(void** p, size_t alignment, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_memalign(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_valloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_pvalloc(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_aligned_alloc(size_t alignment, size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(2) mi_attr_alloc_align(1);

mi_decl_nodiscard mi_decl_export void* mi_reallocarray(void* p, size_t count, size_t size) mi_attr_noexcept mi_attr_alloc_size2(2,3);
mi_decl_nodiscard mi_decl_export int   mi_reallocarr(void* p, size_t count, size_t size) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_recalloc(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_nodiscard mi_decl_export void* mi_aligned_offset_recalloc(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept;

mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned short* mi_wcsdup(const unsigned short* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_nodiscard mi_decl_export mi_decl_restrict unsigned char* mi_mbsdup(const unsigned char* s) mi_attr_noexcept mi_attr_malloc;
mi_decl_export int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept;
mi_decl_export int mi_wdupenv_s(unsigned short** buf, size_t* size, const unsigned short* name) mi_attr_noexcept;

mi_decl_export void mi_free_size(void* p, size_t size) mi_attr_noexcept;
mi_decl_export void mi_free_size_aligned(void* p, size_t size, size_t alignment) mi_attr_noexcept;
mi_decl_export void mi_free_aligned(void* p, size_t alignment) mi_attr_noexcept;

// The `mi_new` wrappers implement C++ semantics on out-of-memory instead of directly returning `NULL`.
// (and call `std::get_new_handler` and potentially raise a `std::bad_alloc` exception).
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new(size_t size) mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned(size_t size, size_t alignment) mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_nothrow(size_t size) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_aligned_nothrow(size_t size, size_t alignment) mi_attr_noexcept mi_attr_malloc mi_attr_alloc_size(1) mi_attr_alloc_align(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_new_n(size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(1, 2);
mi_decl_nodiscard mi_decl_export void* mi_new_realloc(void* p, size_t newsize) mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export void* mi_new_reallocn(void* p, size_t newcount, size_t size) mi_attr_alloc_size2(2, 3);

mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) mi_attr_malloc mi_attr_alloc_size(2);
mi_decl_nodiscard mi_decl_export mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) mi_attr_malloc mi_attr_alloc_size2(2, 3);

#ifdef __cplusplus
}
#endif

// ---------------------------------------------------------------------------------------------
// Implement the C++ std::allocator interface for use in STL containers.
// (note: see `mimalloc-new-delete.h` for overriding the new/delete operators globally)
// ---------------------------------------------------------------------------------------------
#ifdef __cplusplus

#include <cstddef>      // std::size_t
#include <cstdint>      // PTRDIFF_MAX
#if (__cplusplus >= 201103L) || (_MSC_VER > 1900)  // C++11
#include <type_traits>  // std::true_type
#include <utility>      // std::forward
#endif

template<class T> struct _mi_stl_allocator_common {
  typedef T                 value_type;
  typedef std::size_t       size_type;
  typedef std::ptrdiff_t    difference_type;
  typedef value_type&       reference;
  typedef value_type const& const_reference;
  typedef value_type*       pointer;
  typedef value_type const* const_pointer;

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using propagate_on_container_copy_assignment = std::true_type;
  using propagate_on_container_move_assignment = std::true_type;
  using propagate_on_container_swap            = std::true_type;
  template <class U, class ...Args> void construct(U* p, Args&& ...args) { ::new(p) U(std::forward<Args>(args)...); }
  template <class U> void destroy(U* p) mi_attr_noexcept { p->~U(); }
  #else
  void construct(pointer p, value_type const& val) { ::new(p) value_type(val); }
  void destroy(pointer p) { p->~value_type(); }
  #endif

  size_type     max_size() const mi_attr_noexcept { return (PTRDIFF_MAX/sizeof(value_type)); }
  pointer       address(reference x) const { return &x; }
  const_pointer address(const_reference x) const { return &x; }
};

template<class T> struct mi_stl_allocator : public _mi_stl_allocator_common<T> {
  using typename _mi_stl_allocator_common<T>::size_type;
  using typename _mi_stl_allocator_common<T>::value_type;
  using typename _mi_stl_allocator_common<T>::pointer;
  template <class U> struct rebind { typedef mi_stl_allocator<U> other; };

  mi_stl_allocator() mi_attr_noexcept = default;
  mi_stl_allocator(const mi_stl_allocator&) mi_attr_noexcept = default;
  template<class U> mi_stl_allocator(const mi_stl_allocator<U>&) mi_attr_noexcept { }
  mi_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T* p, size_type) { mi_free(p); }

  #if (__cplusplus >= 201703L)  // C++17
  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_new_n(count, sizeof(T))); }
  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
  #else
  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_new_n(count, sizeof(value_type))); }
  #endif

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using is_always_equal = std::true_type;
  #endif
};

template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return true; }
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
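
// [Editor's note] Illustrative sketch (not upstream): plugging the allocator
// into a standard container so its element storage comes from mimalloc.
//
//   #include <vector>
//
//   void example_stl() {
//     std::vector<int, mi_stl_allocator<int>> v;
//     v.reserve(1024);                            // buffer comes from mi_new_n
//     for (int i = 0; i < 1024; i++) { v.push_back(i); }
//   }                                             // freed with mi_free on destruction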


#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)  // C++11
#define MI_HAS_HEAP_STL_ALLOCATOR 1

#include <memory>  // std::shared_ptr

// Common base class for STL allocators in a specific heap
template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
  using typename _mi_stl_allocator_common<T>::size_type;
  using typename _mi_stl_allocator_common<T>::value_type;
  using typename _mi_stl_allocator_common<T>::pointer;

  _mi_heap_stl_allocator_common(mi_heap_t* hp) : heap(hp) { }  /* will not delete nor destroy the passed in heap */

  #if (__cplusplus >= 201703L)  // C++17
  mi_decl_nodiscard T* allocate(size_type count) { return static_cast<T*>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(T))); }
  mi_decl_nodiscard T* allocate(size_type count, const void*) { return allocate(count); }
  #else
  mi_decl_nodiscard pointer allocate(size_type count, const void* = 0) { return static_cast<pointer>(mi_heap_alloc_new_n(this->heap.get(), count, sizeof(value_type))); }
  #endif

  #if ((__cplusplus >= 201103L) || (_MSC_VER > 1900))  // C++11
  using is_always_equal = std::false_type;
  #endif

  void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
  template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }

protected:
  std::shared_ptr<mi_heap_t> heap;
  template<class U, bool D> friend struct _mi_heap_stl_allocator_common;

  _mi_heap_stl_allocator_common() {
    mi_heap_t* hp = mi_heap_new();
    this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete));  /* calls heap_delete/destroy when the refcount drops to zero */
  }
  _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
  template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }

private:
  static void heap_delete(mi_heap_t* hp)  { if (hp != NULL) { mi_heap_delete(hp); } }
  static void heap_destroy(mi_heap_t* hp) { if (hp != NULL) { mi_heap_destroy(hp); } }
};

// STL allocator allocation in a specific heap
template<class T> struct mi_heap_stl_allocator : public _mi_heap_stl_allocator_common<T, false> {
  using typename _mi_heap_stl_allocator_common<T, false>::size_type;
  mi_heap_stl_allocator() : _mi_heap_stl_allocator_common<T, false>() { }  // creates fresh heap that is deleted when the destructor is called
  mi_heap_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, false>(hp) { }  // no delete nor destroy on the passed in heap
  template<class U> mi_heap_stl_allocator(const mi_heap_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, false>(x) { }

  mi_heap_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T* p, size_type) { mi_free(p); }
  template<class U> struct rebind { typedef mi_heap_stl_allocator<U> other; };
};

template<class T1, class T2> bool operator==(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_stl_allocator<T1>& x, const mi_heap_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }
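
// [Editor's note] Hedged example (not upstream): a container bound to its own
// heap. The default-constructed allocator creates a fresh heap that is
// deleted (via the shared_ptr deleter above) once the last allocator copy
// goes away.
//
//   #include <vector>
//
//   void example_heap_stl() {
//     mi_heap_stl_allocator<int> alloc;  // owns a fresh heap
//     std::vector<int, mi_heap_stl_allocator<int>> v(alloc);
//     v.assign(256, 42);
//   }                                    // heap deleted with its last user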


// STL allocator allocation in a specific heap, where `free` does nothing and
// the heap is destroyed in one go on destruction -- use with care!
template<class T> struct mi_heap_destroy_stl_allocator : public _mi_heap_stl_allocator_common<T, true> {
  using typename _mi_heap_stl_allocator_common<T, true>::size_type;
  mi_heap_destroy_stl_allocator() : _mi_heap_stl_allocator_common<T, true>() { }  // creates fresh heap that is destroyed when the destructor is called
  mi_heap_destroy_stl_allocator(mi_heap_t* hp) : _mi_heap_stl_allocator_common<T, true>(hp) { }  // no delete nor destroy on the passed in heap
  template<class U> mi_heap_destroy_stl_allocator(const mi_heap_destroy_stl_allocator<U>& x) mi_attr_noexcept : _mi_heap_stl_allocator_common<T, true>(x) { }

  mi_heap_destroy_stl_allocator select_on_container_copy_construction() const { return *this; }
  void deallocate(T*, size_type) { /* do nothing as we destroy the heap on destruct. */ }
  template<class U> struct rebind { typedef mi_heap_destroy_stl_allocator<U> other; };
};

template<class T1, class T2> bool operator==(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (x.is_equal(y)); }
template<class T1, class T2> bool operator!=(const mi_heap_destroy_stl_allocator<T1>& x, const mi_heap_destroy_stl_allocator<T2>& y) mi_attr_noexcept { return (!x.is_equal(y)); }

#endif // C++11

#endif // __cplusplus

#endif
Dependencies/Python/include/internal/mimalloc/mimalloc/atomic.h (vendored, new file, 392 lines)
@@ -0,0 +1,392 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_ATOMIC_H
#define MIMALLOC_ATOMIC_H

// --------------------------------------------------------------------------------------------
// Atomics
// We need to be portable between C, C++, and MSVC.
// We base the primitives on the C/C++ atomics and create a minimal wrapper for MSVC in C compilation mode.
// This is why we try to use only `uintptr_t` and `<type>*` as atomic types.
// To gain better insight into the range of used atomics, we use explicitly named memory order operations
// instead of passing the memory order as a parameter.
// -----------------------------------------------------------------------------------------------

#if defined(__cplusplus)
// Use C++ atomics
#include <atomic>
#define _Atomic(tp)            std::atomic<tp>
#define mi_atomic(name)        std::atomic_##name
#define mi_memory_order(name)  std::memory_order_##name
#if (__cplusplus >= 202002L)  // c++20, see issue #571
#define MI_ATOMIC_VAR_INIT(x)  x
#elif !defined(ATOMIC_VAR_INIT)
#define MI_ATOMIC_VAR_INIT(x)  x
#else
#define MI_ATOMIC_VAR_INIT(x)  ATOMIC_VAR_INIT(x)
#endif
#elif defined(_MSC_VER)
// Use MSVC C wrapper for C11 atomics
#define _Atomic(tp)            tp
#define MI_ATOMIC_VAR_INIT(x)  x
#define mi_atomic(name)        mi_atomic_##name
#define mi_memory_order(name)  mi_memory_order_##name
#else
// Use C11 atomics
#include <stdatomic.h>
#define mi_atomic(name)        atomic_##name
#define mi_memory_order(name)  memory_order_##name
#if (__STDC_VERSION__ >= 201710L)  // c17, see issue #735
#define MI_ATOMIC_VAR_INIT(x)  x
#elif !defined(ATOMIC_VAR_INIT)
#define MI_ATOMIC_VAR_INIT(x)  x
#else
#define MI_ATOMIC_VAR_INIT(x)  ATOMIC_VAR_INIT(x)
#endif
#endif

// Various defines for all used memory orders in mimalloc
#define mi_atomic_cas_weak(p,expected,desired,mem_success,mem_fail)  \
  mi_atomic(compare_exchange_weak_explicit)(p,expected,desired,mem_success,mem_fail)

#define mi_atomic_cas_strong(p,expected,desired,mem_success,mem_fail)  \
  mi_atomic(compare_exchange_strong_explicit)(p,expected,desired,mem_success,mem_fail)

#define mi_atomic_load_acquire(p)                mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_load_relaxed(p)                mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_store_release(p,x)             mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_store_relaxed(p,x)             mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_exchange_release(p,x)          mi_atomic(exchange_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_exchange_acq_rel(p,x)          mi_atomic(exchange_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_cas_weak_release(p,exp,des)    mi_atomic_cas_weak(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_weak_acq_rel(p,exp,des)    mi_atomic_cas_weak(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))
#define mi_atomic_cas_strong_release(p,exp,des)  mi_atomic_cas_strong(p,exp,des,mi_memory_order(release),mi_memory_order(relaxed))
#define mi_atomic_cas_strong_acq_rel(p,exp,des)  mi_atomic_cas_strong(p,exp,des,mi_memory_order(acq_rel),mi_memory_order(acquire))

#define mi_atomic_add_relaxed(p,x)               mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_sub_relaxed(p,x)               mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(relaxed))
#define mi_atomic_add_acq_rel(p,x)               mi_atomic(fetch_add_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_sub_acq_rel(p,x)               mi_atomic(fetch_sub_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_and_acq_rel(p,x)               mi_atomic(fetch_and_explicit)(p,x,mi_memory_order(acq_rel))
#define mi_atomic_or_acq_rel(p,x)                mi_atomic(fetch_or_explicit)(p,x,mi_memory_order(acq_rel))

#define mi_atomic_increment_relaxed(p)           mi_atomic_add_relaxed(p,(uintptr_t)1)
#define mi_atomic_decrement_relaxed(p)           mi_atomic_sub_relaxed(p,(uintptr_t)1)
#define mi_atomic_increment_acq_rel(p)           mi_atomic_add_acq_rel(p,(uintptr_t)1)
#define mi_atomic_decrement_acq_rel(p)           mi_atomic_sub_acq_rel(p,(uintptr_t)1)
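
// [Editor's note] Illustrative sketch (not upstream): a simple statistics
// counter built from the named-memory-order macros above; relaxed ordering is
// sufficient for a monotone counter that is only read for reporting.
//
//   static _Atomic(uintptr_t) alloc_count;
//
//   static void note_alloc(void) {
//     mi_atomic_increment_relaxed(&alloc_count);
//   }
//
//   static uintptr_t current_count(void) {
//     return mi_atomic_load_relaxed(&alloc_count);
//   }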

static inline void mi_atomic_yield(void);
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add);
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub);


#if defined(__cplusplus) || !defined(_MSC_VER)

// In C++/C11 atomics we have polymorphic atomics so can use the typed `ptr` variants (where `tp` is the type of atomic value)
// We use these macros so we can provide a typed wrapper in MSVC in C compilation mode as well
#define mi_atomic_load_ptr_acquire(tp,p)  mi_atomic_load_acquire(p)
#define mi_atomic_load_ptr_relaxed(tp,p)  mi_atomic_load_relaxed(p)

// In C++ we need to add casts to help resolve templates if NULL is passed
#if defined(__cplusplus)
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,(tp*)x)
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,(tp*)x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,(tp*)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,(tp*)des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,(tp*)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,(tp*)x)
#else
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release(p,x)
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed(p,x)
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release(p,exp,des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel(p,exp,des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release(p,exp,des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          mi_atomic_exchange_release(p,x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          mi_atomic_exchange_acq_rel(p,x)
#endif

// These are used by the statistics
static inline int64_t mi_atomic_addi64_relaxed(volatile int64_t* p, int64_t add) {
  return mi_atomic(fetch_add_explicit)((_Atomic(int64_t)*)p, add, mi_memory_order(relaxed));
}
static inline void mi_atomic_maxi64_relaxed(volatile int64_t* p, int64_t x) {
  int64_t current = mi_atomic_load_relaxed((_Atomic(int64_t)*)p);
  while (current < x && !mi_atomic_cas_weak_release((_Atomic(int64_t)*)p, &current, x)) { /* nothing */ };
}

// Used by timers
#define mi_atomic_loadi64_acquire(p)     mi_atomic(load_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p)     mi_atomic(load_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x)  mi_atomic(store_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x)  mi_atomic(store_explicit)(p,x,mi_memory_order(relaxed))

#define mi_atomic_casi64_strong_acq_rel(p,e,d)  mi_atomic_cas_strong_acq_rel(p,e,d)
#define mi_atomic_addi64_acq_rel(p,i)           mi_atomic_add_acq_rel(p,i)


#elif defined(_MSC_VER)

// MSVC C compilation wrapper that uses Interlocked operations to model C11 atomics.
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <intrin.h>
#ifdef _WIN64
typedef LONG64 msc_intptr_t;
#define MI_64(f) f##64
#else
typedef LONG msc_intptr_t;
#define MI_64(f) f
#endif

typedef enum mi_memory_order_e {
  mi_memory_order_relaxed,
  mi_memory_order_consume,
  mi_memory_order_acquire,
  mi_memory_order_release,
  mi_memory_order_acq_rel,
  mi_memory_order_seq_cst
} mi_memory_order;

static inline uintptr_t mi_atomic_fetch_add_explicit(_Atomic(uintptr_t)*p, uintptr_t add, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, (msc_intptr_t)add);
}
static inline uintptr_t mi_atomic_fetch_sub_explicit(_Atomic(uintptr_t)*p, uintptr_t sub, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchangeAdd)((volatile msc_intptr_t*)p, -((msc_intptr_t)sub));
}
static inline uintptr_t mi_atomic_fetch_and_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedAnd)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline uintptr_t mi_atomic_fetch_or_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedOr)((volatile msc_intptr_t*)p, (msc_intptr_t)x);
}
static inline bool mi_atomic_compare_exchange_strong_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  (void)(mo1); (void)(mo2);
  uintptr_t read = (uintptr_t)MI_64(_InterlockedCompareExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)desired, (msc_intptr_t)(*expected));
  if (read == *expected) {
    return true;
  }
  else {
    *expected = read;
    return false;
  }
}
static inline bool mi_atomic_compare_exchange_weak_explicit(_Atomic(uintptr_t)*p, uintptr_t* expected, uintptr_t desired, mi_memory_order mo1, mi_memory_order mo2) {
  return mi_atomic_compare_exchange_strong_explicit(p, expected, desired, mo1, mo2);
}
static inline uintptr_t mi_atomic_exchange_explicit(_Atomic(uintptr_t)*p, uintptr_t exchange, mi_memory_order mo) {
  (void)(mo);
  return (uintptr_t)MI_64(_InterlockedExchange)((volatile msc_intptr_t*)p, (msc_intptr_t)exchange);
}
static inline void mi_atomic_thread_fence(mi_memory_order mo) {
  (void)(mo);
  _Atomic(uintptr_t) x = 0;
  mi_atomic_exchange_explicit(&x, 1, mo);
}
static inline uintptr_t mi_atomic_load_explicit(_Atomic(uintptr_t) const* p, mi_memory_order mo) {
  (void)(mo);
  #if defined(_M_IX86) || defined(_M_X64)
  return *p;
  #else
  uintptr_t x = *p;
  if (mo > mi_memory_order_relaxed) {
    while (!mi_atomic_compare_exchange_weak_explicit((_Atomic(uintptr_t)*)p, &x, x, mo, mi_memory_order_relaxed)) { /* nothing */ };
  }
  return x;
  #endif
}
static inline void mi_atomic_store_explicit(_Atomic(uintptr_t)*p, uintptr_t x, mi_memory_order mo) {
  (void)(mo);
  #if defined(_M_IX86) || defined(_M_X64)
  *p = x;
  #else
  mi_atomic_exchange_explicit(p, x, mo);
  #endif
}
static inline int64_t mi_atomic_loadi64_explicit(_Atomic(int64_t)*p, mi_memory_order mo) {
  (void)(mo);
  #if defined(_M_X64)
  return *p;
  #else
  int64_t old = *p;
  int64_t x = old;
  while ((old = InterlockedCompareExchange64(p, x, old)) != x) {
    x = old;
  }
  return x;
  #endif
}
static inline void mi_atomic_storei64_explicit(_Atomic(int64_t)*p, int64_t x, mi_memory_order mo) {
  (void)(mo);
  #if defined(x_M_IX86) || defined(_M_X64)
  *p = x;
  #else
  InterlockedExchange64(p, x);
  #endif
}

// These are used by the statistics
static inline int64_t mi_atomic_addi64_relaxed(volatile _Atomic(int64_t)*p, int64_t add) {
  #ifdef _WIN64
  return (int64_t)mi_atomic_addi((int64_t*)p, add);
  #else
  int64_t current;
  int64_t sum;
  do {
    current = *p;
    sum = current + add;
  } while (_InterlockedCompareExchange64(p, sum, current) != current);
  return current;
  #endif
}
static inline void mi_atomic_maxi64_relaxed(volatile _Atomic(int64_t)*p, int64_t x) {
  int64_t current;
  do {
    current = *p;
  } while (current < x && _InterlockedCompareExchange64(p, x, current) != current);
}

static inline void mi_atomic_addi64_acq_rel(volatile _Atomic(int64_t*)p, int64_t i) {
  mi_atomic_addi64_relaxed(p, i);
}

static inline bool mi_atomic_casi64_strong_acq_rel(volatile _Atomic(int64_t*)p, int64_t* exp, int64_t des) {
  int64_t read = _InterlockedCompareExchange64(p, des, *exp);
  if (read == *exp) {
    return true;
  }
  else {
    *exp = read;
    return false;
  }
}

// The pointer macros cast to `uintptr_t`.
#define mi_atomic_load_ptr_acquire(tp,p)                (tp*)mi_atomic_load_acquire((_Atomic(uintptr_t)*)(p))
#define mi_atomic_load_ptr_relaxed(tp,p)                (tp*)mi_atomic_load_relaxed((_Atomic(uintptr_t)*)(p))
#define mi_atomic_store_ptr_release(tp,p,x)             mi_atomic_store_release((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_store_ptr_relaxed(tp,p,x)             mi_atomic_store_relaxed((_Atomic(uintptr_t)*)(p),(uintptr_t)(x))
#define mi_atomic_cas_ptr_weak_release(tp,p,exp,des)    mi_atomic_cas_weak_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_weak_acq_rel(tp,p,exp,des)    mi_atomic_cas_weak_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_cas_ptr_strong_release(tp,p,exp,des)  mi_atomic_cas_strong_release((_Atomic(uintptr_t)*)(p),(uintptr_t*)exp,(uintptr_t)des)
#define mi_atomic_exchange_ptr_release(tp,p,x)          (tp*)mi_atomic_exchange_release((_Atomic(uintptr_t)*)(p),(uintptr_t)x)
#define mi_atomic_exchange_ptr_acq_rel(tp,p,x)          (tp*)mi_atomic_exchange_acq_rel((_Atomic(uintptr_t)*)(p),(uintptr_t)x)

#define mi_atomic_loadi64_acquire(p)     mi_atomic(loadi64_explicit)(p,mi_memory_order(acquire))
#define mi_atomic_loadi64_relaxed(p)     mi_atomic(loadi64_explicit)(p,mi_memory_order(relaxed))
#define mi_atomic_storei64_release(p,x)  mi_atomic(storei64_explicit)(p,x,mi_memory_order(release))
#define mi_atomic_storei64_relaxed(p,x)  mi_atomic(storei64_explicit)(p,x,mi_memory_order(relaxed))


#endif


// Atomically add a signed value; returns the previous value.
static inline intptr_t mi_atomic_addi(_Atomic(intptr_t)*p, intptr_t add) {
  return (intptr_t)mi_atomic_add_acq_rel((_Atomic(uintptr_t)*)p, (uintptr_t)add);
}

// Atomically subtract a signed value; returns the previous value.
static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
  return (intptr_t)mi_atomic_addi(p, -sub);
}

typedef _Atomic(uintptr_t) mi_atomic_once_t;

// Returns true only on the first invocation
static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
  if (mi_atomic_load_relaxed(once) != 0) return false;  // quick test
  uintptr_t expected = 0;
  return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1);  // try to set to 1
}
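
// [Editor's note] Hedged example (not upstream): one-time initialization that
// is safe to race; exactly one caller sees true and runs the init code.
//
//   static mi_atomic_once_t init_once;
//
//   static void ensure_init(void) {
//     if (mi_atomic_once(&init_once)) {
//       // runs once, on the first thread to get here
//     }
//   }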

typedef _Atomic(uintptr_t) mi_atomic_guard_t;

// Allows only one thread to execute at a time
#define mi_atomic_guard(guard) \
  uintptr_t _mi_guard_expected = 0; \
  for(bool _mi_guard_once = true; \
      _mi_guard_once && mi_atomic_cas_strong_acq_rel(guard,&_mi_guard_expected,(uintptr_t)1); \
      (mi_atomic_store_release(guard,(uintptr_t)0), _mi_guard_once = false) )
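
// [Editor's note] Illustrative sketch (not upstream): the guard expands to a
// for-statement, so it is used like a block scope. Threads that fail the CAS
// skip the guarded block rather than wait for it.
//
//   static mi_atomic_guard_t print_guard;
//
//   static void report(void) {
//     mi_atomic_guard(&print_guard) {
//       // at most one thread at a time executes this block; others skip it
//     }
//   }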
|
||||
|
||||
|
||||
// Yield
|
||||
#if defined(__cplusplus)
|
||||
#include <thread>
|
||||
static inline void mi_atomic_yield(void) {
|
||||
std::this_thread::yield();
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
static inline void mi_atomic_yield(void) {
|
||||
YieldProcessor();
|
||||
}
|
||||
#elif defined(__SSE2__)
|
||||
#include <emmintrin.h>
|
||||
static inline void mi_atomic_yield(void) {
|
||||
_mm_pause();
|
||||
}
|
||||
#elif (defined(__GNUC__) || defined(__clang__)) && \
|
||||
(defined(__x86_64__) || defined(__i386__) || \
|
||||
defined(__aarch64__) || defined(__arm__) || \
|
||||
defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__))
|
||||
#if defined(__x86_64__) || defined(__i386__)
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile ("pause" ::: "memory");
|
||||
}
|
||||
#elif defined(__aarch64__)
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile("wfe");
|
||||
}
|
||||
#elif defined(__arm__)
|
||||
#if __ARM_ARCH >= 7
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile("yield" ::: "memory");
|
||||
}
|
||||
#else
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile ("nop" ::: "memory");
|
||||
}
|
||||
#endif
|
||||
#elif defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) || defined(__POWERPC__)
|
||||
#ifdef __APPLE__
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ volatile ("or r27,r27,r27" ::: "memory");
|
||||
}
|
||||
#else
|
||||
static inline void mi_atomic_yield(void) {
|
||||
__asm__ __volatile__ ("or 27,27,27" ::: "memory");
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
#elif defined(__sun)
|
||||
// Fallback for other archs
|
||||
#include <synch.h>
|
||||
static inline void mi_atomic_yield(void) {
|
||||
smt_pause();
|
||||
}
|
||||
#elif defined(__wasi__)
|
||||
#include <sched.h>
|
||||
static inline void mi_atomic_yield(void) {
|
||||
sched_yield();
|
||||
}
|
||||
#else
|
||||
#include <unistd.h>
|
||||
static inline void mi_atomic_yield(void) {
|
||||
sleep(0);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif // __MIMALLOC_ATOMIC_H
969
Dependencies/Python/include/internal/mimalloc/mimalloc/internal.h
vendored
Normal file
@@ -0,0 +1,969 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_INTERNAL_H
#define MIMALLOC_INTERNAL_H


// --------------------------------------------------------------------------
// This file contains the internal APIs of mimalloc and various utility
// functions and macros.
// --------------------------------------------------------------------------

#include "types.h"
#include "track.h"

#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
#else
#define mi_trace_message(...)
#endif

#if defined(__EMSCRIPTEN__) && !defined(__wasi__)
#define __wasi__
#endif

#if defined(__cplusplus)
#define mi_decl_externc extern "C"
#else
#define mi_decl_externc
#endif

// pthreads
#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
#endif

// "options.c"
void _mi_fputs(mi_output_fun* out, void* arg, const char* prefix, const char* message);
void _mi_fprintf(mi_output_fun* out, void* arg, const char* fmt, ...);
void _mi_warning_message(const char* fmt, ...);
void _mi_verbose_message(const char* fmt, ...);
void _mi_trace_message(const char* fmt, ...);
void _mi_options_init(void);
void _mi_error_message(int err, const char* fmt, ...);

// random.c
void _mi_random_init(mi_random_ctx_t* ctx);
void _mi_random_init_weak(mi_random_ctx_t* ctx);
void _mi_random_reinit_if_weak(mi_random_ctx_t * ctx);
void _mi_random_split(mi_random_ctx_t* ctx, mi_random_ctx_t* new_ctx);
uintptr_t _mi_random_next(mi_random_ctx_t* ctx);
uintptr_t _mi_heap_random_next(mi_heap_t* heap);
uintptr_t _mi_os_random_weak(uintptr_t extra_seed);
static inline uintptr_t _mi_random_shuffle(uintptr_t x);

// init.c
extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
bool _mi_preloading(void); // true while the C runtime is not initialized yet
mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
void _mi_thread_done(mi_heap_t* heap);
void _mi_thread_data_collect(void);
void _mi_tld_init(mi_tld_t* tld, mi_heap_t* bheap);

// os.c
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_memid_t* memid, mi_stats_t* stats);
void _mi_os_free(void* p, size_t size, mi_memid_t memid, mi_stats_t* stats);
void _mi_os_free_ex(void* p, size_t size, bool still_committed, mi_memid_t memid, mi_stats_t* stats);

size_t _mi_os_page_size(void);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
bool _mi_os_has_virtual_reserve(void);

bool _mi_os_purge(void* p, size_t size, mi_stats_t* stats);
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_protect(void* addr, size_t size);
bool _mi_os_unprotect(void* addr, size_t size);
bool _mi_os_purge_ex(void* p, size_t size, bool allow_reset, mi_stats_t* stats);

void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* stats);
void* _mi_os_alloc_aligned_at_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_memid_t* memid, mi_stats_t* tld_stats);

void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
bool _mi_os_use_large_page(size_t size, size_t alignment);
size_t _mi_os_large_page_size(void);

void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize, mi_memid_t* memid);

// arena.c
mi_arena_id_t _mi_arena_id_none(void);
void _mi_arena_free(void* p, size_t size, size_t still_committed_size, mi_memid_t memid, mi_stats_t* stats);
void* _mi_arena_alloc(size_t size, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool commit, bool allow_large, mi_arena_id_t req_arena_id, mi_memid_t* memid, mi_os_tld_t* tld);
bool _mi_arena_memid_is_suitable(mi_memid_t memid, mi_arena_id_t request_arena_id);
bool _mi_arena_contains(const void* p);
void _mi_arena_collect(bool force_purge, mi_stats_t* stats);
void _mi_arena_unsafe_destroy_all(mi_stats_t* stats);

// "segment-map.c"
void _mi_segment_map_allocated_at(const mi_segment_t* segment);
void _mi_segment_map_freed_at(const mi_segment_t* segment);

// "segment.c"
extern mi_abandoned_pool_t _mi_abandoned_default; // global abandoned pool
mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld);
void _mi_segment_page_free(mi_page_t* page, bool force, mi_segments_tld_t* tld);
void _mi_segment_page_abandon(mi_page_t* page, mi_segments_tld_t* tld);
bool _mi_segment_try_reclaim_abandoned( mi_heap_t* heap, bool try_all, mi_segments_tld_t* tld);
void _mi_segment_thread_collect(mi_segments_tld_t* tld);
bool _mi_abandoned_pool_visit_blocks(mi_abandoned_pool_t* pool, uint8_t page_tag, bool visit_blocks, mi_block_visit_fun* visitor, void* arg);

#if MI_HUGE_PAGE_ABANDON
void _mi_segment_huge_page_free(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#else
void _mi_segment_huge_page_reset(mi_segment_t* segment, mi_page_t* page, mi_block_t* block);
#endif

uint8_t* _mi_segment_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size); // page start for any page
void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(mi_abandoned_pool_t *pool);
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);

// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;

void _mi_page_retire(mi_page_t* page) mi_attr_noexcept; // free the page if there are no other pages with many free blocks
void _mi_page_unfull(mi_page_t* page);
void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force); // free the page
void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq); // abandon the page, to be picked up by another thread...
void _mi_heap_delayed_free_all(mi_heap_t* heap);
bool _mi_heap_delayed_free_partial(mi_heap_t* heap);
void _mi_heap_collect_retired(mi_heap_t* heap, bool force);

void _mi_page_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
bool _mi_page_try_use_delayed_free(mi_page_t* page, mi_delayed_t delay, bool override_never);
size_t _mi_page_queue_append(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_queue_t* append);
void _mi_deferred_free(mi_heap_t* heap, bool force);

void _mi_page_free_collect(mi_page_t* page, bool force);
void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page); // callback from segments

size_t _mi_bin_size(uint8_t bin); // for stats
uint8_t _mi_bin(size_t size); // for stats

// "heap.c"
void _mi_heap_init_ex(mi_heap_t* heap, mi_tld_t* tld, mi_arena_id_t arena_id, bool no_reclaim, uint8_t tag);
void _mi_heap_destroy_pages(mi_heap_t* heap);
void _mi_heap_collect_abandon(mi_heap_t* heap);
void _mi_heap_set_default_direct(mi_heap_t* heap);
bool _mi_heap_memid_is_suitable(mi_heap_t* heap, mi_memid_t memid);
void _mi_heap_unsafe_destroy_all(void);
void _mi_heap_area_init(mi_heap_area_t* area, mi_page_t* page);
bool _mi_heap_area_visit_blocks(const mi_heap_area_t* area, mi_page_t *page, mi_block_visit_fun* visitor, void* arg);

// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);

// "alloc.c"
void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t size, bool zero) mi_attr_noexcept; // called from `_mi_malloc_generic`
void* _mi_heap_malloc_zero(mi_heap_t* heap, size_t size, bool zero) mi_attr_noexcept;
void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept; // called from `_mi_heap_malloc_aligned`
void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero) mi_attr_noexcept;
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);

// option.c, c primitives
char _mi_toupper(char c);
int _mi_strnicmp(const char* s, const char* t, size_t n);
void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
void _mi_strlcat(char* dest, const char* src, size_t dest_size);
size_t _mi_strlen(const char* s);
size_t _mi_strnlen(const char* s, size_t max_len);


#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
#endif

// ------------------------------------------------------
// Branches
// ------------------------------------------------------

#if defined(__GNUC__) || defined(__clang__)
#define mi_unlikely(x)  (__builtin_expect(!!(x),false))
#define mi_likely(x)    (__builtin_expect(!!(x),true))
#elif (defined(__cplusplus) && (__cplusplus >= 202002L)) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define mi_unlikely(x)  (x) [[unlikely]]
#define mi_likely(x)    (x) [[likely]]
#else
#define mi_unlikely(x)  (x)
#define mi_likely(x)    (x)
#endif

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif


/* -----------------------------------------------------------
  Error codes passed to `_mi_fatal_error`
  All are recoverable but EFAULT is a serious error and aborts by default in secure mode.
  For portability define undefined error codes using common Unix codes:
  <https://www-numi.fnal.gov/offline_software/srt_public_context/WebDocs/Errors/unix_system_errors.html>
----------------------------------------------------------- */
#include <errno.h>
#ifndef EAGAIN      // double free
#define EAGAIN (11)
#endif
#ifndef ENOMEM      // out of memory
#define ENOMEM (12)
#endif
#ifndef EFAULT      // corrupted free-list or meta-data
#define EFAULT (14)
#endif
#ifndef EINVAL      // trying to free an invalid pointer
#define EINVAL (22)
#endif
#ifndef EOVERFLOW   // count*size overflow
#define EOVERFLOW (75)
#endif


/* -----------------------------------------------------------
  Inlined definitions
----------------------------------------------------------- */
#define MI_UNUSED(x) (void)(x)
#if (MI_DEBUG>0)
#define MI_UNUSED_RELEASE(x)
#else
#define MI_UNUSED_RELEASE(x) MI_UNUSED(x)
#endif

#define MI_INIT4(x)   x(),x(),x(),x()
#define MI_INIT8(x)   MI_INIT4(x),MI_INIT4(x)
#define MI_INIT16(x)  MI_INIT8(x),MI_INIT8(x)
#define MI_INIT32(x)  MI_INIT16(x),MI_INIT16(x)
#define MI_INIT64(x)  MI_INIT32(x),MI_INIT32(x)
#define MI_INIT128(x) MI_INIT64(x),MI_INIT64(x)
#define MI_INIT256(x) MI_INIT128(x),MI_INIT128(x)


#include <string.h>
// initialize a local variable to zero; use memset as compilers optimize constant sized memset's
#define _mi_memzero_var(x) memset(&x,0,sizeof(x))

// Is `x` a power of two? (0 is considered a power of two)
static inline bool _mi_is_power_of_two(uintptr_t x) {
  return ((x & (x - 1)) == 0);
}

// Is a pointer aligned?
static inline bool _mi_is_aligned(void* p, size_t alignment) {
  mi_assert_internal(alignment != 0);
  return (((uintptr_t)p % alignment) == 0);
}

// Align upwards
static inline uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) {  // power of two?
    return ((sz + mask) & ~mask);
  }
  else {
    return (((sz + mask)/alignment)*alignment);
  }
}

// Align downwards
static inline uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
  mi_assert_internal(alignment != 0);
  uintptr_t mask = alignment - 1;
  if ((alignment & mask) == 0) {  // power of two?
    return (sz & ~mask);
  }
  else {
    return ((sz / alignment) * alignment);
  }
}

// Divide upwards: `s <= _mi_divide_up(s,d)*d < s+d`.
static inline uintptr_t _mi_divide_up(uintptr_t size, size_t divider) {
  mi_assert_internal(divider != 0);
  return (divider == 0 ? size : ((size + divider - 1) / divider));
}
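
// Worked examples (illustration only): with a power-of-two alignment the mask
// path is taken, e.g. _mi_align_up(13,8) == 16, _mi_align_down(13,8) == 8, and
// _mi_divide_up(13,8) == 2; a non power-of-two alignment such as 24 takes the
// division path, e.g. _mi_align_up(50,24) == 72.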

// Is memory zero initialized?
static inline bool mi_mem_is_zero(const void* p, size_t size) {
  for (size_t i = 0; i < size; i++) {
    if (((uint8_t*)p)[i] != 0) return false;
  }
  return true;
}


// Align a byte size to a size in _machine words_,
// i.e. byte size == `wsize*sizeof(void*)`.
static inline size_t _mi_wsize_from_size(size_t size) {
  mi_assert_internal(size <= SIZE_MAX - sizeof(uintptr_t));
  return (size + sizeof(uintptr_t) - 1) / sizeof(uintptr_t);
}

// Overflow detecting multiply
#if __has_builtin(__builtin_umul_overflow) || (defined(__GNUC__) && (__GNUC__ >= 5))
#include <limits.h>   // UINT_MAX, ULONG_MAX
#if defined(_CLOCK_T) // for Illumos
#undef _CLOCK_T
#endif
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #if (SIZE_MAX == ULONG_MAX)
  return __builtin_umull_overflow(count, size, (unsigned long *)total);
  #elif (SIZE_MAX == UINT_MAX)
  return __builtin_umul_overflow(count, size, (unsigned int *)total);
  #else
  return __builtin_umulll_overflow(count, size, (unsigned long long *)total);
  #endif
}
#else /* __builtin_umul_overflow is unavailable */
static inline bool mi_mul_overflow(size_t count, size_t size, size_t* total) {
  #define MI_MUL_NO_OVERFLOW ((size_t)1 << (4*sizeof(size_t)))  // sqrt(SIZE_MAX)
  *total = count * size;
  // note: gcc/clang optimize this to directly check the overflow flag
  return ((size >= MI_MUL_NO_OVERFLOW || count >= MI_MUL_NO_OVERFLOW) && size > 0 && (SIZE_MAX / size) < count);
}
#endif

// Safe multiply `count*size` into `total`; return `true` on overflow.
static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* total) {
  if (count==1) {  // quick check for the case where count is one (common for C++ allocators)
    *total = size;
    return false;
  }
  else if mi_unlikely(mi_mul_overflow(count, size, total)) {
    #if MI_DEBUG > 0
    _mi_error_message(EOVERFLOW, "allocation request is too large (%zu * %zu bytes)\n", count, size);
    #endif
    *total = SIZE_MAX;
    return true;
  }
  else return false;
}
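
// Usage sketch (illustration only): how a calloc-style entry point can use
// `mi_count_size_overflow` before allocating. `my_calloc` is a hypothetical
// wrapper; `mi_zalloc` is the public zero-initializing allocation function.
#if 0
void* my_calloc(size_t count, size_t size) {
  size_t total;
  if (mi_count_size_overflow(count, size, &total)) return NULL;  // count*size overflowed
  return mi_zalloc(total);  // allocate `total` zero-initialized bytes
}
#endif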


/*----------------------------------------------------------------------------------------
  Heap functions
------------------------------------------------------------------------------------------- */

extern const mi_heap_t _mi_heap_empty;  // read-only empty heap, initial value of the thread local default heap

static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
  return (heap->tld->heap_backing == heap);
}

static inline bool mi_heap_is_initialized(mi_heap_t* heap) {
  mi_assert_internal(heap != NULL);
  return (heap != &_mi_heap_empty);
}

static inline uintptr_t _mi_ptr_cookie(const void* p) {
  extern mi_heap_t _mi_heap_main;
  mi_assert_internal(_mi_heap_main.cookie != 0);
  return ((uintptr_t)p ^ _mi_heap_main.cookie);
}

/* -----------------------------------------------------------
  Pages
----------------------------------------------------------- */

static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t size) {
  mi_assert_internal(size <= (MI_SMALL_SIZE_MAX + MI_PADDING_SIZE));
  const size_t idx = _mi_wsize_from_size(size);
  mi_assert_internal(idx < MI_PAGES_DIRECT);
  return heap->pages_free_direct[idx];
}

// Segment that contains the pointer
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
// and we need to align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
// therefore we align one byte before `p`.
static inline mi_segment_t* _mi_ptr_segment(const void* p) {
  mi_assert_internal(p != NULL);
  return (mi_segment_t*)(((uintptr_t)p - 1) & ~MI_SEGMENT_MASK);
}

static inline mi_page_t* mi_slice_to_page(mi_slice_t* s) {
  mi_assert_internal(s->slice_offset == 0 && s->slice_count > 0);
  return (mi_page_t*)(s);
}

static inline mi_slice_t* mi_page_to_slice(mi_page_t* p) {
  mi_assert_internal(p->slice_offset == 0 && p->slice_count > 0);
  return (mi_slice_t*)(p);
}

// Segment belonging to a page
static inline mi_segment_t* _mi_page_segment(const mi_page_t* page) {
  mi_segment_t* segment = _mi_ptr_segment(page);
  mi_assert_internal(segment == NULL || ((mi_slice_t*)page >= segment->slices && (mi_slice_t*)page < segment->slices + segment->slice_entries));
  return segment;
}

static inline mi_slice_t* mi_slice_first(const mi_slice_t* slice) {
  mi_slice_t* start = (mi_slice_t*)((uint8_t*)slice - slice->slice_offset);
  mi_assert_internal(start >= _mi_ptr_segment(slice)->slices);
  mi_assert_internal(start->slice_offset == 0);
  mi_assert_internal(start + start->slice_count > slice);
  return start;
}

// Get the page containing the pointer (performance critical as it is called in mi_free)
static inline mi_page_t* _mi_segment_page_of(const mi_segment_t* segment, const void* p) {
  mi_assert_internal(p > (void*)segment);
  ptrdiff_t diff = (uint8_t*)p - (uint8_t*)segment;
  mi_assert_internal(diff > 0 && diff <= (ptrdiff_t)MI_SEGMENT_SIZE);
  size_t idx = (size_t)diff >> MI_SEGMENT_SLICE_SHIFT;
  mi_assert_internal(idx <= segment->slice_entries);
  mi_slice_t* slice0 = (mi_slice_t*)&segment->slices[idx];
  mi_slice_t* slice = mi_slice_first(slice0);  // adjust to the block that holds the page data
  mi_assert_internal(slice->slice_offset == 0);
  mi_assert_internal(slice >= segment->slices && slice < segment->slices + segment->slice_entries);
  return mi_slice_to_page(slice);
}

// Quick page start for initialized pages
static inline uint8_t* _mi_page_start(const mi_segment_t* segment, const mi_page_t* page, size_t* page_size) {
  return _mi_segment_page_start(segment, page, page_size);
}

// Get the page containing the pointer
static inline mi_page_t* _mi_ptr_page(void* p) {
  return _mi_segment_page_of(_mi_ptr_segment(p), p);
}

// Get the block size of a page (special case for huge objects)
static inline size_t mi_page_block_size(const mi_page_t* page) {
  const size_t bsize = page->xblock_size;
  mi_assert_internal(bsize > 0);
  if mi_likely(bsize < MI_HUGE_BLOCK_SIZE) {
    return bsize;
  }
  else {
    size_t psize;
    _mi_segment_page_start(_mi_page_segment(page), page, &psize);
    return psize;
  }
}

static inline bool mi_page_is_huge(const mi_page_t* page) {
  return (_mi_page_segment(page)->kind == MI_SEGMENT_HUGE);
}

// Get the usable block size of a page without fixed padding.
// This may still include internal padding due to alignment and rounding up size classes.
static inline size_t mi_page_usable_block_size(const mi_page_t* page) {
  return mi_page_block_size(page) - MI_PADDING_SIZE;
}

// size of a segment
static inline size_t mi_segment_size(mi_segment_t* segment) {
  return segment->segment_slices * MI_SEGMENT_SLICE_SIZE;
}

static inline uint8_t* mi_segment_end(mi_segment_t* segment) {
  return (uint8_t*)segment + mi_segment_size(segment);
}

// Thread free access
static inline mi_block_t* mi_page_thread_free(const mi_page_t* page) {
  return (mi_block_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & ~3);
}

static inline mi_delayed_t mi_page_thread_free_flag(const mi_page_t* page) {
  return (mi_delayed_t)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xthread_free) & 3);
}

// Heap access
static inline mi_heap_t* mi_page_heap(const mi_page_t* page) {
  return (mi_heap_t*)(mi_atomic_load_relaxed(&((mi_page_t*)page)->xheap));
}

static inline void mi_page_set_heap(mi_page_t* page, mi_heap_t* heap) {
  mi_assert_internal(mi_page_thread_free_flag(page) != MI_DELAYED_FREEING);
  mi_atomic_store_release(&page->xheap,(uintptr_t)heap);
}

// Thread free flag helpers
static inline mi_block_t* mi_tf_block(mi_thread_free_t tf) {
  return (mi_block_t*)(tf & ~0x03);
}
static inline mi_delayed_t mi_tf_delayed(mi_thread_free_t tf) {
  return (mi_delayed_t)(tf & 0x03);
}
static inline mi_thread_free_t mi_tf_make(mi_block_t* block, mi_delayed_t delayed) {
  return (mi_thread_free_t)((uintptr_t)block | (uintptr_t)delayed);
}
static inline mi_thread_free_t mi_tf_set_delayed(mi_thread_free_t tf, mi_delayed_t delayed) {
  return mi_tf_make(mi_tf_block(tf),delayed);
}
static inline mi_thread_free_t mi_tf_set_block(mi_thread_free_t tf, mi_block_t* block) {
  return mi_tf_make(block, mi_tf_delayed(tf));
}

// are all blocks in a page freed?
// note: needs an up-to-date `used` count (as the `xthread_free` list may not be empty); see `_mi_page_collect_free`.
static inline bool mi_page_all_free(const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  return (page->used == 0);
}

// are there any available blocks?
static inline bool mi_page_has_any_available(const mi_page_t* page) {
  mi_assert_internal(page != NULL && page->reserved > 0);
  return (page->used < page->reserved || (mi_page_thread_free(page) != NULL));
}

// are there immediately available blocks, i.e. blocks available on the free list?
static inline bool mi_page_immediate_available(const mi_page_t* page) {
  mi_assert_internal(page != NULL);
  return (page->free != NULL);
}

// is more than 7/8th of a page in use?
static inline bool mi_page_mostly_used(const mi_page_t* page) {
  if (page==NULL) return true;
  uint16_t frac = page->reserved / 8U;
  return (page->reserved - page->used <= frac);
}
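
// Worked example (illustration only): for a page with `reserved == 64`,
// `frac == 8`, so the page counts as "mostly used" once `used >= 56`,
// i.e. when more than 7/8th of its blocks are in use.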

static inline mi_page_queue_t* mi_page_queue(const mi_heap_t* heap, size_t size) {
  return &((mi_heap_t*)heap)->pages[_mi_bin(size)];
}



//-----------------------------------------------------------
// Page flags
//-----------------------------------------------------------
static inline bool mi_page_is_in_full(const mi_page_t* page) {
  return page->flags.x.in_full;
}

static inline void mi_page_set_in_full(mi_page_t* page, bool in_full) {
  page->flags.x.in_full = in_full;
}

static inline bool mi_page_has_aligned(const mi_page_t* page) {
  return page->flags.x.has_aligned;
}

static inline void mi_page_set_has_aligned(mi_page_t* page, bool has_aligned) {
  page->flags.x.has_aligned = has_aligned;
}


/* -------------------------------------------------------------------
Encoding/Decoding the free list next pointers

This is to protect against buffer overflow exploits where the
free list is mutated. Many hardened allocators xor the next pointer `p`
with a secret key `k1`, as `p^k1`. This prevents overwriting with known
values but might still be too weak: if the attacker can guess
the pointer `p` this can reveal `k1` (since `p^k1^p == k1`).
Moreover, if multiple blocks can be read as well, the attacker can
xor both as `(p1^k1) ^ (p2^k1) == p1^p2` which may reveal a lot
about the pointers (and subsequently `k1`).

Instead mimalloc uses an extra key `k2` and encodes as `((p^k2)<<<k1)+k1`.
Since these operations are not associative, the above approaches do not
work so well any more even if `p` can be guesstimated. For example,
for the read case we can subtract two entries to discard the `+k1` term,
but that leads to `((p1^k2)<<<k1) - ((p2^k2)<<<k1)` at best.
We include the left-rotation since xor and addition are otherwise linear
in the lowest bit. Finally, both keys are unique per page which reduces
the re-use of keys by a large factor.

We also pass a separate `null` value to be used as `NULL` or otherwise
`(k2<<<k1)+k1` would appear (too) often as a sentinel value.
------------------------------------------------------------------- */

static inline bool mi_is_in_same_segment(const void* p, const void* q) {
  return (_mi_ptr_segment(p) == _mi_ptr_segment(q));
}

static inline bool mi_is_in_same_page(const void* p, const void* q) {
  mi_segment_t* segment = _mi_ptr_segment(p);
  if (_mi_ptr_segment(q) != segment) return false;
  // assume q may be invalid  // return (_mi_segment_page_of(segment, p) == _mi_segment_page_of(segment, q));
  mi_page_t* page = _mi_segment_page_of(segment, p);
  size_t psize;
  uint8_t* start = _mi_segment_page_start(segment, page, &psize);
  return (start <= (uint8_t*)q && (uint8_t*)q < start + psize);
}

static inline uintptr_t mi_rotl(uintptr_t x, uintptr_t shift) {
  shift %= MI_INTPTR_BITS;
  return (shift==0 ? x : ((x << shift) | (x >> (MI_INTPTR_BITS - shift))));
}
static inline uintptr_t mi_rotr(uintptr_t x, uintptr_t shift) {
  shift %= MI_INTPTR_BITS;
  return (shift==0 ? x : ((x >> shift) | (x << (MI_INTPTR_BITS - shift))));
}

static inline void* mi_ptr_decode(const void* null, const mi_encoded_t x, const uintptr_t* keys) {
  void* p = (void*)(mi_rotr(x - keys[0], keys[0]) ^ keys[1]);
  return (p==null ? NULL : p);
}

static inline mi_encoded_t mi_ptr_encode(const void* null, const void* p, const uintptr_t* keys) {
  uintptr_t x = (uintptr_t)(p==NULL ? null : p);
  return mi_rotl(x ^ keys[1], keys[0]) + keys[0];
}
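
// Round-trip sketch (illustration only): encoding then decoding with the same
// two per-page keys yields the original pointer, and NULL maps to the per-page
// `null` sentinel so a plain zero never appears as a free-list entry. Note
// that `page->keys` only exists when MI_ENCODE_FREELIST is defined.
#if 0
static void encoding_example(const mi_page_t* page, void* p) {
  const uintptr_t* keys = page->keys;               // two per-page secret keys
  mi_encoded_t enc = mi_ptr_encode(page, p, keys);  // ((p^k2)<<<k1)+k1
  void* q = mi_ptr_decode(page, enc, keys);         // exact inverse operation
  mi_assert_internal(q == p);
}
#endif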

static inline mi_block_t* mi_block_nextx( const void* null, const mi_block_t* block, const uintptr_t* keys ) {
  mi_track_mem_defined(block,sizeof(mi_block_t));
  mi_block_t* next;
  #ifdef MI_ENCODE_FREELIST
  next = (mi_block_t*)mi_ptr_decode(null, block->next, keys);
  #else
  MI_UNUSED(keys); MI_UNUSED(null);
  next = (mi_block_t*)block->next;
  #endif
  mi_track_mem_noaccess(block,sizeof(mi_block_t));
  return next;
}

static inline void mi_block_set_nextx(const void* null, mi_block_t* block, const mi_block_t* next, const uintptr_t* keys) {
  mi_track_mem_undefined(block,sizeof(mi_block_t));
  #ifdef MI_ENCODE_FREELIST
  block->next = mi_ptr_encode(null, next, keys);
  #else
  MI_UNUSED(keys); MI_UNUSED(null);
  block->next = (mi_encoded_t)next;
  #endif
  mi_track_mem_noaccess(block,sizeof(mi_block_t));
}

static inline mi_block_t* mi_block_next(const mi_page_t* page, const mi_block_t* block) {
  #ifdef MI_ENCODE_FREELIST
  mi_block_t* next = mi_block_nextx(page,block,page->keys);
  // check for free list corruption: is `next` at least in the same page?
  // TODO: check if `next` is `page->block_size` aligned?
  if mi_unlikely(next!=NULL && !mi_is_in_same_page(block, next)) {
    _mi_error_message(EFAULT, "corrupted free list entry of size %zub at %p: value 0x%zx\n", mi_page_block_size(page), block, (uintptr_t)next);
    next = NULL;
  }
  return next;
  #else
  MI_UNUSED(page);
  return mi_block_nextx(page,block,NULL);
  #endif
}

static inline void mi_block_set_next(const mi_page_t* page, mi_block_t* block, const mi_block_t* next) {
  #ifdef MI_ENCODE_FREELIST
  mi_block_set_nextx(page,block,next, page->keys);
  #else
  MI_UNUSED(page);
  mi_block_set_nextx(page,block,next,NULL);
  #endif
}


// -------------------------------------------------------------------
// commit mask
// -------------------------------------------------------------------

static inline void mi_commit_mask_create_empty(mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    cm->mask[i] = 0;
  }
}

static inline void mi_commit_mask_create_full(mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    cm->mask[i] = ~((size_t)0);
  }
}

static inline bool mi_commit_mask_is_empty(const mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    if (cm->mask[i] != 0) return false;
  }
  return true;
}

static inline bool mi_commit_mask_is_full(const mi_commit_mask_t* cm) {
  for (size_t i = 0; i < MI_COMMIT_MASK_FIELD_COUNT; i++) {
    if (cm->mask[i] != ~((size_t)0)) return false;
  }
  return true;
}

// defined in `segment.c`:
size_t _mi_commit_mask_committed_size(const mi_commit_mask_t* cm, size_t total);
size_t _mi_commit_mask_next_run(const mi_commit_mask_t* cm, size_t* idx);

#define mi_commit_mask_foreach(cm,idx,count) \
  idx = 0; \
  while ((count = _mi_commit_mask_next_run(cm,&idx)) > 0) {

#define mi_commit_mask_foreach_end() \
    idx += count; \
  }
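
// Usage sketch (illustration only): iterating over the runs of set bits in a
// commit mask; `idx` and `count` are in slice units, and each iteration sees
// one maximal run [idx, idx+count) of committed slices.
#if 0
static void commit_mask_example(const mi_commit_mask_t* cm) {
  size_t idx;
  size_t count;
  mi_commit_mask_foreach(cm, idx, count) {
    // slices [idx, idx+count) are committed; process them here
  }
  mi_commit_mask_foreach_end()
}
#endif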


/* -----------------------------------------------------------
  memory id's
----------------------------------------------------------- */

static inline mi_memid_t _mi_memid_create(mi_memkind_t memkind) {
  mi_memid_t memid;
  _mi_memzero_var(memid);
  memid.memkind = memkind;
  return memid;
}

static inline mi_memid_t _mi_memid_none(void) {
  return _mi_memid_create(MI_MEM_NONE);
}

static inline mi_memid_t _mi_memid_create_os(bool committed, bool is_zero, bool is_large) {
  mi_memid_t memid = _mi_memid_create(MI_MEM_OS);
  memid.initially_committed = committed;
  memid.initially_zero = is_zero;
  memid.is_pinned = is_large;
  return memid;
}


// -------------------------------------------------------------------
// Fast "random" shuffle
// -------------------------------------------------------------------

static inline uintptr_t _mi_random_shuffle(uintptr_t x) {
  if (x==0) { x = 17; }   // ensure we don't get stuck in generating zeros
#if (MI_INTPTR_SIZE==8)
  // by Sebastiano Vigna, see: <http://xoshiro.di.unimi.it/splitmix64.c>
  x ^= x >> 30;
  x *= 0xbf58476d1ce4e5b9UL;
  x ^= x >> 27;
  x *= 0x94d049bb133111ebUL;
  x ^= x >> 31;
#elif (MI_INTPTR_SIZE==4)
  // by Chris Wellons, see: <https://nullprogram.com/blog/2018/07/31/>
  x ^= x >> 16;
  x *= 0x7feb352dUL;
  x ^= x >> 15;
  x *= 0x846ca68bUL;
  x ^= x >> 16;
#endif
  return x;
}

// -------------------------------------------------------------------
// Optimize numa node access for the common case (= one node)
// -------------------------------------------------------------------

int    _mi_os_numa_node_get(mi_os_tld_t* tld);
size_t _mi_os_numa_node_count_get(void);

extern _Atomic(size_t) _mi_numa_node_count;
static inline int _mi_os_numa_node(mi_os_tld_t* tld) {
  if mi_likely(mi_atomic_load_relaxed(&_mi_numa_node_count) == 1) { return 0; }
  else return _mi_os_numa_node_get(tld);
}
static inline size_t _mi_os_numa_node_count(void) {
  const size_t count = mi_atomic_load_relaxed(&_mi_numa_node_count);
  if mi_likely(count > 0) { return count; }
  else return _mi_os_numa_node_count_get();
}



// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
// -----------------------------------------------------------------------

#if defined(__GNUC__)

#include <limits.h>  // LONG_MAX
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
  return __builtin_clzl(x);
#else
  return __builtin_clzll(x);
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (INTPTR_MAX == LONG_MAX)
  return __builtin_ctzl(x);
#else
  return __builtin_ctzll(x);
#endif
}

#elif defined(_MSC_VER)

#include <limits.h>  // LONG_MAX
#include <intrin.h>  // BitScanReverse64
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
  unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
  _BitScanReverse(&idx, x);
#else
  _BitScanReverse64(&idx, x);
#endif
  return ((MI_INTPTR_BITS - 1) - idx);
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
  unsigned long idx;
#if (INTPTR_MAX == LONG_MAX)
  _BitScanForward(&idx, x);
#else
  _BitScanForward64(&idx, x);
#endif
  return idx;
}

#else
static inline size_t mi_ctz32(uint32_t x) {
  // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
  static const unsigned char debruijn[32] = {
    0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
    31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
  };
  if (x==0) return 32;
  return debruijn[((x & -(int32_t)x) * 0x077CB531UL) >> 27];
}
static inline size_t mi_clz32(uint32_t x) {
  // de Bruijn multiplication, see <http://supertech.csail.mit.edu/papers/debruijn.pdf>
  static const uint8_t debruijn[32] = {
    31, 22, 30, 21, 18, 10, 29, 2, 20, 17, 15, 13, 9, 6, 28, 1,
    23, 19, 11, 3, 16, 14, 7, 24, 12, 4, 8, 25, 5, 26, 27, 0
  };
  if (x==0) return 32;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return debruijn[(uint32_t)(x * 0x07C4ACDDUL) >> 27];
}

static inline size_t mi_clz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
  return mi_clz32((uint32_t)x);
#else
  size_t count = mi_clz32((uint32_t)(x >> 32));
  if (count < 32) return count;
  return (32 + mi_clz32((uint32_t)x));
#endif
}
static inline size_t mi_ctz(uintptr_t x) {
  if (x==0) return MI_INTPTR_BITS;
#if (MI_INTPTR_BITS <= 32)
  return mi_ctz32((uint32_t)x);
#else
  size_t count = mi_ctz32((uint32_t)x);
  if (count < 32) return count;
  return (32 + mi_ctz32((uint32_t)(x>>32)));
#endif
}

#endif

// "bit scan reverse": Return index of the highest bit (or MI_INTPTR_BITS if `x` is zero)
static inline size_t mi_bsr(uintptr_t x) {
  return (x==0 ? MI_INTPTR_BITS : MI_INTPTR_BITS - 1 - mi_clz(x));
}
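
// Worked examples (illustration only) on a 64-bit build (MI_INTPTR_BITS == 64):
// mi_ctz(0x40) == 6, mi_clz(0x40) == 57, and mi_bsr(0x40) == 6;
// for x == 0 all three return MI_INTPTR_BITS (64).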


// ---------------------------------------------------------------------------------
// Provide our own `_mi_memcpy` for potential performance optimizations.
//
// For now, only on Windows with msvc/clang-cl we optimize to `rep movsb` if
// we happen to run on x86/x64 cpu's that have "fast short rep movsb" (FSRM) support
// (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017)). See also issue #201 and pr #253.
// ---------------------------------------------------------------------------------

#if !MI_TRACK_ENABLED && defined(_WIN32) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h>
extern bool _mi_cpu_has_fsrm;
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __movsb((unsigned char*)dst, (const unsigned char*)src, n);
  }
  else {
    memcpy(dst, src, n);
  }
}
static inline void _mi_memzero(void* dst, size_t n) {
  if (_mi_cpu_has_fsrm) {
    __stosb((unsigned char*)dst, 0, n);
  }
  else {
    memset(dst, 0, n);
  }
}
#else
static inline void _mi_memcpy(void* dst, const void* src, size_t n) {
  memcpy(dst, src, n);
}
static inline void _mi_memzero(void* dst, size_t n) {
  memset(dst, 0, n);
}
#endif

// -------------------------------------------------------------------------------
// The `_mi_memcpy_aligned` can be used if the pointers are machine-word aligned
// This is used for example in `mi_realloc`.
// -------------------------------------------------------------------------------

#if (defined(__GNUC__) && (__GNUC__ >= 4)) || defined(__clang__)
// On GCC/Clang we provide a hint that the pointers are word aligned.
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
  const void* asrc = __builtin_assume_aligned(src, MI_INTPTR_SIZE);
  _mi_memcpy(adst, asrc, n);
}

static inline void _mi_memzero_aligned(void* dst, size_t n) {
  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
  void* adst = __builtin_assume_aligned(dst, MI_INTPTR_SIZE);
  _mi_memzero(adst, n);
}
#else
// Default fallback on `_mi_memcpy`
static inline void _mi_memcpy_aligned(void* dst, const void* src, size_t n) {
  mi_assert_internal(((uintptr_t)dst % MI_INTPTR_SIZE == 0) && ((uintptr_t)src % MI_INTPTR_SIZE == 0));
  _mi_memcpy(dst, src, n);
}

static inline void _mi_memzero_aligned(void* dst, size_t n) {
  mi_assert_internal((uintptr_t)dst % MI_INTPTR_SIZE == 0);
  _mi_memzero(dst, n);
}
#endif


#endif // MIMALLOC_INTERNAL_H
329
Dependencies/Python/include/internal/mimalloc/mimalloc/prim.h
vendored
Normal file
@@ -0,0 +1,329 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_PRIM_H
#define MIMALLOC_PRIM_H


// --------------------------------------------------------------------------
// This file specifies the primitive portability API.
// Each OS/host needs to implement these primitives, see `src/prim`
// for implementations on Windows, macOS, WASI, and Linux/Unix.
//
// note: on all primitive functions, we always have result parameters != NULL, and:
//  addr != NULL and page aligned
//  size > 0 and page aligned
//  the return value is an error code (an int) where 0 is success
// --------------------------------------------------------------------------

// OS memory configuration
typedef struct mi_os_mem_config_s {
  size_t  page_size;            // 4KiB
  size_t  large_page_size;      // 2MiB
  size_t  alloc_granularity;    // smallest allocation size (on Windows 64KiB)
  bool    has_overcommit;       // can we reserve more memory than can be actually committed?
  bool    must_free_whole;      // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
  bool    has_virtual_reserve;  // supports virtual address space reservation? (if true we can reserve virtual address space without using commit or physical memory)
} mi_os_mem_config_t;
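
// Sketch (illustration only, assuming a POSIX-like host): how a port's
// `_mi_prim_mem_init` might fill in this configuration. The values shown are
// typical defaults for such a platform, not requirements of the API.
#if 0
#include <unistd.h>
void _mi_prim_mem_init(mi_os_mem_config_t* config) {
  config->page_size = (size_t)sysconf(_SC_PAGESIZE);  // usually 4KiB
  config->large_page_size = 2*1024*1024;              // 2MiB huge pages
  config->alloc_granularity = config->page_size;      // mmap granularity
  config->has_overcommit = true;                      // Linux-style overcommit
  config->must_free_whole = false;                    // munmap can free partial ranges
  config->has_virtual_reserve = true;                 // mmap(PROT_NONE) reserves address space
}
#endif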

// Initialize
void _mi_prim_mem_init( mi_os_mem_config_t* config );

// Free OS memory
int _mi_prim_free(void* addr, size_t size );

// Allocate OS memory (returned in `addr`). Returns an error code, or 0 on success.
// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
// If `commit` is false, the virtual memory range only needs to be reserved (with no access)
// which will later be committed explicitly using `_mi_prim_commit`.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: !commit => !allow_large
//      try_alignment >= _mi_os_page_size() and a power of 2
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, bool* is_zero, void** addr);

// Commit memory. Returns error code or 0 on success.
// For example, on Linux this would make the memory PROT_READ|PROT_WRITE.
// `is_zero` is set to true if the memory was zero initialized (e.g. on Windows)
int _mi_prim_commit(void* addr, size_t size, bool* is_zero);

// Decommit memory. Returns error code or 0 on success. The `needs_recommit` result is true
// if the memory would need to be re-committed. For example, on Windows this is always true,
// but on Linux we could use MADV_DONTNEED to decommit which does not need a recommit.
// pre: needs_recommit != NULL
int _mi_prim_decommit(void* addr, size_t size, bool* needs_recommit);

// Reset memory. The range keeps being accessible but the content might be reset.
// Returns error code or 0 on success.
int _mi_prim_reset(void* addr, size_t size);

// Protect memory. Returns error code or 0 on success.
int _mi_prim_protect(void* addr, size_t size, bool protect);

// Allocate huge (1GiB) pages possibly associated with a NUMA node.
// `is_zero` is set to true if the memory was zero initialized (as on most OS's)
// pre: size > 0 and a multiple of 1GiB.
//      numa_node is either negative (don't care), or a numa node number.
int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, bool* is_zero, void** addr);

// Return the current NUMA node
size_t _mi_prim_numa_node(void);

// Return the number of logical NUMA nodes
size_t _mi_prim_numa_node_count(void);

// Clock ticks
mi_msecs_t _mi_prim_clock_now(void);

// Return process information (only for statistics)
typedef struct mi_process_info_s {
  mi_msecs_t elapsed;
  mi_msecs_t utime;
  mi_msecs_t stime;
  size_t     current_rss;
  size_t     peak_rss;
  size_t     current_commit;
  size_t     peak_commit;
  size_t     page_faults;
} mi_process_info_t;

void _mi_prim_process_info(mi_process_info_t* pinfo);

// Default stderr output. (only for warnings etc. with verbose enabled)
// msg != NULL && _mi_strlen(msg) > 0
void _mi_prim_out_stderr( const char* msg );

// Get an environment variable. (only for options)
// name != NULL, result != NULL, result_size >= 64
bool _mi_prim_getenv(const char* name, char* result, size_t result_size);


// Fill a buffer with strong randomness; return `false` on error or if
// there is no strong randomization available.
bool _mi_prim_random_buf(void* buf, size_t buf_len);

// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
void _mi_prim_thread_init_auto_done(void);

// Called on process exit and may take action to clean up resources associated with the thread auto done.
void _mi_prim_thread_done_auto_done(void);

// Called when the default heap for a thread changes
void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);


//-------------------------------------------------------------------
// Thread id: `_mi_prim_thread_id()`
//
// Getting the thread id should be performant as it is called in the
// fast path of `_mi_free` and we specialize for various platforms as
// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
// We only require _mi_prim_thread_id() to return a unique id
// for each thread (unequal to zero).
//-------------------------------------------------------------------

// defined in `init.c`; do not use these directly
extern mi_decl_thread mi_heap_t* _mi_heap_default;  // default heap to allocate from
extern bool _mi_process_is_initialized;             // has mi_process_init been called?

static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;

#ifdef MI_PRIM_THREAD_ID

static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
  return MI_PRIM_THREAD_ID();
}
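
// Illustration only: a port could define MI_PRIM_THREAD_ID before including
// this header, for example in terms of pthreads (assuming `pthread_t` fits in
// a `uintptr_t` and is never zero on that platform):
//
//   #define MI_PRIM_THREAD_ID()  ((uintptr_t)pthread_self())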
|
||||
|
||||
#elif defined(_WIN32)
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
|
||||
// Windows: works on Intel and ARM in both 32- and 64-bit
|
||||
return (uintptr_t)NtCurrentTeb();
|
||||
}
|
||||
|
||||
// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
|
||||
// both the OS and libc implementation so we use specific tests for each main platform.
|
||||
// If you test on another platform and it works please send a PR :-)
|
||||
// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
|
||||
#elif defined(__GNUC__) && ( \
|
||||
(defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|
||||
|| (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
|
||||
|| (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__))) \
|
||||
|| (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|
||||
|| (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
|
||||
)
|
||||
|
||||
static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
|
||||
void* res;
|
||||
const size_t ofs = (slot*sizeof(void*));
|
||||
#if defined(__i386__)
|
||||
__asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
|
||||
#elif defined(__APPLE__) && defined(__x86_64__)
|
||||
__asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
|
||||
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
|
||||
__asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
|
||||
#elif defined(__x86_64__)
|
||||
__asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
|
||||
#elif defined(__arm__)
|
||||
void** tcb; MI_UNUSED(ofs);
|
||||
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
|
||||
res = tcb[slot];
|
||||
#elif defined(__aarch64__)
|
||||
void** tcb; MI_UNUSED(ofs);
|
||||
#if defined(__APPLE__) // M1, issue #343
|
||||
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
|
||||
#else
|
||||
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
|
||||
#endif
|
||||
res = tcb[slot];
|
||||
#endif
|
||||
return res;
|
||||
}
|
||||
|
||||
// setting a tls slot is only used on macOS for now
|
||||
static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
|
||||
const size_t ofs = (slot*sizeof(void*));
|
||||
#if defined(__i386__)
|
||||
__asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
|
||||
#elif defined(__APPLE__) && defined(__x86_64__)
|
||||
__asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
|
||||
#elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
|
||||
__asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
|
||||
#elif defined(__x86_64__)
|
||||
__asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
|
||||
#elif defined(__arm__)
|
||||
void** tcb; MI_UNUSED(ofs);
|
||||
__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
|
||||
tcb[slot] = value;
|
||||
#elif defined(__aarch64__)
|
||||
void** tcb; MI_UNUSED(ofs);
|
||||
#if defined(__APPLE__) // M1, issue #343
|
||||
__asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
|
||||
#else
|
||||
__asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
|
||||
#endif
|
||||
tcb[slot] = value;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
|
||||
#if defined(__BIONIC__)
|
||||
// issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
|
||||
// see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
|
||||
return (uintptr_t)mi_prim_tls_slot(1);
|
||||
#else
|
||||
// in all our other targets, slot 0 is the thread id
|
||||
// glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
|
||||
// apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
|
||||
return (uintptr_t)mi_prim_tls_slot(0);
|
||||
#endif
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
|
||||
static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
|
||||
return (uintptr_t)&_mi_heap_default;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
/* ----------------------------------------------------------------------------------------
|
||||
The thread local default heap: `_mi_prim_get_default_heap()`
|
||||
This is inlined here as it is on the fast path for allocation functions.
|
||||
|
||||
On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
|
||||
__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
|
||||
that the storage will always be available (allocated on the thread stacks).
|
||||
|
||||
On some platforms though we cannot use that when overriding `malloc` since the underlying
|
||||
TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
|
||||
We try to circumvent this in an efficient way:
|
||||
- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
|
||||
loader itself calls `malloc` even before the modules are initialized.
|
||||
- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
|
||||
- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323)
|
||||
------------------------------------------------------------------------------------------- */

static inline mi_heap_t* mi_prim_get_default_heap(void);

#if defined(MI_MALLOC_OVERRIDE)
#if defined(__APPLE__)  // macOS
  #define MI_TLS_SLOT  89  // seems unused?
  // #define MI_TLS_RECURSE_GUARD 1
  // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
  // see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
#elif defined(__OpenBSD__)
  // use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
  // see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
  #define MI_TLS_PTHREAD_SLOT_OFS  (6*sizeof(int) + 4*sizeof(void*) + 24)
// #elif defined(__DragonFly__)
//   #warning "mimalloc is not working correctly on DragonFly yet."
//   #define MI_TLS_PTHREAD_SLOT_OFS  (4 + 1*sizeof(void*))  // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
#elif defined(__ANDROID__)
  // See issue #381
  #define MI_TLS_PTHREAD
#endif
#endif


#if defined(MI_TLS_SLOT)

static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
  if mi_unlikely(heap == NULL) {
    #ifdef __GNUC__
    __asm(""); // prevent conditional load of the address of _mi_heap_empty
    #endif
    heap = (mi_heap_t*)&_mi_heap_empty;
  }
  return heap;
}

#elif defined(MI_TLS_PTHREAD_SLOT_OFS)

static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
  pthread_t self = pthread_self();
  #if defined(__DragonFly__)
  if (self == NULL) return NULL;
  #endif
  return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
}

static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
  if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
  mi_heap_t* heap = *pheap;
  if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
  return heap;
}

#elif defined(MI_TLS_PTHREAD)

extern pthread_key_t _mi_heap_default_key;
static inline mi_heap_t* mi_prim_get_default_heap(void) {
  mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
  return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
}
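/* A runnable sketch of the portable pthread-key fallback used by the
   MI_TLS_PTHREAD branch above: the key is created once at startup, and the
   per-thread lookup via pthread_getspecific never allocates. The demo_* names
   are illustrative only. */

#include <pthread.h>
#include <stdio.h>

static pthread_key_t demo_key;
static int demo_value = 42;

int main(void) {
  pthread_key_create(&demo_key, NULL);           // once, at startup (cf. _mi_heap_default_key)
  pthread_setspecific(demo_key, &demo_value);
  int* p = (int*)pthread_getspecific(demo_key);  // allocation-free per-thread lookup
  printf("%d\n", p ? *p : -1);
  pthread_key_delete(demo_key);
  return 0;
}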

#else  // default: use a thread-local variable; used on most platforms.

static inline mi_heap_t* mi_prim_get_default_heap(void) {
  #if defined(MI_TLS_RECURSE_GUARD)
  if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
  #endif
  return _mi_heap_default;
}

#endif  // mi_prim_get_default_heap()


#endif  // MIMALLOC_PRIM_H
147
Dependencies/Python/include/internal/mimalloc/mimalloc/track.h
vendored
Normal file
@@ -0,0 +1,147 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TRACK_H
#define MIMALLOC_TRACK_H

/* ------------------------------------------------------------------------------------------------------
Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers.
These can be defined for tracking allocation:

  #define mi_track_malloc_size(p,reqsize,size,zero)
  #define mi_track_free_size(p,_size)

The macros are set up such that the size passed to `mi_track_free_size`
always matches the size passed to `mi_track_malloc_size` (currently, `size == mi_usable_size(p)`).
The `reqsize` is what the user requested, and `size >= reqsize`.
The `size` is either byte precise (and `size == reqsize`) if `MI_PADDING` is enabled,
or otherwise it is the usable block size, which may be larger than the original request.
Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc.).
The `zero` parameter is `true` if the allocated block is zero initialized.

Optional:

  #define mi_track_align(p,alignedp,offset,size)
  #define mi_track_resize(p,oldsize,newsize)
  #define mi_track_init()

`mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
`mi_track_resize` is currently unused but could be called on reallocations within a block.
`mi_track_init` is called at program start.

The following macros are for tools like asan and valgrind to track whether memory is
defined, undefined, or not accessible at all:

  #define mi_track_mem_defined(p,size)
  #define mi_track_mem_undefined(p,size)
  #define mi_track_mem_noaccess(p,size)

-------------------------------------------------------------------------------------------------------*/
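/* A runnable sketch of the macro contract documented above: a custom tracker
   that just logs, showing that the `size` passed to mi_track_free_size matches
   the usable `size` passed to mi_track_malloc_size. This is an illustration
   only, not one of the tool bindings defined below. */

#include <stdio.h>
#include <stdlib.h>

#define mi_track_malloc_size(p,reqsize,size,zero) \
  fprintf(stderr, "alloc %p req=%zu usable=%zu zero=%d\n", (void*)(p), (size_t)(reqsize), (size_t)(size), (int)(zero))
#define mi_track_free_size(p,size) \
  fprintf(stderr, "free  %p usable=%zu\n", (void*)(p), (size_t)(size))

int main(void) {
  size_t reqsize = 100, usable = 128;        // usable >= requested, as described above
  void* p = malloc(usable);
  mi_track_malloc_size(p, reqsize, usable, 0);
  mi_track_free_size(p, usable);             // same size as on the allocation side
  free(p);
  return 0;
}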

#if MI_TRACK_VALGRIND
// valgrind tool

#define MI_TRACK_ENABLED       1
#define MI_TRACK_HEAP_DESTROY  1   // track free of individual blocks on heap_destroy
#define MI_TRACK_TOOL          "valgrind"

#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
#define mi_track_free_size(p,_size)               VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
#define mi_track_resize(p,oldsize,newsize)        VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
#define mi_track_mem_defined(p,size)              VALGRIND_MAKE_MEM_DEFINED(p,size)
#define mi_track_mem_undefined(p,size)            VALGRIND_MAKE_MEM_UNDEFINED(p,size)
#define mi_track_mem_noaccess(p,size)             VALGRIND_MAKE_MEM_NOACCESS(p,size)

#elif MI_TRACK_ASAN
// address sanitizer

#define MI_TRACK_ENABLED       1
#define MI_TRACK_HEAP_DESTROY  0
#define MI_TRACK_TOOL          "asan"

#include <sanitizer/asan_interface.h>

#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_free_size(p,size)                ASAN_POISON_MEMORY_REGION(p,size)
#define mi_track_mem_defined(p,size)              ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_mem_undefined(p,size)            ASAN_UNPOISON_MEMORY_REGION(p,size)
#define mi_track_mem_noaccess(p,size)             ASAN_POISON_MEMORY_REGION(p,size)

#elif MI_TRACK_ETW
// windows event tracing

#define MI_TRACK_ENABLED       1
#define MI_TRACK_HEAP_DESTROY  1
#define MI_TRACK_TOOL          "ETW"

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include "../src/prim/windows/etw.h"

#define mi_track_init()                           EventRegistermicrosoft_windows_mimalloc();
#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
#define mi_track_free_size(p,size)                EventWriteETW_MI_FREE((UINT64)(p), size)

#else
// no tracking

#define MI_TRACK_ENABLED       0
#define MI_TRACK_HEAP_DESTROY  0
#define MI_TRACK_TOOL          "none"

#define mi_track_malloc_size(p,reqsize,size,zero)
#define mi_track_free_size(p,_size)

#endif

// -------------------
// Utility definitions

#ifndef mi_track_resize
#define mi_track_resize(p,oldsize,newsize)      mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
#endif

#ifndef mi_track_align
#define mi_track_align(p,alignedp,offset,size)  mi_track_mem_noaccess(p,offset)
#endif

#ifndef mi_track_init
#define mi_track_init()
#endif

#ifndef mi_track_mem_defined
#define mi_track_mem_defined(p,size)
#endif

#ifndef mi_track_mem_undefined
#define mi_track_mem_undefined(p,size)
#endif

#ifndef mi_track_mem_noaccess
#define mi_track_mem_noaccess(p,size)
#endif


#if MI_PADDING
#define mi_track_malloc(p,reqsize,zero) \
  if ((p) != NULL) { \
    mi_assert_internal(mi_usable_size(p) == (reqsize)); \
    mi_track_malloc_size(p,reqsize,reqsize,zero); \
  }
#else
#define mi_track_malloc(p,reqsize,zero) \
  if ((p) != NULL) { \
    mi_assert_internal(mi_usable_size(p) >= (reqsize)); \
    mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
  }
#endif

#endif  // MIMALLOC_TRACK_H
721
Dependencies/Python/include/internal/mimalloc/mimalloc/types.h
vendored
Normal file
@@ -0,0 +1,721 @@
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_TYPES_H
#define MIMALLOC_TYPES_H

// --------------------------------------------------------------------------
// This file contains the main type definitions for mimalloc:
// mi_heap_t    : all data for a thread-local heap, contains
//                lists of all managed heap pages.
// mi_segment_t : a larger chunk of memory (32MiB) from which pages
//                are allocated.
// mi_page_t    : a mimalloc page (usually 64KiB or 512KiB) from
//                which objects are allocated.
// --------------------------------------------------------------------------

#include <stddef.h>   // ptrdiff_t
#include <stdint.h>   // uintptr_t, uint16_t, etc.
#include "atomic.h"   // _Atomic

#ifdef _MSC_VER
#pragma warning(disable:4214)  // bitfield is not int
#endif

// Minimal alignment necessary. On most platforms 16 bytes are needed,
// due to SSE registers for example. This must be at least `sizeof(void*)`.
#ifndef MI_MAX_ALIGN_SIZE
#define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
#endif

#define MI_CACHE_LINE  64
#if defined(_MSC_VER)
#pragma warning(disable:4127)   // suppress constant conditional warning (due to MI_SECURE paths)
#pragma warning(disable:26812)  // unscoped enum warning
#define mi_decl_noinline     __declspec(noinline)
#define mi_decl_thread       __declspec(thread)
#define mi_decl_cache_align  __declspec(align(MI_CACHE_LINE))
#elif (defined(__GNUC__) && (__GNUC__ >= 3)) || defined(__clang__)  // includes clang and icc
#define mi_decl_noinline     __attribute__((noinline))
#define mi_decl_thread       __thread
#define mi_decl_cache_align  __attribute__((aligned(MI_CACHE_LINE)))
#else
#define mi_decl_noinline
#define mi_decl_thread       __thread  // hope for the best :-)
#define mi_decl_cache_align
#endif

// ------------------------------------------------------
// Variants
// ------------------------------------------------------

// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG

// Define MI_TRACK_<tool> to enable tracking support
// #define MI_TRACK_VALGRIND 1
// #define MI_TRACK_ASAN     1
// #define MI_TRACK_ETW      1

// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1

// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1  // guard page around metadata
// #define MI_SECURE 2  // guard page around each mimalloc page
// #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4  // checks for double free. (may be more expensive)

#if !defined(MI_SECURE)
#define MI_SECURE 0
#endif

// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2  // + internal assertion checks
// #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2
#else
#define MI_DEBUG 0
#endif
#endif

// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect buffer overflow on free.
#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING 1
#endif

// Check padding bytes; allows byte-precise buffer overflow detection
#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_PADDING_CHECK 1
#endif


// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
#if (MI_SECURE>=3 || MI_DEBUG>=1)
#define MI_ENCODE_FREELIST 1
#endif


// We used to abandon huge pages but eagerly deallocate them if freed from another
// thread; however, that makes it impossible to visit them during a heap walk or to
// include them in a `mi_heap_destroy`. We therefore instead reset/decommit the huge
// blocks if freed from another thread so most memory is available until it gets
// properly freed by the owning thread.
// #define MI_HUGE_PAGE_ABANDON 1


// ------------------------------------------------------
// Platform specific values
// ------------------------------------------------------

// ------------------------------------------------------
// Size of a pointer.
// We assume that `sizeof(void*) == sizeof(intptr_t)`,
// and it holds for all platforms we know of.
//
// However, the C standard only requires that:
//   p == (void*)((intptr_t)p)
// but we also need:
//   i == (intptr_t)((void*)i)
// or otherwise one might define an intptr_t type that is larger than a pointer...
// ------------------------------------------------------

#if INTPTR_MAX > INT64_MAX
# define MI_INTPTR_SHIFT (4)  // assume 128-bit (as on arm CHERI for example)
#elif INTPTR_MAX == INT64_MAX
# define MI_INTPTR_SHIFT (3)
#elif INTPTR_MAX == INT32_MAX
# define MI_INTPTR_SHIFT (2)
#else
#error platform pointers must be 32, 64, or 128 bits
#endif

#if SIZE_MAX == UINT64_MAX
# define MI_SIZE_SHIFT (3)
typedef int64_t mi_ssize_t;
#elif SIZE_MAX == UINT32_MAX
# define MI_SIZE_SHIFT (2)
typedef int32_t mi_ssize_t;
#else
#error platform objects must be 32 or 64 bits
#endif

#if (SIZE_MAX/2) > LONG_MAX
# define MI_ZU(x)  x##ULL
# define MI_ZI(x)  x##LL
#else
# define MI_ZU(x)  x##UL
# define MI_ZI(x)  x##L
#endif

#define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
#define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)

#define MI_SIZE_SIZE  (1<<MI_SIZE_SHIFT)
#define MI_SIZE_BITS  (MI_SIZE_SIZE*8)

#define MI_KiB  (MI_ZU(1024))
#define MI_MiB  (MI_KiB*MI_KiB)
#define MI_GiB  (MI_MiB*MI_KiB)


// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------

// Main tuning parameters for segment and page sizes
// Sizes for 64-bit (usually divide by two for 32-bit)
#define MI_SEGMENT_SLICE_SHIFT   (13 + MI_INTPTR_SHIFT)         // 64KiB (32KiB on 32-bit)

#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT         ( 9 + MI_SEGMENT_SLICE_SHIFT)  // 32MiB
#else
#define MI_SEGMENT_SHIFT         ( 7 + MI_SEGMENT_SLICE_SHIFT)  // 4MiB on 32-bit
#endif

#define MI_SMALL_PAGE_SHIFT      (MI_SEGMENT_SLICE_SHIFT)       // 64KiB
#define MI_MEDIUM_PAGE_SHIFT     ( 3 + MI_SMALL_PAGE_SHIFT)     // 512KiB


// Derived constants
#define MI_SEGMENT_SIZE          (MI_ZU(1)<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_ALIGN         MI_SEGMENT_SIZE
#define MI_SEGMENT_MASK          ((uintptr_t)(MI_SEGMENT_ALIGN - 1))
#define MI_SEGMENT_SLICE_SIZE    (MI_ZU(1)<<MI_SEGMENT_SLICE_SHIFT)
#define MI_SLICES_PER_SEGMENT    (MI_SEGMENT_SIZE / MI_SEGMENT_SLICE_SIZE)  // 512 on 64-bit

#define MI_SMALL_PAGE_SIZE       (MI_ZU(1)<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE      (MI_ZU(1)<<MI_MEDIUM_PAGE_SHIFT)

#define MI_SMALL_OBJ_SIZE_MAX    (MI_SMALL_PAGE_SIZE/4)   // 16KiB on 64-bit
#define MI_MEDIUM_OBJ_SIZE_MAX   (MI_MEDIUM_PAGE_SIZE/4)  // 128KiB on 64-bit
#define MI_MEDIUM_OBJ_WSIZE_MAX  (MI_MEDIUM_OBJ_SIZE_MAX/MI_INTPTR_SIZE)
#define MI_LARGE_OBJ_SIZE_MAX    (MI_SEGMENT_SIZE/2)      // 16MiB on 64-bit
#define MI_LARGE_OBJ_WSIZE_MAX   (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)

// Maximum number of size classes (spaced exponentially in 12.5% increments).
#define MI_BIN_HUGE  (73U)
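/* An illustrative, standalone sketch of the 12.5% exponential spacing noted
   above: candidate block sizes of the form (8+m) << e for m in 0..7, so
   consecutive sizes differ by one eighth. This sketches the spacing idea only,
   not the exact mimalloc bin function. */

#include <stddef.h>
#include <stdio.h>

int main(void) {
  int bin = 0;
  for (int e = 0; e < 3; e++) {
    for (int m = 0; m < 8; m++) {
      size_t size = (size_t)(8 + m) << e;   // 8..15, then 16,18,...,30, then 32,36,...
      printf("bin %2d: %3zu bytes\n", bin++, size);
    }
  }
  return 0;
}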

#if (MI_MEDIUM_OBJ_WSIZE_MAX >= 655360)
#error "mimalloc internal: define more bins"
#endif

// Maximum slice offset (255 on 64-bit)
#define MI_MAX_SLICE_OFFSET  ((MI_ALIGNMENT_MAX / MI_SEGMENT_SLICE_SIZE) - 1)

// Used as a special value to encode block sizes in 32 bits.
#define MI_HUGE_BLOCK_SIZE  ((uint32_t)(2*MI_GiB))

// Blocks up to this size are always allocated aligned.
#define MI_MAX_ALIGN_GUARANTEE  (8*MI_MAX_ALIGN_SIZE)

// Alignments over MI_ALIGNMENT_MAX are allocated in dedicated huge page segments.
#define MI_ALIGNMENT_MAX  (MI_SEGMENT_SIZE >> 1)
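/* A standalone check of the 64-bit sizes these shifts produce; the constants
   are re-derived locally so the sketch compiles on its own, and the values
   match the comments above. */

#include <assert.h>
#include <stdint.h>

#define SLICE_SHIFT   (13 + 3)              // MI_INTPTR_SHIFT == 3 on 64-bit
#define SEGMENT_SHIFT (9 + SLICE_SHIFT)

int main(void) {
  assert((UINT64_C(1) << SLICE_SHIFT)   == UINT64_C(64)*1024);             // 64KiB slice
  assert((UINT64_C(1) << SEGMENT_SHIFT) == UINT64_C(32)*1024*1024);        // 32MiB segment
  assert(((UINT64_C(1) << SEGMENT_SHIFT) >> SLICE_SHIFT) == 512);          // slices per segment
  assert(((UINT64_C(1) << SEGMENT_SHIFT) / 2) == UINT64_C(16)*1024*1024);  // large-object max
  return 0;
}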


// ------------------------------------------------------
// Mimalloc pages contain allocated blocks
// ------------------------------------------------------

// The free lists use encoded next fields.
// (Only actually encodes when MI_ENCODE_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;

// thread id's
typedef size_t mi_threadid_t;

// free lists contain blocks
typedef struct mi_block_s {
  mi_encoded_t next;
} mi_block_t;


// The delayed flags are used for efficient multi-threaded free-ing
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky, only resets on page reclaim
} mi_delayed_t;


// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;
  struct {
    uint8_t in_full : 1;
    uint8_t has_aligned : 1;
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint16_t full_aligned;
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
  } x;
} mi_page_flags_t;
#endif
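/* A runnable sketch of the union trick above: both flag bits share one byte,
   so `full_aligned == 0` tests them with a single load instead of two
   bitfield reads. */

#include <stdint.h>
#include <stdio.h>

typedef union demo_flags_u {
  uint8_t full_aligned;
  struct { uint8_t in_full : 1; uint8_t has_aligned : 1; } x;
} demo_flags_t;

int main(void) {
  demo_flags_t f = { 0 };
  f.x.has_aligned = 1;
  printf("fast path? %s\n", f.full_aligned == 0 ? "yes" : "no");  // one byte compare
  return 0;
}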

// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags.
typedef uintptr_t mi_thread_free_t;

// A page contains blocks of one specific size (`block_size`).
// Each page has three lists of free blocks:
//   `free` for blocks that can be allocated,
//   `local_free` for freed blocks that are not yet available to `mi_malloc`,
//   `thread_free` for blocks freed by other threads.
// The `local_free` and `thread_free` lists are migrated to the `free` list
// when it is exhausted. The separate `local_free` list is necessary to
// implement a monotonic heartbeat. The `thread_free` list is needed for
// avoiding atomic operations in the common case.
//
// `used - |thread_free|` == actual blocks that are in use (alive)
// `used - |thread_free| + |free| + |local_free| == capacity`
//
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
// Notes:
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
//   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
//   and 12 are still good for address calculation)
// - To limit the structure size, the `xblock_size` is 32 bits only; for
//   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as delayed-free flags to optimize
//   concurrent frees where only the first concurrent free adds to the owning
//   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
//   The invariant is that no-delayed-free is only set if there is
//   at least one block that will be added, or has already been added, to
//   the owning heap `thread_delayed_free` list. This guarantees that pages
//   will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
  // "owned" by the segment
  uint32_t slice_count;        // slices in this page (0 if not a page)
  uint32_t slice_offset;       // distance from the actual page data slice (0 if a page)
  uint8_t  is_committed : 1;   // `true` if the page virtual memory is committed
  uint8_t  is_zero_init : 1;   // `true` if the page was initially zero initialized
  uint8_t  use_qsbr     : 1;   // delay page freeing using qsbr
  uint8_t  tag          : 4;   // tag from the owning heap
  uint8_t  debug_offset;       // number of bytes to preserve when filling freed or uninitialized memory

  // layout like this to optimize access in `mi_malloc` and `mi_free`
  uint16_t capacity;           // number of blocks committed; must be the first field, see `segment.c:page_clear`
  uint16_t reserved;           // number of blocks reserved in memory
  mi_page_flags_t flags;       // `in_full` and `has_aligned` flags (8 bits)
  uint8_t  free_is_zero  : 1;  // `true` if the blocks in the free list are zero initialized
  uint8_t  retire_expire : 7;  // expiration count for retired blocks

  mi_block_t* free;            // list of available free blocks (`malloc` allocates from this list)
  uint32_t used;               // number of blocks in use (including blocks in `local_free` and `thread_free`)
  uint32_t xblock_size;        // size available in each block (always `>0`)
  mi_block_t* local_free;      // list of deferred free blocks by this thread (migrates to `free`)

  #if (MI_ENCODE_FREELIST || MI_PADDING)
  uintptr_t keys[2];           // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
  #endif

  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads
  _Atomic(uintptr_t)        xheap;

  struct mi_page_s* next;      // next page owned by this thread with the same `block_size`
  struct mi_page_s* prev;      // previous page owned by this thread with the same `block_size`

  #ifdef Py_GIL_DISABLED
  struct llist_node qsbr_node;
  uint64_t qsbr_goal;
  #endif

  // 64-bit 9 words, 32-bit 12 words, (+2 for secure)
  #if MI_INTPTR_SIZE==8 && !defined(Py_GIL_DISABLED)
  uintptr_t padding[1];
  #endif
} mi_page_t;



// ------------------------------------------------------
// Mimalloc segments contain mimalloc pages
// ------------------------------------------------------

typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,   // small blocks go into 64KiB pages inside a segment
  MI_PAGE_MEDIUM,  // medium blocks go into medium pages inside a segment
  MI_PAGE_LARGE,   // larger blocks go into a page of just one block
  MI_PAGE_HUGE,    // huge blocks (> 16 MiB) are put into a single page in a single segment.
} mi_page_kind_t;

typedef enum mi_segment_kind_e {
  MI_SEGMENT_NORMAL,  // MI_SEGMENT_SIZE size with pages inside.
  MI_SEGMENT_HUGE,    // > MI_LARGE_SIZE_MAX segment with just one huge page inside.
} mi_segment_kind_t;

// ------------------------------------------------------
// A segment holds a commit mask where a bit is set if
// the corresponding MI_COMMIT_SIZE area is committed.
// The MI_COMMIT_SIZE must be a multiple of the slice
// size. If it is equal we have the most fine-grained
// decommit (but setting it higher can be more efficient).
// The MI_MINIMAL_COMMIT_SIZE is the minimal amount that will
// be committed in one go, which can be set higher than
// MI_COMMIT_SIZE for efficiency (while the decommit mask
// is still tracked in fine-grained MI_COMMIT_SIZE chunks).
// ------------------------------------------------------

#define MI_MINIMAL_COMMIT_SIZE      (1*MI_SEGMENT_SLICE_SIZE)
#define MI_COMMIT_SIZE              (MI_SEGMENT_SLICE_SIZE)            // 64KiB
#define MI_COMMIT_MASK_BITS         (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS   MI_SIZE_BITS
#define MI_COMMIT_MASK_FIELD_COUNT  (MI_COMMIT_MASK_BITS / MI_COMMIT_MASK_FIELD_BITS)

#if (MI_COMMIT_MASK_BITS != (MI_COMMIT_MASK_FIELD_COUNT * MI_COMMIT_MASK_FIELD_BITS))
#error "the segment size must be exactly divisible by the (commit size * size_t bits)"
#endif

typedef struct mi_commit_mask_s {
  size_t mask[MI_COMMIT_MASK_FIELD_COUNT];
} mi_commit_mask_t;

typedef mi_page_t mi_slice_t;
typedef int64_t   mi_msecs_t;


// Memory can reside in arenas, be allocated directly from the OS, or be
// statically allocated. The memid keeps track of this.
typedef enum mi_memkind_e {
  MI_MEM_NONE,      // not allocated
  MI_MEM_EXTERNAL,  // not owned by mimalloc but provided externally (via `mi_manage_os_memory` for example)
  MI_MEM_STATIC,    // allocated in a static area and should not be freed (for arena meta data for example)
  MI_MEM_OS,        // allocated from the OS
  MI_MEM_OS_HUGE,   // allocated as huge OS pages
  MI_MEM_OS_REMAP,  // allocated in a remappable area (i.e. using `mremap`)
  MI_MEM_ARENA      // allocated from an arena (the usual case)
} mi_memkind_t;

static inline bool mi_memkind_is_os(mi_memkind_t memkind) {
  return (memkind >= MI_MEM_OS && memkind <= MI_MEM_OS_REMAP);
}

typedef struct mi_memid_os_info {
  void*  base;       // actual base address of the block (used for offset aligned allocations)
  size_t alignment;  // alignment at allocation
} mi_memid_os_info_t;

typedef struct mi_memid_arena_info {
  size_t        block_index;   // index in the arena
  mi_arena_id_t id;            // arena id (>= 1)
  bool          is_exclusive;  // the arena can only be used for specific arena allocations
} mi_memid_arena_info_t;

typedef struct mi_memid_s {
  union {
    mi_memid_os_info_t    os;     // only used for MI_MEM_OS
    mi_memid_arena_info_t arena;  // only used for MI_MEM_ARENA
  } mem;
  bool is_pinned;            // `true` if we cannot decommit/reset/protect in this memory (e.g. when allocated using large OS pages)
  bool initially_committed;  // `true` if the memory was originally allocated as committed
  bool initially_zero;       // `true` if the memory was originally zero initialized
  mi_memkind_t memkind;
} mi_memid_t;


// Segments are large allocated memory blocks (32MiB on 64-bit) from
// the OS. Inside segments we allocate fixed-size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
  // constant fields
  mi_memid_t memid;  // memory id for arena allocation
  bool       allow_decommit;
  bool       allow_purge;
  size_t     segment_size;

  // segment fields
  mi_msecs_t       purge_expire;
  mi_commit_mask_t purge_mask;
  mi_commit_mask_t commit_mask;

  _Atomic(struct mi_segment_s*) abandoned_next;

  // from here is zero initialized
  struct mi_segment_s* next;   // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)

  size_t abandoned;            // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
  size_t abandoned_visits;     // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
  size_t used;                 // count of pages in use
  uintptr_t cookie;            // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`

  size_t segment_slices;       // for huge segments this may be different from `MI_SLICES_PER_SEGMENT`
  size_t segment_info_slices;  // initial slices used for the segment info and possible guard pages.

  // layout like this to optimize access in `mi_free`
  mi_segment_kind_t kind;
  size_t slice_entries;        // entries in the `slices` array, at most `MI_SLICES_PER_SEGMENT`
  _Atomic(mi_threadid_t) thread_id;  // unique id of the thread owning this segment

  mi_slice_t slices[MI_SLICES_PER_SEGMENT+1];  // one more for huge blocks with large alignment
} mi_segment_t;

typedef uintptr_t mi_tagged_segment_t;

// Segments unowned by any thread are put in a shared pool
typedef struct mi_abandoned_pool_s {
  // This is a list of visited abandoned pages that were full at the time.
  // This list migrates to `abandoned` when that becomes NULL. The use of
  // this list reduces contention and the rate at which segments are visited.
  mi_decl_cache_align _Atomic(mi_segment_t*) abandoned_visited;  // = NULL

  // The abandoned page list (tagged as it supports pop)
  mi_decl_cache_align _Atomic(mi_tagged_segment_t) abandoned;    // = NULL

  // Maintain these for debug purposes (these counts may be a bit off)
  mi_decl_cache_align _Atomic(size_t) abandoned_count;
  mi_decl_cache_align _Atomic(size_t) abandoned_visited_count;

  // We also maintain a count of current readers of the abandoned list
  // in order to prevent resetting/decommitting segment memory if it might
  // still be read.
  mi_decl_cache_align _Atomic(size_t) abandoned_readers;  // = 0
} mi_abandoned_pool_t;


// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
// A heap just owns a set of pages for allocation and
// can only allocate/reallocate from the thread that created it.
// Freeing blocks can be done from any thread though.
// Per thread, the segments are shared among its heaps.
// Per thread, there is always a default heap that is
// used for allocation; it is initialized to statically
// point to an empty heap to avoid initialization checks
// in the fast path.
// ------------------------------------------------------

// Thread local data
typedef struct mi_tld_s mi_tld_t;

// Pages of a certain block size are held in a queue.
typedef struct mi_page_queue_s {
  mi_page_t* first;
  mi_page_t* last;
  size_t     block_size;
} mi_page_queue_t;

#define MI_BIN_FULL  (MI_BIN_HUGE+1)

// Random context
typedef struct mi_random_cxt_s {
  uint32_t input[16];
  uint32_t output[16];
  int      output_available;
  bool     weak;
} mi_random_ctx_t;


// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary;  // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;   // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif

#define MI_PAGES_DIRECT  (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)


// A heap owns a set of pages.
struct mi_heap_s {
  mi_tld_t*  tld;
  mi_page_t* pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points to a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t pages[MI_BIN_FULL + 1];         // queue of pages for each size class (or "bin")
  _Atomic(mi_block_t*) thread_delayed_free;
  mi_threadid_t thread_id;   // thread this heap belongs to
  mi_arena_id_t arena_id;    // arena id if the heap belongs to a specific arena (or 0)
  uintptr_t cookie;          // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t keys[2];         // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t random;    // random number context used for secure allocation
  size_t page_count;         // total number of pages in the `pages` queues.
  size_t page_retired_min;   // smallest retired index (retired pages are fully free, but still in the page queues)
  size_t page_retired_max;   // largest retired index into the `pages` array.
  mi_heap_t* next;           // list of heaps per thread
  bool no_reclaim;           // `true` if this heap should not reclaim abandoned pages
  uint8_t tag;               // custom identifier for this heap
  uint8_t debug_offset;      // number of bytes to preserve when filling freed or uninitialized memory
  bool page_use_qsbr;        // should freeing pages be delayed using QSBR
};



// ------------------------------------------------------
// Debug
// ------------------------------------------------------

#if !defined(MI_DEBUG_UNINIT)
#define MI_DEBUG_UNINIT   (0xD0)
#endif
#if !defined(MI_DEBUG_FREED)
#define MI_DEBUG_FREED    (0xDF)
#endif
#if !defined(MI_DEBUG_PADDING)
#define MI_DEBUG_PADDING  (0xDE)
#endif

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func);
#define mi_assert(expr)  ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
#define mi_assert(x)
#endif

#if (MI_DEBUG>1)
#define mi_assert_internal  mi_assert
#else
#define mi_assert_internal(x)
#endif

#if (MI_DEBUG>2)
#define mi_assert_expensive  mi_assert
#else
#define mi_assert_expensive(x)
#endif

// ------------------------------------------------------
// Statistics
// ------------------------------------------------------

#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

typedef struct mi_stat_count_s {
  int64_t allocated;
  int64_t freed;
  int64_t peak;
  int64_t current;
} mi_stat_count_t;

typedef struct mi_stat_counter_s {
  int64_t total;
  int64_t count;
} mi_stat_counter_t;

typedef struct mi_stats_s {
  mi_stat_count_t segments;
  mi_stat_count_t pages;
  mi_stat_count_t reserved;
  mi_stat_count_t committed;
  mi_stat_count_t reset;
  mi_stat_count_t purged;
  mi_stat_count_t page_committed;
  mi_stat_count_t segments_abandoned;
  mi_stat_count_t pages_abandoned;
  mi_stat_count_t threads;
  mi_stat_count_t normal;
  mi_stat_count_t huge;
  mi_stat_count_t large;
  mi_stat_count_t malloc;
  mi_stat_count_t segments_cache;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t reset_calls;
  mi_stat_counter_t purge_calls;
  mi_stat_counter_t page_no_retire;
  mi_stat_counter_t searches;
  mi_stat_counter_t normal_count;
  mi_stat_counter_t huge_count;
  mi_stat_counter_t large_count;
#if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_HUGE+1];
#endif
} mi_stats_t;


void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

#if (MI_STAT)
#define mi_stat_increase(stat,amount)          _mi_stat_increase(&(stat), amount)
#define mi_stat_decrease(stat,amount)          _mi_stat_decrease(&(stat), amount)
#define mi_stat_counter_increase(stat,amount)  _mi_stat_counter_increase(&(stat), amount)
#else
#define mi_stat_increase(stat,amount)          (void)0
#define mi_stat_decrease(stat,amount)          (void)0
#define mi_stat_counter_increase(stat,amount)  (void)0
#endif

#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase((heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)          mi_stat_increase((heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)          mi_stat_decrease((heap)->tld->stats.stat, amount)

// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------

// A "span" is an available range of slices. The span queues keep
// track of slice spans of at most the given `slice_count` (but more than the previous size class).
typedef struct mi_span_queue_s {
  mi_slice_t* first;
  mi_slice_t* last;
  size_t      slice_count;
} mi_span_queue_t;

#define MI_SEGMENT_BIN_MAX  (35)  // 35 == mi_segment_bin(MI_SLICES_PER_SEGMENT)

// OS thread local data
typedef struct mi_os_tld_s {
  size_t      region_idx;  // start point for next allocation
  mi_stats_t* stats;       // points to tld stats
} mi_os_tld_t;


// Segments thread local data
typedef struct mi_segments_tld_s {
  mi_span_queue_t spans[MI_SEGMENT_BIN_MAX+1];  // free slice spans inside segments
  size_t count;                    // current number of segments
  size_t peak_count;               // peak number of segments
  size_t current_size;             // current size of all segments
  size_t peak_size;                // peak size of all segments
  mi_stats_t* stats;               // points to tld stats
  mi_os_tld_t* os;                 // points to the os tld
  mi_abandoned_pool_t* abandoned;  // pool of abandoned segments
} mi_segments_tld_t;

// Thread local data
struct mi_tld_s {
  unsigned long long heartbeat;  // monotonic heartbeat count
  bool recurse;                  // true if deferred was called; used to prevent infinite recursion.
  mi_heap_t* heap_backing;       // backing heap of this thread (cannot be deleted)
  mi_heap_t* heaps;              // list of heaps in this thread (so we can abandon all when the thread terminates)
  mi_segments_tld_t segments;    // segment tld
  mi_os_tld_t os;                // os tld
  mi_stats_t stats;              // statistics
};

#endif  // MIMALLOC_TYPES_H
61
Dependencies/Python/include/internal/pycore_abstract.h
vendored
Normal file
@@ -0,0 +1,61 @@
#ifndef Py_INTERNAL_ABSTRACT_H
#define Py_INTERNAL_ABSTRACT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

// Fast inlined version of PyIndex_Check()
static inline int
_PyIndex_Check(PyObject *obj)
{
    PyNumberMethods *tp_as_number = Py_TYPE(obj)->tp_as_number;
    return (tp_as_number != NULL && tp_as_number->nb_index != NULL);
}
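/* A standalone sketch of the capability probe above: check an optional
   function pointer in a per-type method table before using it. The demo_*
   names are hypothetical stand-ins, not the CPython API. */

#include <stddef.h>
#include <stdio.h>

typedef struct { long (*nb_index)(void *self); } demo_number_methods;
typedef struct { demo_number_methods *tp_as_number; } demo_type;

static int demo_index_check(const demo_type *t) {
  return (t->tp_as_number != NULL && t->tp_as_number->nb_index != NULL);
}

static long demo_nb_index(void *self) { (void)self; return 7; }

int main(void) {
  demo_number_methods m = { demo_nb_index };
  demo_type with_index = { &m }, without_index = { NULL };
  printf("%d %d\n", demo_index_check(&with_index), demo_index_check(&without_index));
  return 0;
}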

PyObject *_PyNumber_PowerNoMod(PyObject *lhs, PyObject *rhs);
PyObject *_PyNumber_InPlacePowerNoMod(PyObject *lhs, PyObject *rhs);

extern int _PyObject_HasLen(PyObject *o);

/* === Sequence protocol ================================================ */

#define PY_ITERSEARCH_COUNT    1
#define PY_ITERSEARCH_INDEX    2
#define PY_ITERSEARCH_CONTAINS 3

/* Iterate over seq.

   Result depends on the operation:

   PY_ITERSEARCH_COUNT:    return # of times obj appears in seq; -1 if
                           error.
   PY_ITERSEARCH_INDEX:    return 0-based index of first occurrence of
                           obj in seq; set ValueError and return -1 if none found;
                           also return -1 on error.
   PY_ITERSEARCH_CONTAINS: return 1 if obj in seq, else 0; -1 on
                           error. */
extern Py_ssize_t _PySequence_IterSearch(PyObject *seq,
                                         PyObject *obj, int operation);

/* === Mapping protocol ================================================= */

extern int _PyObject_RealIsInstance(PyObject *inst, PyObject *cls);

extern int _PyObject_RealIsSubclass(PyObject *derived, PyObject *cls);

// Convert Python int to Py_ssize_t. Do nothing if the argument is None.
// Export for '_bisect' shared extension.
PyAPI_FUNC(int) _Py_convert_optional_to_ssize_t(PyObject *, void *);

// Same as PyNumber_Index() but can return an instance of a subclass of int.
// Export for 'math' shared extension.
PyAPI_FUNC(PyObject*) _PyNumber_Index(PyObject *o);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ABSTRACT_H */
112
Dependencies/Python/include/internal/pycore_asdl.h
vendored
Normal file
@@ -0,0 +1,112 @@
#ifndef Py_INTERNAL_ASDL_H
#define Py_INTERNAL_ASDL_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_pyarena.h"  // _PyArena_Malloc()

typedef PyObject * identifier;
typedef PyObject * string;
typedef PyObject * object;
typedef PyObject * constant;

/* It would be nice if the code generated by asdl_c.py was completely
   independent of Python, but it is a goal that requires too much work
   at this stage. So, for example, I'll represent identifiers as
   interned Python strings.
*/

#define _ASDL_SEQ_HEAD \
    Py_ssize_t size;   \
    void **elements;

typedef struct {
    _ASDL_SEQ_HEAD
} asdl_seq;

typedef struct {
    _ASDL_SEQ_HEAD
    void *typed_elements[1];
} asdl_generic_seq;

typedef struct {
    _ASDL_SEQ_HEAD
    PyObject *typed_elements[1];
} asdl_identifier_seq;

typedef struct {
    _ASDL_SEQ_HEAD
    int typed_elements[1];
} asdl_int_seq;

asdl_generic_seq *_Py_asdl_generic_seq_new(Py_ssize_t size, PyArena *arena);
asdl_identifier_seq *_Py_asdl_identifier_seq_new(Py_ssize_t size, PyArena *arena);
asdl_int_seq *_Py_asdl_int_seq_new(Py_ssize_t size, PyArena *arena);


#define GENERATE_ASDL_SEQ_CONSTRUCTOR(NAME, TYPE) \
asdl_ ## NAME ## _seq *_Py_asdl_ ## NAME ## _seq_new(Py_ssize_t size, PyArena *arena) \
{ \
    asdl_ ## NAME ## _seq *seq = NULL; \
    size_t n; \
    /* check size is sane */ \
    if (size < 0 || \
        (size && (((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))) { \
        PyErr_NoMemory(); \
        return NULL; \
    } \
    n = (size ? (sizeof(TYPE *) * (size - 1)) : 0); \
    /* check if size can be added safely */ \
    if (n > SIZE_MAX - sizeof(asdl_ ## NAME ## _seq)) { \
        PyErr_NoMemory(); \
        return NULL; \
    } \
    n += sizeof(asdl_ ## NAME ## _seq); \
    seq = (asdl_ ## NAME ## _seq *)_PyArena_Malloc(arena, n); \
    if (!seq) { \
        PyErr_NoMemory(); \
        return NULL; \
    } \
    memset(seq, 0, n); \
    seq->size = size; \
    seq->elements = (void**)seq->typed_elements; \
    return seq; \
}
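/* A runnable sketch of the overflow-checked "header + trailing array" sizing
   the macro above performs, shown with plain malloc instead of
   _PyArena_Malloc; demo_seq is a hypothetical stand-in for the generated
   sequence types. */

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { ptrdiff_t size; void **elements; void *typed_elements[1]; } demo_seq;

static demo_seq *demo_seq_new(ptrdiff_t size) {
  if (size < 0 || (size && ((size_t)size - 1) > (SIZE_MAX / sizeof(void *))))
    return NULL;                                                // size is sane
  size_t n = (size ? sizeof(void *) * ((size_t)size - 1) : 0);  // one element lives in the struct
  if (n > SIZE_MAX - sizeof(demo_seq))
    return NULL;                                                // adding the header cannot overflow
  demo_seq *seq = (demo_seq *)malloc(n + sizeof(demo_seq));
  if (!seq) return NULL;
  memset(seq, 0, n + sizeof(demo_seq));
  seq->size = size;
  seq->elements = seq->typed_elements;
  return seq;
}

int main(void) {
  demo_seq *s = demo_seq_new(3);
  if (s) { printf("size=%td\n", s->size); free(s); }
  return 0;
}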

#define asdl_seq_GET_UNTYPED(S, I) _Py_RVALUE((S)->elements[(I)])
#define asdl_seq_GET(S, I) _Py_RVALUE((S)->typed_elements[(I)])
#define asdl_seq_LEN(S) _Py_RVALUE(((S) == NULL ? 0 : (S)->size))

#ifdef Py_DEBUG
#  define asdl_seq_SET(S, I, V) \
    do { \
        Py_ssize_t _asdl_i = (I); \
        assert((S) != NULL); \
        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
        (S)->typed_elements[_asdl_i] = (V); \
    } while (0)
#else
#  define asdl_seq_SET(S, I, V) _Py_RVALUE((S)->typed_elements[(I)] = (V))
#endif

#ifdef Py_DEBUG
#  define asdl_seq_SET_UNTYPED(S, I, V) \
    do { \
        Py_ssize_t _asdl_i = (I); \
        assert((S) != NULL); \
        assert(0 <= _asdl_i && _asdl_i < (S)->size); \
        (S)->elements[_asdl_i] = (V); \
    } while (0)
#else
#  define asdl_seq_SET_UNTYPED(S, I, V) _Py_RVALUE((S)->elements[(I)] = (V))
#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ASDL_H */
926
Dependencies/Python/include/internal/pycore_ast.h
vendored
Normal file
@@ -0,0 +1,926 @@
// File automatically generated by Parser/asdl_c.py.

#ifndef Py_INTERNAL_AST_H
#define Py_INTERNAL_AST_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_asdl.h"  // _ASDL_SEQ_HEAD

typedef struct _mod *mod_ty;

typedef struct _stmt *stmt_ty;

typedef struct _expr *expr_ty;

typedef enum _expr_context { Load=1, Store=2, Del=3 } expr_context_ty;

typedef enum _boolop { And=1, Or=2 } boolop_ty;

typedef enum _operator { Add=1, Sub=2, Mult=3, MatMult=4, Div=5, Mod=6, Pow=7,
                         LShift=8, RShift=9, BitOr=10, BitXor=11, BitAnd=12,
                         FloorDiv=13 } operator_ty;

typedef enum _unaryop { Invert=1, Not=2, UAdd=3, USub=4 } unaryop_ty;

typedef enum _cmpop { Eq=1, NotEq=2, Lt=3, LtE=4, Gt=5, GtE=6, Is=7, IsNot=8,
                      In=9, NotIn=10 } cmpop_ty;

typedef struct _comprehension *comprehension_ty;

typedef struct _excepthandler *excepthandler_ty;

typedef struct _arguments *arguments_ty;

typedef struct _arg *arg_ty;

typedef struct _keyword *keyword_ty;

typedef struct _alias *alias_ty;

typedef struct _withitem *withitem_ty;

typedef struct _match_case *match_case_ty;

typedef struct _pattern *pattern_ty;

typedef struct _type_ignore *type_ignore_ty;

typedef struct _type_param *type_param_ty;


typedef struct {
    _ASDL_SEQ_HEAD
    mod_ty typed_elements[1];
} asdl_mod_seq;

asdl_mod_seq *_Py_asdl_mod_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    stmt_ty typed_elements[1];
} asdl_stmt_seq;

asdl_stmt_seq *_Py_asdl_stmt_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    expr_ty typed_elements[1];
} asdl_expr_seq;

asdl_expr_seq *_Py_asdl_expr_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    comprehension_ty typed_elements[1];
} asdl_comprehension_seq;

asdl_comprehension_seq *_Py_asdl_comprehension_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    excepthandler_ty typed_elements[1];
} asdl_excepthandler_seq;

asdl_excepthandler_seq *_Py_asdl_excepthandler_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    arguments_ty typed_elements[1];
} asdl_arguments_seq;

asdl_arguments_seq *_Py_asdl_arguments_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    arg_ty typed_elements[1];
} asdl_arg_seq;

asdl_arg_seq *_Py_asdl_arg_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    keyword_ty typed_elements[1];
} asdl_keyword_seq;

asdl_keyword_seq *_Py_asdl_keyword_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    alias_ty typed_elements[1];
} asdl_alias_seq;

asdl_alias_seq *_Py_asdl_alias_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    withitem_ty typed_elements[1];
} asdl_withitem_seq;

asdl_withitem_seq *_Py_asdl_withitem_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    match_case_ty typed_elements[1];
} asdl_match_case_seq;

asdl_match_case_seq *_Py_asdl_match_case_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    pattern_ty typed_elements[1];
} asdl_pattern_seq;

asdl_pattern_seq *_Py_asdl_pattern_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    type_ignore_ty typed_elements[1];
} asdl_type_ignore_seq;

asdl_type_ignore_seq *_Py_asdl_type_ignore_seq_new(Py_ssize_t size, PyArena *arena);

typedef struct {
    _ASDL_SEQ_HEAD
    type_param_ty typed_elements[1];
} asdl_type_param_seq;

asdl_type_param_seq *_Py_asdl_type_param_seq_new(Py_ssize_t size, PyArena *arena);


enum _mod_kind {Module_kind=1, Interactive_kind=2, Expression_kind=3,
                FunctionType_kind=4};
struct _mod {
    enum _mod_kind kind;
    union {
        struct {
            asdl_stmt_seq *body;
            asdl_type_ignore_seq *type_ignores;
        } Module;

        struct {
            asdl_stmt_seq *body;
        } Interactive;

        struct {
            expr_ty body;
        } Expression;

        struct {
            asdl_expr_seq *argtypes;
            expr_ty returns;
        } FunctionType;

    } v;
};
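/* A runnable sketch of how these tagged unions are meant to be read: dispatch
   on `kind` before touching the matching member of `v`. The demo_* names are
   illustrative, not the CPython AST API. */

#include <stdio.h>

enum demo_kind { Demo_Int_kind = 1, Demo_Pair_kind = 2 };
struct demo_node {
  enum demo_kind kind;
  union {
    struct { int value; } Int;
    struct { int first; int second; } Pair;
  } v;
};

int main(void) {
  struct demo_node d = { Demo_Pair_kind, { .Pair = { 1, 2 } } };
  switch (d.kind) {                     // always check kind first, as the AST consumers do
    case Demo_Int_kind:  printf("%d\n", d.v.Int.value); break;
    case Demo_Pair_kind: printf("%d,%d\n", d.v.Pair.first, d.v.Pair.second); break;
  }
  return 0;
}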
|
||||
|
||||
enum _stmt_kind {FunctionDef_kind=1, AsyncFunctionDef_kind=2, ClassDef_kind=3,
|
||||
Return_kind=4, Delete_kind=5, Assign_kind=6,
|
||||
TypeAlias_kind=7, AugAssign_kind=8, AnnAssign_kind=9,
|
||||
For_kind=10, AsyncFor_kind=11, While_kind=12, If_kind=13,
|
||||
With_kind=14, AsyncWith_kind=15, Match_kind=16,
|
||||
Raise_kind=17, Try_kind=18, TryStar_kind=19, Assert_kind=20,
|
||||
Import_kind=21, ImportFrom_kind=22, Global_kind=23,
|
||||
Nonlocal_kind=24, Expr_kind=25, Pass_kind=26, Break_kind=27,
|
||||
Continue_kind=28};
|
||||
struct _stmt {
|
||||
enum _stmt_kind kind;
|
||||
union {
|
||||
struct {
|
||||
identifier name;
|
||||
arguments_ty args;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_expr_seq *decorator_list;
|
||||
expr_ty returns;
|
||||
string type_comment;
|
||||
asdl_type_param_seq *type_params;
|
||||
} FunctionDef;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
arguments_ty args;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_expr_seq *decorator_list;
|
||||
expr_ty returns;
|
||||
string type_comment;
|
||||
asdl_type_param_seq *type_params;
|
||||
} AsyncFunctionDef;
|
||||
|
||||
struct {
|
||||
identifier name;
|
||||
asdl_expr_seq *bases;
|
||||
asdl_keyword_seq *keywords;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_expr_seq *decorator_list;
|
||||
asdl_type_param_seq *type_params;
|
||||
} ClassDef;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} Return;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *targets;
|
||||
} Delete;
|
||||
|
||||
struct {
|
||||
asdl_expr_seq *targets;
|
||||
expr_ty value;
|
||||
string type_comment;
|
||||
} Assign;
|
||||
|
||||
struct {
|
||||
expr_ty name;
|
||||
asdl_type_param_seq *type_params;
|
||||
expr_ty value;
|
||||
} TypeAlias;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
operator_ty op;
|
||||
expr_ty value;
|
||||
} AugAssign;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty annotation;
|
||||
expr_ty value;
|
||||
int simple;
|
||||
} AnnAssign;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty iter;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
string type_comment;
|
||||
} For;
|
||||
|
||||
struct {
|
||||
expr_ty target;
|
||||
expr_ty iter;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
string type_comment;
|
||||
} AsyncFor;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
} While;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
asdl_stmt_seq *body;
|
||||
asdl_stmt_seq *orelse;
|
||||
} If;
|
||||
|
||||
struct {
|
||||
asdl_withitem_seq *items;
|
||||
asdl_stmt_seq *body;
|
||||
string type_comment;
|
||||
} With;
|
||||
|
||||
struct {
|
||||
asdl_withitem_seq *items;
|
||||
asdl_stmt_seq *body;
|
||||
string type_comment;
|
||||
} AsyncWith;
|
||||
|
||||
struct {
|
||||
expr_ty subject;
|
||||
asdl_match_case_seq *cases;
|
||||
} Match;
|
||||
|
||||
struct {
|
||||
expr_ty exc;
|
||||
expr_ty cause;
|
||||
} Raise;
|
||||
|
||||
struct {
|
||||
asdl_stmt_seq *body;
|
||||
asdl_excepthandler_seq *handlers;
|
||||
asdl_stmt_seq *orelse;
|
||||
asdl_stmt_seq *finalbody;
|
||||
} Try;
|
||||
|
||||
struct {
|
||||
asdl_stmt_seq *body;
|
||||
asdl_excepthandler_seq *handlers;
|
||||
asdl_stmt_seq *orelse;
|
||||
asdl_stmt_seq *finalbody;
|
||||
} TryStar;
|
||||
|
||||
struct {
|
||||
expr_ty test;
|
||||
expr_ty msg;
|
||||
} Assert;
|
||||
|
||||
struct {
|
||||
asdl_alias_seq *names;
|
||||
} Import;
|
||||
|
||||
struct {
|
||||
identifier module;
|
||||
asdl_alias_seq *names;
|
||||
int level;
|
||||
} ImportFrom;
|
||||
|
||||
struct {
|
||||
asdl_identifier_seq *names;
|
||||
} Global;
|
||||
|
||||
struct {
|
||||
asdl_identifier_seq *names;
|
||||
} Nonlocal;
|
||||
|
||||
struct {
|
||||
expr_ty value;
|
||||
} Expr;
|
||||
|
||||
} v;
|
||||
int lineno;
|
||||
int col_offset;
|
||||
int end_lineno;
|
||||
int end_col_offset;
|
||||
};

enum _expr_kind {BoolOp_kind=1, NamedExpr_kind=2, BinOp_kind=3, UnaryOp_kind=4,
                 Lambda_kind=5, IfExp_kind=6, Dict_kind=7, Set_kind=8,
                 ListComp_kind=9, SetComp_kind=10, DictComp_kind=11,
                 GeneratorExp_kind=12, Await_kind=13, Yield_kind=14,
                 YieldFrom_kind=15, Compare_kind=16, Call_kind=17,
                 FormattedValue_kind=18, JoinedStr_kind=19, Constant_kind=20,
                 Attribute_kind=21, Subscript_kind=22, Starred_kind=23,
                 Name_kind=24, List_kind=25, Tuple_kind=26, Slice_kind=27};
struct _expr {
    enum _expr_kind kind;
    union {
        struct {
            boolop_ty op;
            asdl_expr_seq *values;
        } BoolOp;

        struct {
            expr_ty target;
            expr_ty value;
        } NamedExpr;

        struct {
            expr_ty left;
            operator_ty op;
            expr_ty right;
        } BinOp;

        struct {
            unaryop_ty op;
            expr_ty operand;
        } UnaryOp;

        struct {
            arguments_ty args;
            expr_ty body;
        } Lambda;

        struct {
            expr_ty test;
            expr_ty body;
            expr_ty orelse;
        } IfExp;

        struct {
            asdl_expr_seq *keys;
            asdl_expr_seq *values;
        } Dict;

        struct {
            asdl_expr_seq *elts;
        } Set;

        struct {
            expr_ty elt;
            asdl_comprehension_seq *generators;
        } ListComp;

        struct {
            expr_ty elt;
            asdl_comprehension_seq *generators;
        } SetComp;

        struct {
            expr_ty key;
            expr_ty value;
            asdl_comprehension_seq *generators;
        } DictComp;

        struct {
            expr_ty elt;
            asdl_comprehension_seq *generators;
        } GeneratorExp;

        struct {
            expr_ty value;
        } Await;

        struct {
            expr_ty value;
        } Yield;

        struct {
            expr_ty value;
        } YieldFrom;

        struct {
            expr_ty left;
            asdl_int_seq *ops;
            asdl_expr_seq *comparators;
        } Compare;

        struct {
            expr_ty func;
            asdl_expr_seq *args;
            asdl_keyword_seq *keywords;
        } Call;

        struct {
            expr_ty value;
            int conversion;
            expr_ty format_spec;
        } FormattedValue;

        struct {
            asdl_expr_seq *values;
        } JoinedStr;

        struct {
            constant value;
            string kind;
        } Constant;

        struct {
            expr_ty value;
            identifier attr;
            expr_context_ty ctx;
        } Attribute;

        struct {
            expr_ty value;
            expr_ty slice;
            expr_context_ty ctx;
        } Subscript;

        struct {
            expr_ty value;
            expr_context_ty ctx;
        } Starred;

        struct {
            identifier id;
            expr_context_ty ctx;
        } Name;

        struct {
            asdl_expr_seq *elts;
            expr_context_ty ctx;
        } List;

        struct {
            asdl_expr_seq *elts;
            expr_context_ty ctx;
        } Tuple;

        struct {
            expr_ty lower;
            expr_ty upper;
            expr_ty step;
        } Slice;

    } v;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

struct _comprehension {
    expr_ty target;
    expr_ty iter;
    asdl_expr_seq *ifs;
    int is_async;
};

enum _excepthandler_kind {ExceptHandler_kind=1};
struct _excepthandler {
    enum _excepthandler_kind kind;
    union {
        struct {
            expr_ty type;
            identifier name;
            asdl_stmt_seq *body;
        } ExceptHandler;

    } v;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

struct _arguments {
    asdl_arg_seq *posonlyargs;
    asdl_arg_seq *args;
    arg_ty vararg;
    asdl_arg_seq *kwonlyargs;
    asdl_expr_seq *kw_defaults;
    arg_ty kwarg;
    asdl_expr_seq *defaults;
};

struct _arg {
    identifier arg;
    expr_ty annotation;
    string type_comment;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

struct _keyword {
    identifier arg;
    expr_ty value;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

struct _alias {
    identifier name;
    identifier asname;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

struct _withitem {
    expr_ty context_expr;
    expr_ty optional_vars;
};

struct _match_case {
    pattern_ty pattern;
    expr_ty guard;
    asdl_stmt_seq *body;
};

enum _pattern_kind {MatchValue_kind=1, MatchSingleton_kind=2,
                    MatchSequence_kind=3, MatchMapping_kind=4,
                    MatchClass_kind=5, MatchStar_kind=6, MatchAs_kind=7,
                    MatchOr_kind=8};
struct _pattern {
    enum _pattern_kind kind;
    union {
        struct {
            expr_ty value;
        } MatchValue;

        struct {
            constant value;
        } MatchSingleton;

        struct {
            asdl_pattern_seq *patterns;
        } MatchSequence;

        struct {
            asdl_expr_seq *keys;
            asdl_pattern_seq *patterns;
            identifier rest;
        } MatchMapping;

        struct {
            expr_ty cls;
            asdl_pattern_seq *patterns;
            asdl_identifier_seq *kwd_attrs;
            asdl_pattern_seq *kwd_patterns;
        } MatchClass;

        struct {
            identifier name;
        } MatchStar;

        struct {
            pattern_ty pattern;
            identifier name;
        } MatchAs;

        struct {
            asdl_pattern_seq *patterns;
        } MatchOr;

    } v;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

enum _type_ignore_kind {TypeIgnore_kind=1};
struct _type_ignore {
    enum _type_ignore_kind kind;
    union {
        struct {
            int lineno;
            string tag;
        } TypeIgnore;

    } v;
};

enum _type_param_kind {TypeVar_kind=1, ParamSpec_kind=2, TypeVarTuple_kind=3};
struct _type_param {
    enum _type_param_kind kind;
    union {
        struct {
            identifier name;
            expr_ty bound;
            expr_ty default_value;
        } TypeVar;

        struct {
            identifier name;
            expr_ty default_value;
        } ParamSpec;

        struct {
            identifier name;
            expr_ty default_value;
        } TypeVarTuple;

    } v;
    int lineno;
    int col_offset;
    int end_lineno;
    int end_col_offset;
};

// Note: these macros affect function definitions, not only call sites.
mod_ty _PyAST_Module(asdl_stmt_seq *body, asdl_type_ignore_seq *type_ignores,
                     PyArena *arena);
mod_ty _PyAST_Interactive(asdl_stmt_seq *body, PyArena *arena);
mod_ty _PyAST_Expression(expr_ty body, PyArena *arena);
mod_ty _PyAST_FunctionType(asdl_expr_seq *argtypes, expr_ty returns, PyArena *arena);
stmt_ty _PyAST_FunctionDef(identifier name, arguments_ty args, asdl_stmt_seq *body,
                           asdl_expr_seq *decorator_list, expr_ty returns,
                           string type_comment, asdl_type_param_seq *type_params,
                           int lineno, int col_offset, int end_lineno,
                           int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncFunctionDef(identifier name, arguments_ty args, asdl_stmt_seq *body,
                                asdl_expr_seq *decorator_list, expr_ty returns,
                                string type_comment, asdl_type_param_seq *type_params,
                                int lineno, int col_offset, int end_lineno,
                                int end_col_offset, PyArena *arena);
stmt_ty _PyAST_ClassDef(identifier name, asdl_expr_seq *bases,
                        asdl_keyword_seq *keywords, asdl_stmt_seq *body,
                        asdl_expr_seq *decorator_list, asdl_type_param_seq *type_params,
                        int lineno, int col_offset, int end_lineno,
                        int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Return(expr_ty value, int lineno, int col_offset, int end_lineno,
                      int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Delete(asdl_expr_seq *targets, int lineno, int col_offset,
                      int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Assign(asdl_expr_seq *targets, expr_ty value, string type_comment,
                      int lineno, int col_offset, int end_lineno,
                      int end_col_offset, PyArena *arena);
stmt_ty _PyAST_TypeAlias(expr_ty name, asdl_type_param_seq *type_params,
                         expr_ty value, int lineno, int col_offset,
                         int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AugAssign(expr_ty target, operator_ty op, expr_ty value,
                         int lineno, int col_offset, int end_lineno,
                         int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AnnAssign(expr_ty target, expr_ty annotation, expr_ty value,
                         int simple, int lineno, int col_offset, int end_lineno,
                         int end_col_offset, PyArena *arena);
stmt_ty _PyAST_For(expr_ty target, expr_ty iter, asdl_stmt_seq *body,
                   asdl_stmt_seq *orelse, string type_comment, int lineno,
                   int col_offset, int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncFor(expr_ty target, expr_ty iter, asdl_stmt_seq *body,
                        asdl_stmt_seq *orelse, string type_comment, int lineno,
                        int col_offset, int end_lineno, int end_col_offset,
                        PyArena *arena);
stmt_ty _PyAST_While(expr_ty test, asdl_stmt_seq *body, asdl_stmt_seq *orelse,
                     int lineno, int col_offset, int end_lineno,
                     int end_col_offset, PyArena *arena);
stmt_ty _PyAST_If(expr_ty test, asdl_stmt_seq *body, asdl_stmt_seq *orelse,
                  int lineno, int col_offset, int end_lineno,
                  int end_col_offset, PyArena *arena);
stmt_ty _PyAST_With(asdl_withitem_seq *items, asdl_stmt_seq *body,
                    string type_comment, int lineno, int col_offset,
                    int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_AsyncWith(asdl_withitem_seq *items, asdl_stmt_seq *body,
                         string type_comment, int lineno, int col_offset,
                         int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Match(expr_ty subject, asdl_match_case_seq *cases, int lineno,
                     int col_offset, int end_lineno, int end_col_offset,
                     PyArena *arena);
stmt_ty _PyAST_Raise(expr_ty exc, expr_ty cause, int lineno, int col_offset,
                     int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Try(asdl_stmt_seq *body, asdl_excepthandler_seq *handlers,
                   asdl_stmt_seq *orelse, asdl_stmt_seq *finalbody, int lineno,
                   int col_offset, int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_TryStar(asdl_stmt_seq *body, asdl_excepthandler_seq *handlers,
                       asdl_stmt_seq *orelse, asdl_stmt_seq *finalbody, int lineno,
                       int col_offset, int end_lineno, int end_col_offset,
                       PyArena *arena);
stmt_ty _PyAST_Assert(expr_ty test, expr_ty msg, int lineno, int col_offset,
                      int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Import(asdl_alias_seq *names, int lineno, int col_offset,
                      int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_ImportFrom(identifier module, asdl_alias_seq *names, int level,
                          int lineno, int col_offset, int end_lineno,
                          int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Global(asdl_identifier_seq *names, int lineno, int col_offset,
                      int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Nonlocal(asdl_identifier_seq *names, int lineno, int col_offset,
                        int end_lineno, int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Expr(expr_ty value, int lineno, int col_offset, int end_lineno,
                    int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Pass(int lineno, int col_offset, int end_lineno,
                    int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Break(int lineno, int col_offset, int end_lineno,
                     int end_col_offset, PyArena *arena);
stmt_ty _PyAST_Continue(int lineno, int col_offset, int end_lineno,
                        int end_col_offset, PyArena *arena);
expr_ty _PyAST_BoolOp(boolop_ty op, asdl_expr_seq *values, int lineno,
                      int col_offset, int end_lineno, int end_col_offset,
                      PyArena *arena);
expr_ty _PyAST_NamedExpr(expr_ty target, expr_ty value, int lineno,
                         int col_offset, int end_lineno, int end_col_offset,
                         PyArena *arena);
expr_ty _PyAST_BinOp(expr_ty left, operator_ty op, expr_ty right, int lineno,
                     int col_offset, int end_lineno, int end_col_offset,
                     PyArena *arena);
expr_ty _PyAST_UnaryOp(unaryop_ty op, expr_ty operand, int lineno,
                       int col_offset, int end_lineno, int end_col_offset,
                       PyArena *arena);
expr_ty _PyAST_Lambda(arguments_ty args, expr_ty body, int lineno,
                      int col_offset, int end_lineno, int end_col_offset,
                      PyArena *arena);
expr_ty _PyAST_IfExp(expr_ty test, expr_ty body, expr_ty orelse, int lineno,
                     int col_offset, int end_lineno, int end_col_offset,
                     PyArena *arena);
expr_ty _PyAST_Dict(asdl_expr_seq *keys, asdl_expr_seq *values, int lineno,
                    int col_offset, int end_lineno, int end_col_offset,
                    PyArena *arena);
expr_ty _PyAST_Set(asdl_expr_seq *elts, int lineno, int col_offset,
                   int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_ListComp(expr_ty elt, asdl_comprehension_seq *generators,
                        int lineno, int col_offset, int end_lineno,
                        int end_col_offset, PyArena *arena);
expr_ty _PyAST_SetComp(expr_ty elt, asdl_comprehension_seq *generators,
                       int lineno, int col_offset, int end_lineno,
                       int end_col_offset, PyArena *arena);
expr_ty _PyAST_DictComp(expr_ty key, expr_ty value,
                        asdl_comprehension_seq *generators, int lineno,
                        int col_offset, int end_lineno, int end_col_offset,
                        PyArena *arena);
expr_ty _PyAST_GeneratorExp(expr_ty elt, asdl_comprehension_seq *generators,
                            int lineno, int col_offset, int end_lineno,
                            int end_col_offset, PyArena *arena);
expr_ty _PyAST_Await(expr_ty value, int lineno, int col_offset, int end_lineno,
                     int end_col_offset, PyArena *arena);
expr_ty _PyAST_Yield(expr_ty value, int lineno, int col_offset, int end_lineno,
                     int end_col_offset, PyArena *arena);
expr_ty _PyAST_YieldFrom(expr_ty value, int lineno, int col_offset,
                         int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Compare(expr_ty left, asdl_int_seq *ops,
                       asdl_expr_seq *comparators, int lineno, int col_offset,
                       int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Call(expr_ty func, asdl_expr_seq *args,
                    asdl_keyword_seq *keywords, int lineno, int col_offset,
                    int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_FormattedValue(expr_ty value, int conversion, expr_ty format_spec,
                              int lineno, int col_offset, int end_lineno,
                              int end_col_offset, PyArena *arena);
expr_ty _PyAST_JoinedStr(asdl_expr_seq *values, int lineno, int col_offset,
                         int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Constant(constant value, string kind, int lineno, int col_offset,
                        int end_lineno, int end_col_offset, PyArena *arena);
expr_ty _PyAST_Attribute(expr_ty value, identifier attr, expr_context_ty ctx,
                         int lineno, int col_offset, int end_lineno,
                         int end_col_offset, PyArena *arena);
expr_ty _PyAST_Subscript(expr_ty value, expr_ty slice, expr_context_ty ctx,
                         int lineno, int col_offset, int end_lineno,
                         int end_col_offset, PyArena *arena);
expr_ty _PyAST_Starred(expr_ty value, expr_context_ty ctx, int lineno,
                       int col_offset, int end_lineno, int end_col_offset,
                       PyArena *arena);
expr_ty _PyAST_Name(identifier id, expr_context_ty ctx, int lineno,
                    int col_offset, int end_lineno, int end_col_offset,
                    PyArena *arena);
expr_ty _PyAST_List(asdl_expr_seq *elts, expr_context_ty ctx, int lineno,
                    int col_offset, int end_lineno, int end_col_offset,
                    PyArena *arena);
expr_ty _PyAST_Tuple(asdl_expr_seq *elts, expr_context_ty ctx, int lineno,
                     int col_offset, int end_lineno, int end_col_offset,
                     PyArena *arena);
expr_ty _PyAST_Slice(expr_ty lower, expr_ty upper, expr_ty step, int lineno,
                     int col_offset, int end_lineno, int end_col_offset,
                     PyArena *arena);
comprehension_ty _PyAST_comprehension(expr_ty target, expr_ty iter,
                                      asdl_expr_seq *ifs, int is_async,
                                      PyArena *arena);
excepthandler_ty _PyAST_ExceptHandler(expr_ty type, identifier name,
                                      asdl_stmt_seq *body, int lineno,
                                      int col_offset, int end_lineno,
                                      int end_col_offset, PyArena *arena);
arguments_ty _PyAST_arguments(asdl_arg_seq *posonlyargs, asdl_arg_seq *args,
                              arg_ty vararg, asdl_arg_seq *kwonlyargs,
                              asdl_expr_seq *kw_defaults, arg_ty kwarg,
                              asdl_expr_seq *defaults, PyArena *arena);
arg_ty _PyAST_arg(identifier arg, expr_ty annotation, string type_comment,
                  int lineno, int col_offset, int end_lineno,
                  int end_col_offset, PyArena *arena);
keyword_ty _PyAST_keyword(identifier arg, expr_ty value, int lineno,
                          int col_offset, int end_lineno, int end_col_offset,
                          PyArena *arena);
alias_ty _PyAST_alias(identifier name, identifier asname, int lineno,
                      int col_offset, int end_lineno, int end_col_offset,
                      PyArena *arena);
withitem_ty _PyAST_withitem(expr_ty context_expr, expr_ty optional_vars,
                            PyArena *arena);
match_case_ty _PyAST_match_case(pattern_ty pattern, expr_ty guard,
                                asdl_stmt_seq *body, PyArena *arena);
pattern_ty _PyAST_MatchValue(expr_ty value, int lineno, int col_offset,
                             int end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchSingleton(constant value, int lineno, int col_offset,
                                 int end_lineno, int end_col_offset,
                                 PyArena *arena);
pattern_ty _PyAST_MatchSequence(asdl_pattern_seq *patterns, int lineno,
                                int col_offset, int end_lineno,
                                int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchMapping(asdl_expr_seq *keys, asdl_pattern_seq *patterns,
                               identifier rest, int lineno, int col_offset,
                               int end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchClass(expr_ty cls, asdl_pattern_seq *patterns,
                             asdl_identifier_seq *kwd_attrs,
                             asdl_pattern_seq *kwd_patterns, int lineno,
                             int col_offset, int end_lineno,
                             int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchStar(identifier name, int lineno, int col_offset,
                            int end_lineno, int end_col_offset, PyArena *arena);
pattern_ty _PyAST_MatchAs(pattern_ty pattern, identifier name, int lineno,
                          int col_offset, int end_lineno, int end_col_offset,
                          PyArena *arena);
pattern_ty _PyAST_MatchOr(asdl_pattern_seq *patterns, int lineno,
                          int col_offset, int end_lineno, int end_col_offset,
                          PyArena *arena);
type_ignore_ty _PyAST_TypeIgnore(int lineno, string tag, PyArena *arena);
type_param_ty _PyAST_TypeVar(identifier name, expr_ty bound,
                             expr_ty default_value, int lineno, int col_offset,
                             int end_lineno, int end_col_offset, PyArena *arena);
type_param_ty _PyAST_ParamSpec(identifier name, expr_ty default_value,
                               int lineno, int col_offset, int end_lineno,
                               int end_col_offset, PyArena *arena);
type_param_ty _PyAST_TypeVarTuple(identifier name, expr_ty default_value,
                                  int lineno, int col_offset, int end_lineno,
                                  int end_col_offset, PyArena *arena);


PyObject* PyAST_mod2obj(mod_ty t);
mod_ty PyAST_obj2mod(PyObject* ast, PyArena* arena, int mode);
int PyAST_Check(PyObject* obj);

extern int _PyAST_Validate(mod_ty);

/* _PyAST_ExprAsUnicode is defined in ast_unparse.c */
extern PyObject* _PyAST_ExprAsUnicode(expr_ty);

/* Return the borrowed reference to the first literal string in the
   sequence of statements or NULL if it doesn't start from a literal string.
   Doesn't set exception. */
extern PyObject* _PyAST_GetDocString(asdl_stmt_seq *);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_H */
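The three PyAST_* entry points above convert between the C-level mod_ty and Python-level ast.AST objects. A hedged round-trip sketch (error handling condensed; the mode constant 0 is assumed here to request a Module/"exec" result, and `mod`/`arena` are assumed to be a valid AST and a live PyArena):

// Sketch: convert a C AST to a Python ast object and back again.
PyObject *py_ast = PyAST_mod2obj(mod);
if (py_ast != NULL && PyAST_Check(py_ast)) {
    // mode 0 assumed to mean "expect a Module" (exec input).
    mod_ty round_tripped = PyAST_obj2mod(py_ast, arena, 0);
    if (round_tripped != NULL && _PyAST_Validate(round_tripped)) {
        // round_tripped is arena-allocated; it is freed with the arena.
    }
}
Py_XDECREF(py_ast);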
268
Dependencies/Python/include/internal/pycore_ast_state.h
vendored
Normal file
@@ -0,0 +1,268 @@
// File automatically generated by Parser/asdl_c.py.

#ifndef Py_INTERNAL_AST_STATE_H
#define Py_INTERNAL_AST_STATE_H

#include "pycore_lock.h"    // _PyOnceFlag

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

struct ast_state {
    _PyOnceFlag once;
    int finalized;
    PyObject *AST_type;
    PyObject *Add_singleton;
    PyObject *Add_type;
    PyObject *And_singleton;
    PyObject *And_type;
    PyObject *AnnAssign_type;
    PyObject *Assert_type;
    PyObject *Assign_type;
    PyObject *AsyncFor_type;
    PyObject *AsyncFunctionDef_type;
    PyObject *AsyncWith_type;
    PyObject *Attribute_type;
    PyObject *AugAssign_type;
    PyObject *Await_type;
    PyObject *BinOp_type;
    PyObject *BitAnd_singleton;
    PyObject *BitAnd_type;
    PyObject *BitOr_singleton;
    PyObject *BitOr_type;
    PyObject *BitXor_singleton;
    PyObject *BitXor_type;
    PyObject *BoolOp_type;
    PyObject *Break_type;
    PyObject *Call_type;
    PyObject *ClassDef_type;
    PyObject *Compare_type;
    PyObject *Constant_type;
    PyObject *Continue_type;
    PyObject *Del_singleton;
    PyObject *Del_type;
    PyObject *Delete_type;
    PyObject *DictComp_type;
    PyObject *Dict_type;
    PyObject *Div_singleton;
    PyObject *Div_type;
    PyObject *Eq_singleton;
    PyObject *Eq_type;
    PyObject *ExceptHandler_type;
    PyObject *Expr_type;
    PyObject *Expression_type;
    PyObject *FloorDiv_singleton;
    PyObject *FloorDiv_type;
    PyObject *For_type;
    PyObject *FormattedValue_type;
    PyObject *FunctionDef_type;
    PyObject *FunctionType_type;
    PyObject *GeneratorExp_type;
    PyObject *Global_type;
    PyObject *GtE_singleton;
    PyObject *GtE_type;
    PyObject *Gt_singleton;
    PyObject *Gt_type;
    PyObject *IfExp_type;
    PyObject *If_type;
    PyObject *ImportFrom_type;
    PyObject *Import_type;
    PyObject *In_singleton;
    PyObject *In_type;
    PyObject *Interactive_type;
    PyObject *Invert_singleton;
    PyObject *Invert_type;
    PyObject *IsNot_singleton;
    PyObject *IsNot_type;
    PyObject *Is_singleton;
    PyObject *Is_type;
    PyObject *JoinedStr_type;
    PyObject *LShift_singleton;
    PyObject *LShift_type;
    PyObject *Lambda_type;
    PyObject *ListComp_type;
    PyObject *List_type;
    PyObject *Load_singleton;
    PyObject *Load_type;
    PyObject *LtE_singleton;
    PyObject *LtE_type;
    PyObject *Lt_singleton;
    PyObject *Lt_type;
    PyObject *MatMult_singleton;
    PyObject *MatMult_type;
    PyObject *MatchAs_type;
    PyObject *MatchClass_type;
    PyObject *MatchMapping_type;
    PyObject *MatchOr_type;
    PyObject *MatchSequence_type;
    PyObject *MatchSingleton_type;
    PyObject *MatchStar_type;
    PyObject *MatchValue_type;
    PyObject *Match_type;
    PyObject *Mod_singleton;
    PyObject *Mod_type;
    PyObject *Module_type;
    PyObject *Mult_singleton;
    PyObject *Mult_type;
    PyObject *Name_type;
    PyObject *NamedExpr_type;
    PyObject *Nonlocal_type;
    PyObject *NotEq_singleton;
    PyObject *NotEq_type;
    PyObject *NotIn_singleton;
    PyObject *NotIn_type;
    PyObject *Not_singleton;
    PyObject *Not_type;
    PyObject *Or_singleton;
    PyObject *Or_type;
    PyObject *ParamSpec_type;
    PyObject *Pass_type;
    PyObject *Pow_singleton;
    PyObject *Pow_type;
    PyObject *RShift_singleton;
    PyObject *RShift_type;
    PyObject *Raise_type;
    PyObject *Return_type;
    PyObject *SetComp_type;
    PyObject *Set_type;
    PyObject *Slice_type;
    PyObject *Starred_type;
    PyObject *Store_singleton;
    PyObject *Store_type;
    PyObject *Sub_singleton;
    PyObject *Sub_type;
    PyObject *Subscript_type;
    PyObject *TryStar_type;
    PyObject *Try_type;
    PyObject *Tuple_type;
    PyObject *TypeAlias_type;
    PyObject *TypeIgnore_type;
    PyObject *TypeVarTuple_type;
    PyObject *TypeVar_type;
    PyObject *UAdd_singleton;
    PyObject *UAdd_type;
    PyObject *USub_singleton;
    PyObject *USub_type;
    PyObject *UnaryOp_type;
    PyObject *While_type;
    PyObject *With_type;
    PyObject *YieldFrom_type;
    PyObject *Yield_type;
    PyObject *__dict__;
    PyObject *__doc__;
    PyObject *__match_args__;
    PyObject *__module__;
    PyObject *_attributes;
    PyObject *_fields;
    PyObject *alias_type;
    PyObject *annotation;
    PyObject *arg;
    PyObject *arg_type;
    PyObject *args;
    PyObject *argtypes;
    PyObject *arguments_type;
    PyObject *asname;
    PyObject *ast;
    PyObject *attr;
    PyObject *bases;
    PyObject *body;
    PyObject *boolop_type;
    PyObject *bound;
    PyObject *cases;
    PyObject *cause;
    PyObject *cls;
    PyObject *cmpop_type;
    PyObject *col_offset;
    PyObject *comparators;
    PyObject *comprehension_type;
    PyObject *context_expr;
    PyObject *conversion;
    PyObject *ctx;
    PyObject *decorator_list;
    PyObject *default_value;
    PyObject *defaults;
    PyObject *elt;
    PyObject *elts;
    PyObject *end_col_offset;
    PyObject *end_lineno;
    PyObject *exc;
    PyObject *excepthandler_type;
    PyObject *expr_context_type;
    PyObject *expr_type;
    PyObject *finalbody;
    PyObject *format_spec;
    PyObject *func;
    PyObject *generators;
    PyObject *guard;
    PyObject *handlers;
    PyObject *id;
    PyObject *ifs;
    PyObject *is_async;
    PyObject *items;
    PyObject *iter;
    PyObject *key;
    PyObject *keys;
    PyObject *keyword_type;
    PyObject *keywords;
    PyObject *kind;
    PyObject *kw_defaults;
    PyObject *kwarg;
    PyObject *kwd_attrs;
    PyObject *kwd_patterns;
    PyObject *kwonlyargs;
    PyObject *left;
    PyObject *level;
    PyObject *lineno;
    PyObject *lower;
    PyObject *match_case_type;
    PyObject *mod_type;
    PyObject *module;
    PyObject *msg;
    PyObject *name;
    PyObject *names;
    PyObject *op;
    PyObject *operand;
    PyObject *operator_type;
    PyObject *ops;
    PyObject *optional_vars;
    PyObject *orelse;
    PyObject *pattern;
    PyObject *pattern_type;
    PyObject *patterns;
    PyObject *posonlyargs;
    PyObject *rest;
    PyObject *returns;
    PyObject *right;
    PyObject *simple;
    PyObject *slice;
    PyObject *step;
    PyObject *stmt_type;
    PyObject *subject;
    PyObject *tag;
    PyObject *target;
    PyObject *targets;
    PyObject *test;
    PyObject *type;
    PyObject *type_comment;
    PyObject *type_ignore_type;
    PyObject *type_ignores;
    PyObject *type_param_type;
    PyObject *type_params;
    PyObject *unaryop_type;
    PyObject *upper;
    PyObject *value;
    PyObject *values;
    PyObject *vararg;
    PyObject *withitem_type;
};

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_AST_STATE_H */
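The `once` flag (from pycore_lock.h) guards lazy initialization of this cache of type objects and interned names. A minimal sketch of the intended pattern, assuming `_PyOnceFlag_CallOnce` from pycore_lock.h; the `init_ast_types` initializer below is hypothetical:

// Hypothetical initializer: would create/intern each cached object.
static int
init_ast_types(void *arg)
{
    struct ast_state *state = (struct ast_state *)arg;
    (void)state;   // real code fills in state->AST_type, state->Module_type, ...
    return 0;      // 0 on success, -1 on failure (assumed convention)
}

static struct ast_state *
get_ast_state_checked(struct ast_state *state)
{
    // Runs init_ast_types exactly once, even with concurrent callers.
    if (_PyOnceFlag_CallOnce(&state->once, init_ast_types, state) < 0) {
        return NULL;
    }
    return state;
}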
67
Dependencies/Python/include/internal/pycore_atexit.h
vendored
Normal file
@@ -0,0 +1,67 @@
#ifndef Py_INTERNAL_ATEXIT_H
#define Py_INTERNAL_ATEXIT_H

#include "pycore_lock.h"        // PyMutex

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


//###############
// runtime atexit

typedef void (*atexit_callbackfunc)(void);

struct _atexit_runtime_state {
    PyMutex mutex;
#define NEXITFUNCS 32
    atexit_callbackfunc callbacks[NEXITFUNCS];
    int ncallbacks;
};


//###################
// interpreter atexit

typedef void (*atexit_datacallbackfunc)(void *);

typedef struct atexit_callback {
    atexit_datacallbackfunc func;
    void *data;
    struct atexit_callback *next;
} atexit_callback;

typedef struct {
    PyObject *func;
    PyObject *args;
    PyObject *kwargs;
} atexit_py_callback;

struct atexit_state {
    atexit_callback *ll_callbacks;
    // Kept for ABI compatibility--do not use! (See GH-127791.)
    atexit_callback *last_ll_callback;

    // XXX The rest of the state could be moved to the atexit module state
    // and a low-level callback added for it during module exec.
    // For the moment we leave it here.
    atexit_py_callback **callbacks;
    int ncallbacks;
    int callback_len;
};

// Export for '_interpchannels' shared extension
PyAPI_FUNC(int) _Py_AtExit(
    PyInterpreterState *interp,
    atexit_datacallbackfunc func,
    void *data);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ATEXIT_H */
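A hedged sketch of registering an interpreter-level shutdown hook through the exported _Py_AtExit above. The callback and resource here are illustrative, and the 0/-1 return convention is an assumption:

#include <stdlib.h>   // free()

// Hypothetical cleanup hook: invoked with its 'data' pointer at
// interpreter shutdown.
static void
my_cleanup(void *data)
{
    free(data);
}

// Registration sketch; assumed to return 0 on success, -1 on failure.
static int
register_cleanup(PyInterpreterState *interp, void *resource)
{
    return _Py_AtExit(interp, my_cleanup, resource);
}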
145
Dependencies/Python/include/internal/pycore_backoff.h
vendored
Normal file
@@ -0,0 +1,145 @@

#ifndef Py_INTERNAL_BACKOFF_H
#define Py_INTERNAL_BACKOFF_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>


typedef struct {
    union {
        struct {
            uint16_t backoff : 4;
            uint16_t value : 12;
        };
        uint16_t as_counter;  // For printf("%#x", ...)
    };
} _Py_BackoffCounter;


/* 16-bit countdown counters using exponential backoff.

   These are used by the adaptive specializer to count down until
   it is time to specialize an instruction. If specialization fails
   the counter is reset using exponential backoff.

   Another use is for the Tier 2 optimizer to decide when to create
   a new Tier 2 trace (executor). Again, exponential backoff is used.

   The 16-bit counter is structured as a 12-bit unsigned 'value'
   and a 4-bit 'backoff' field. When resetting the counter, the
   backoff field is incremented (until it reaches a limit) and the
   value is set to a bit mask representing the value 2**backoff - 1.
   The maximum backoff is 12 (the number of value bits).

   There is an exceptional value which must not be updated, 0xFFFF.
*/

#define UNREACHABLE_BACKOFF 0xFFFF

static inline bool
is_unreachable_backoff_counter(_Py_BackoffCounter counter)
{
    return counter.as_counter == UNREACHABLE_BACKOFF;
}

static inline _Py_BackoffCounter
make_backoff_counter(uint16_t value, uint16_t backoff)
{
    assert(backoff <= 15);
    assert(value <= 0xFFF);
    _Py_BackoffCounter result;
    result.value = value;
    result.backoff = backoff;
    return result;
}

static inline _Py_BackoffCounter
forge_backoff_counter(uint16_t counter)
{
    _Py_BackoffCounter result;
    result.as_counter = counter;
    return result;
}

static inline _Py_BackoffCounter
restart_backoff_counter(_Py_BackoffCounter counter)
{
    assert(!is_unreachable_backoff_counter(counter));
    if (counter.backoff < 12) {
        return make_backoff_counter((1 << (counter.backoff + 1)) - 1, counter.backoff + 1);
    }
    else {
        return make_backoff_counter((1 << 12) - 1, 12);
    }
}

static inline _Py_BackoffCounter
pause_backoff_counter(_Py_BackoffCounter counter)
{
    return make_backoff_counter(counter.value | 1, counter.backoff);
}

static inline _Py_BackoffCounter
advance_backoff_counter(_Py_BackoffCounter counter)
{
    if (!is_unreachable_backoff_counter(counter)) {
        return make_backoff_counter((counter.value - 1) & 0xFFF, counter.backoff);
    }
    else {
        return counter;
    }
}

static inline bool
backoff_counter_triggers(_Py_BackoffCounter counter)
{
    return counter.value == 0;
}

/* Initial JUMP_BACKWARD counter.
 * This determines when we create a trace for a loop.
 * Backoff sequence 16, 32, 64, 128, 256, 512, 1024, 2048, 4096. */
#define JUMP_BACKWARD_INITIAL_VALUE 16
#define JUMP_BACKWARD_INITIAL_BACKOFF 4
static inline _Py_BackoffCounter
initial_jump_backoff_counter(void)
{
    return make_backoff_counter(JUMP_BACKWARD_INITIAL_VALUE,
                                JUMP_BACKWARD_INITIAL_BACKOFF);
}

/* Initial exit temperature.
 * Must be larger than ADAPTIVE_COOLDOWN_VALUE,
 * otherwise when a side exit warms up we may construct
 * a new trace before the Tier 1 code has properly re-specialized.
 * Backoff sequence 64, 128, 256, 512, 1024, 2048, 4096. */
#define COLD_EXIT_INITIAL_VALUE 64
#define COLD_EXIT_INITIAL_BACKOFF 6

static inline _Py_BackoffCounter
initial_temperature_backoff_counter(void)
{
    return make_backoff_counter(COLD_EXIT_INITIAL_VALUE,
                                COLD_EXIT_INITIAL_BACKOFF);
}

/* Unreachable backoff counter. */
static inline _Py_BackoffCounter
initial_unreachable_backoff_counter(void)
{
    return forge_backoff_counter(UNREACHABLE_BACKOFF);
}

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BACKOFF_H */
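Putting the helpers above together, the expected life cycle of a counter is: tick it down on each hot event, act when it reaches zero, and restart it with a doubled period if the action fails. A minimal illustrative driver, using only functions defined in this header; `specialize_here` is a hypothetical stand-in for the expensive action:

extern bool specialize_here(void);  // hypothetical action, not part of this header

// Illustrative driver for one instruction's counter.
static void
on_hot_event(_Py_BackoffCounter *counter)
{
    if (backoff_counter_triggers(*counter)) {
        // Attempt the expensive action; on failure wait ~2x longer next time.
        if (!specialize_here()) {
            *counter = restart_backoff_counter(*counter);
        }
    }
    else {
        *counter = advance_backoff_counter(*counter);  // count down by one
    }
}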
186
Dependencies/Python/include/internal/pycore_bitutils.h
vendored
Normal file
@@ -0,0 +1,186 @@
/* Bit and bytes utilities.

   Bytes swap functions, reverse order of bytes:

   - _Py_bswap16(uint16_t)
   - _Py_bswap32(uint32_t)
   - _Py_bswap64(uint64_t)
*/

#ifndef Py_INTERNAL_BITUTILS_H
#define Py_INTERNAL_BITUTILS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#if defined(__GNUC__) \
      && ((__GNUC__ >= 5) || (__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))
   /* __builtin_bswap16() is available since GCC 4.8,
      __builtin_bswap32() is available since GCC 4.3,
      __builtin_bswap64() is available since GCC 4.3. */
#  define _PY_HAVE_BUILTIN_BSWAP
#endif

#ifdef _MSC_VER
#  include <intrin.h>             // _byteswap_uint64()
#endif


static inline uint16_t
_Py_bswap16(uint16_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap16)
    return __builtin_bswap16(word);
#elif defined(_MSC_VER)
    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned short));
    return _byteswap_ushort(word);
#else
    // Portable implementation which doesn't rely on circular bit shift
    return ( ((word & UINT16_C(0x00FF)) << 8)
           | ((word & UINT16_C(0xFF00)) >> 8));
#endif
}

static inline uint32_t
_Py_bswap32(uint32_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap32)
    return __builtin_bswap32(word);
#elif defined(_MSC_VER)
    Py_BUILD_ASSERT(sizeof(word) == sizeof(unsigned long));
    return _byteswap_ulong(word);
#else
    // Portable implementation which doesn't rely on circular bit shift
    return ( ((word & UINT32_C(0x000000FF)) << 24)
           | ((word & UINT32_C(0x0000FF00)) <<  8)
           | ((word & UINT32_C(0x00FF0000)) >>  8)
           | ((word & UINT32_C(0xFF000000)) >> 24));
#endif
}

static inline uint64_t
_Py_bswap64(uint64_t word)
{
#if defined(_PY_HAVE_BUILTIN_BSWAP) || _Py__has_builtin(__builtin_bswap64)
    return __builtin_bswap64(word);
#elif defined(_MSC_VER)
    return _byteswap_uint64(word);
#else
    // Portable implementation which doesn't rely on circular bit shift
    return ( ((word & UINT64_C(0x00000000000000FF)) << 56)
           | ((word & UINT64_C(0x000000000000FF00)) << 40)
           | ((word & UINT64_C(0x0000000000FF0000)) << 24)
           | ((word & UINT64_C(0x00000000FF000000)) <<  8)
           | ((word & UINT64_C(0x000000FF00000000)) >>  8)
           | ((word & UINT64_C(0x0000FF0000000000)) >> 24)
           | ((word & UINT64_C(0x00FF000000000000)) >> 40)
           | ((word & UINT64_C(0xFF00000000000000)) >> 56));
#endif
}


// Population count: count the number of 1's in 'x'
// (number of bits set to 1), also known as the Hamming weight.
//
// Implementation note: CPUID is not used to test whether the x86 POPCNT
// instruction can be used, to keep the implementation simple. For example,
// Visual Studio's __popcnt() is not used for this reason. The clang and GCC
// builtin functions can use the x86 POPCNT instruction if the target
// architecture has SSE4a or newer.
static inline int
_Py_popcount32(uint32_t x)
{
#if (defined(__clang__) || defined(__GNUC__))

#if SIZEOF_INT >= 4
    Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned int));
    return __builtin_popcount(x);
#else
    // The C standard guarantees that unsigned long will always be big enough
    // to hold a uint32_t value without losing information.
    Py_BUILD_ASSERT(sizeof(x) <= sizeof(unsigned long));
    return __builtin_popcountl(x);
#endif

#else
    // 32-bit SWAR (SIMD Within A Register) popcount

    // Binary: 0 1 0 1 ...
    const uint32_t M1 = 0x55555555;
    // Binary: 00 11 00 11 ...
    const uint32_t M2 = 0x33333333;
    // Binary: 0000 1111 0000 1111 ...
    const uint32_t M4 = 0x0F0F0F0F;

    // Put count of each 2 bits into those 2 bits
    x = x - ((x >> 1) & M1);
    // Put count of each 4 bits into those 4 bits
    x = (x & M2) + ((x >> 2) & M2);
    // Put count of each 8 bits into those 8 bits
    x = (x + (x >> 4)) & M4;
    // Sum of the 4 byte counts.
    // Take care when considering changes to the next line. Portability and
    // correctness are delicate here, thanks to C's "integer promotions" (C99
    // §6.3.1.1p2). On machines where the `int` type has width greater than 32
    // bits, `x` will be promoted to an `int`, and following C's "usual
    // arithmetic conversions" (C99 §6.3.1.8), the multiplication will be
    // performed as a multiplication of two `unsigned int` operands. In this
    // case it's critical that we cast back to `uint32_t` in order to keep only
    // the least significant 32 bits. On machines where the `int` type has
    // width no greater than 32, the multiplication is of two 32-bit unsigned
    // integer types, and the (uint32_t) cast is a no-op. In both cases, we
    // avoid the risk of undefined behaviour due to overflow of a
    // multiplication of signed integer types.
    return (uint32_t)(x * 0x01010101U) >> 24;
#endif
}


// Return the index of the most significant 1 bit in 'x'. This is the smallest
// integer k such that x < 2**k. Equivalent to floor(log2(x)) + 1 for x != 0.
static inline int
_Py_bit_length(unsigned long x)
{
#if (defined(__clang__) || defined(__GNUC__))
    if (x != 0) {
        // __builtin_clzl() is available since GCC 3.4.
        // Undefined behavior for x == 0.
        return (int)sizeof(unsigned long) * 8 - __builtin_clzl(x);
    }
    else {
        return 0;
    }
#elif defined(_MSC_VER)
    // _BitScanReverse() is documented to search 32 bits.
    Py_BUILD_ASSERT(sizeof(unsigned long) <= 4);
    unsigned long msb;
    if (_BitScanReverse(&msb, x)) {
        return (int)msb + 1;
    }
    else {
        return 0;
    }
#else
    const int BIT_LENGTH_TABLE[32] = {
        0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
    };
    int msb = 0;
    while (x >= 32) {
        msb += 6;
        x >>= 6;
    }
    msb += BIT_LENGTH_TABLE[x];
    return msb;
#endif
}


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BITUTILS_H */
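As a quick sanity check on the helpers above, a minimal sketch exercising byte swapping, popcount, and bit length, with the expected values worked out by hand:

#include <assert.h>

static void
bitutils_smoke_test(void)
{
    // 0x1234 byte-swapped is 0x3412.
    assert(_Py_bswap16(0x1234) == 0x3412);
    // 0xF0F0F0F0 has four set bits per byte, 16 in total.
    assert(_Py_popcount32(0xF0F0F0F0u) == 16);
    // Smallest k with 100 < 2**k is 7, so bit_length(100) == 7.
    assert(_Py_bit_length(100) == 7);
}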
321
Dependencies/Python/include/internal/pycore_blocks_output_buffer.h
vendored
Normal file
@@ -0,0 +1,321 @@
/*
   _BlocksOutputBuffer is used to maintain an output buffer
   whose size is unpredictable. Suitable for compression/decompression
   APIs (bz2/lzma/zlib) that use stream->next_out and stream->avail_out:

        stream->next_out:  points to the next output position.
        stream->avail_out: the number of available bytes left in the buffer.

   It maintains a list of bytes objects, so there is no overhead of resizing
   the buffer.

   Usage:

   1. Initialize the struct instance like this:
        _BlocksOutputBuffer buffer = {.list = NULL};
      Set .list to NULL so that _BlocksOutputBuffer_OnError() is safe to call.

   2. Initialize the buffer using one of these functions:
        _BlocksOutputBuffer_InitAndGrow()
        _BlocksOutputBuffer_InitWithSize()

   3. If (avail_out == 0), grow the buffer:
        _BlocksOutputBuffer_Grow()

   4. Get the current outputted data size:
        _BlocksOutputBuffer_GetDataSize()

   5. Finish the buffer, and return a bytes object:
        _BlocksOutputBuffer_Finish()

   6. Clean up the buffer when an error occurs:
        _BlocksOutputBuffer_OnError()
*/

#ifndef Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
#define Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H
#ifdef __cplusplus
extern "C" {
#endif

#include "Python.h"

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

typedef struct {
    // List of bytes objects
    PyObject *list;
    // Total allocated size of all blocks
    Py_ssize_t allocated;
    // Max length of the buffer; a negative number means unlimited length.
    Py_ssize_t max_length;
} _BlocksOutputBuffer;

static const char unable_allocate_msg[] = "Unable to allocate output buffer.";

/* In a 32-bit build, the max block size should be <= INT32_MAX. */
#define OUTPUT_BUFFER_MAX_BLOCK_SIZE (256*1024*1024)

/* Block size sequence */
#define KB (1024)
#define MB (1024*1024)
static const Py_ssize_t BUFFER_BLOCK_SIZE[] =
    { 32*KB, 64*KB, 256*KB, 1*MB, 4*MB, 8*MB, 16*MB, 16*MB,
      32*MB, 32*MB, 32*MB, 32*MB, 64*MB, 64*MB, 128*MB, 128*MB,
      OUTPUT_BUFFER_MAX_BLOCK_SIZE };
#undef KB
#undef MB

/* According to the block sizes defined by BUFFER_BLOCK_SIZE, the total
   allocated size grows in these steps:
    1     32 KB     +32 KB
    2     96 KB     +64 KB
    3    352 KB    +256 KB
    4   1.34 MB      +1 MB
    5   5.34 MB      +4 MB
    6  13.34 MB      +8 MB
    7  29.34 MB     +16 MB
    8  45.34 MB     +16 MB
    9  77.34 MB     +32 MB
    10 109.34 MB    +32 MB
    11 141.34 MB    +32 MB
    12 173.34 MB    +32 MB
    13 237.34 MB    +64 MB
    14 301.34 MB    +64 MB
    15 429.34 MB   +128 MB
    16 557.34 MB   +128 MB
    17 813.34 MB   +256 MB
    18 1069.34 MB  +256 MB
    19 1325.34 MB  +256 MB
    20 1581.34 MB  +256 MB
    21 1837.34 MB  +256 MB
    22 2093.34 MB  +256 MB
    ...
*/

/* Initialize the buffer, and grow the buffer.

   max_length: Max length of the buffer, -1 for unlimited length.

   On success, return allocated size (>=0)
   On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer,
                                const Py_ssize_t max_length,
                                void **next_out)
{
    PyObject *b;
    Py_ssize_t block_size;

    // ensure .list was set to NULL
    assert(buffer->list == NULL);

    // get block size
    if (0 <= max_length && max_length < BUFFER_BLOCK_SIZE[0]) {
        block_size = max_length;
    } else {
        block_size = BUFFER_BLOCK_SIZE[0];
    }

    // the first block
    b = PyBytes_FromStringAndSize(NULL, block_size);
    if (b == NULL) {
        return -1;
    }

    // create the list
    buffer->list = PyList_New(1);
    if (buffer->list == NULL) {
        Py_DECREF(b);
        return -1;
    }
    PyList_SET_ITEM(buffer->list, 0, b);

    // set variables
    buffer->allocated = block_size;
    buffer->max_length = max_length;

    *next_out = PyBytes_AS_STRING(b);
    return block_size;
}

/* Initialize the buffer, with an initial size.

   Check the block size limit in the outer wrapper function. For example, some
   libraries accept UINT32_MAX as the maximum block size, in which case
   init_size should be <= that limit.

   On success, return allocated size (>=0)
   On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer,
                                 const Py_ssize_t init_size,
                                 void **next_out)
{
    PyObject *b;

    // ensure .list was set to NULL
    assert(buffer->list == NULL);

    // the first block
    b = PyBytes_FromStringAndSize(NULL, init_size);
    if (b == NULL) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return -1;
    }

    // create the list
    buffer->list = PyList_New(1);
    if (buffer->list == NULL) {
        Py_DECREF(b);
        return -1;
    }
    PyList_SET_ITEM(buffer->list, 0, b);

    // set variables
    buffer->allocated = init_size;
    buffer->max_length = -1;

    *next_out = PyBytes_AS_STRING(b);
    return init_size;
}

/* Grow the buffer. avail_out must be 0; check it before calling.

   On success, return allocated size (>=0)
   On failure, return -1
*/
static inline Py_ssize_t
_BlocksOutputBuffer_Grow(_BlocksOutputBuffer *buffer,
                         void **next_out,
                         const Py_ssize_t avail_out)
{
    PyObject *b;
    const Py_ssize_t list_len = Py_SIZE(buffer->list);
    Py_ssize_t block_size;

    // ensure no gaps in the data
    if (avail_out != 0) {
        PyErr_SetString(PyExc_SystemError,
                        "avail_out is non-zero in _BlocksOutputBuffer_Grow().");
        return -1;
    }

    // get block size
    if (list_len < (Py_ssize_t) Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE)) {
        block_size = BUFFER_BLOCK_SIZE[list_len];
    } else {
        block_size = BUFFER_BLOCK_SIZE[Py_ARRAY_LENGTH(BUFFER_BLOCK_SIZE) - 1];
    }

    // check max_length
    if (buffer->max_length >= 0) {
        // if (rest == 0), should not grow the buffer.
        Py_ssize_t rest = buffer->max_length - buffer->allocated;
        assert(rest > 0);

        // block_size of the last block
        if (block_size > rest) {
            block_size = rest;
        }
    }

    // check buffer->allocated overflow
    if (block_size > PY_SSIZE_T_MAX - buffer->allocated) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return -1;
    }

    // create the block
    b = PyBytes_FromStringAndSize(NULL, block_size);
    if (b == NULL) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return -1;
    }
    if (PyList_Append(buffer->list, b) < 0) {
        Py_DECREF(b);
        return -1;
    }
    Py_DECREF(b);

    // set variables
    buffer->allocated += block_size;

    *next_out = PyBytes_AS_STRING(b);
    return block_size;
}

/* Return the current outputted data size. */
static inline Py_ssize_t
_BlocksOutputBuffer_GetDataSize(_BlocksOutputBuffer *buffer,
                                const Py_ssize_t avail_out)
{
    return buffer->allocated - avail_out;
}

/* Finish the buffer.

   Return a bytes object on success
   Return NULL on failure
*/
static inline PyObject *
_BlocksOutputBuffer_Finish(_BlocksOutputBuffer *buffer,
                           const Py_ssize_t avail_out)
{
    PyObject *result, *block;
    const Py_ssize_t list_len = Py_SIZE(buffer->list);

    // fast path for single block
    if ((list_len == 1 && avail_out == 0) ||
        (list_len == 2 && Py_SIZE(PyList_GET_ITEM(buffer->list, 1)) == avail_out))
    {
        block = PyList_GET_ITEM(buffer->list, 0);
        Py_INCREF(block);

        Py_CLEAR(buffer->list);
        return block;
    }

    // final bytes object
    result = PyBytes_FromStringAndSize(NULL, buffer->allocated - avail_out);
    if (result == NULL) {
        PyErr_SetString(PyExc_MemoryError, unable_allocate_msg);
        return NULL;
    }

    // memory copy
    if (list_len > 0) {
        char *posi = PyBytes_AS_STRING(result);

        // blocks except the last one
        Py_ssize_t i = 0;
        for (; i < list_len-1; i++) {
            block = PyList_GET_ITEM(buffer->list, i);
            memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block));
            posi += Py_SIZE(block);
        }
        // the last block
        block = PyList_GET_ITEM(buffer->list, i);
        memcpy(posi, PyBytes_AS_STRING(block), Py_SIZE(block) - avail_out);
    } else {
        assert(Py_SIZE(result) == 0);
    }

    Py_CLEAR(buffer->list);
    return result;
}

/* Clean up the buffer when an error occurs. */
static inline void
_BlocksOutputBuffer_OnError(_BlocksOutputBuffer *buffer)
{
    Py_CLEAR(buffer->list);
}

#ifdef __cplusplus
}
#endif
#endif /* Py_INTERNAL_BLOCKS_OUTPUT_BUFFER_H */
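The numbered usage steps in this header's opening comment map directly onto code. A condensed sketch of a compression-style loop under those steps; the `produce_some_bytes` producer is hypothetical:

// Hypothetical producer: writes up to 'avail' bytes into 'dst', returns the
// number of bytes written, or -1 when there is nothing left to produce.
extern Py_ssize_t produce_some_bytes(void *dst, Py_ssize_t avail);

static PyObject *
collect_output(void)
{
    _BlocksOutputBuffer buffer = {.list = NULL};                 // step 1
    void *next_out;
    Py_ssize_t avail = _BlocksOutputBuffer_InitAndGrow(&buffer, -1, &next_out);  // step 2
    if (avail < 0) {
        goto error;
    }
    for (;;) {
        Py_ssize_t n = produce_some_bytes(next_out, avail);
        if (n < 0) {
            break;                                               // producer finished
        }
        avail -= n;
        next_out = (char *)next_out + n;
        if (avail == 0) {                                        // step 3: grow when full
            avail = _BlocksOutputBuffer_Grow(&buffer, &next_out, avail);
            if (avail < 0) {
                goto error;
            }
        }
    }
    return _BlocksOutputBuffer_Finish(&buffer, avail);           // step 5
error:
    _BlocksOutputBuffer_OnError(&buffer);                        // step 6
    return NULL;
}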
74
Dependencies/Python/include/internal/pycore_brc.h
vendored
Normal file
@@ -0,0 +1,74 @@
#ifndef Py_INTERNAL_BRC_H
#define Py_INTERNAL_BRC_H

#include <stdint.h>
#include "pycore_llist.h"         // struct llist_node
#include "pycore_lock.h"          // PyMutex
#include "pycore_object_stack.h"  // _PyObjectStack

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#ifdef Py_GIL_DISABLED

// Prime number to avoid correlations with memory addresses.
#define _Py_BRC_NUM_BUCKETS 257

// Hash table bucket
struct _brc_bucket {
    // Mutex protects both the bucket and thread state queues in this bucket.
    PyMutex mutex;

    // Linked list of _PyThreadStateImpl objects hashed to this bucket.
    struct llist_node root;
};

// Per-interpreter biased reference counting state
struct _brc_state {
    // Hash table of thread states by thread-id. Thread states within a bucket
    // are chained using a doubly-linked list.
    struct _brc_bucket table[_Py_BRC_NUM_BUCKETS];
};

// Per-thread biased reference counting state
struct _brc_thread_state {
    // Linked-list of thread states per hash bucket
    struct llist_node bucket_node;

    // Thread-id as determined by _PyThread_Id()
    uintptr_t tid;

    // Objects with refcounts to be merged (protected by bucket mutex)
    _PyObjectStack objects_to_merge;

    // Local stack of objects to be merged (not accessed by other threads)
    _PyObjectStack local_objects_to_merge;
};

// Initialize/finalize the per-thread biased reference counting state
void _Py_brc_init_thread(PyThreadState *tstate);
void _Py_brc_remove_thread(PyThreadState *tstate);

// Initialize per-interpreter state
void _Py_brc_init_state(PyInterpreterState *interp);

void _Py_brc_after_fork(PyInterpreterState *interp);

// Enqueues an object to be merged by its owning thread (tid). This
// steals a reference to the object.
void _Py_brc_queue_object(PyObject *ob);

// Merge the refcounts of queued objects for the current thread.
void _Py_brc_merge_refcounts(PyThreadState *tstate);

#endif /* Py_GIL_DISABLED */

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BRC_H */
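The bucket count is prime so that thread IDs spread evenly over the table. A hedged sketch of the implied bucket selection; the modulo mapping shown is an assumption about the implementation, not copied from it:

#ifdef Py_GIL_DISABLED
// Assumed mapping from a thread id to its bucket in _brc_state.
static struct _brc_bucket *
brc_bucket_for_tid(struct _brc_state *brc, uintptr_t tid)
{
    return &brc->table[tid % _Py_BRC_NUM_BUCKETS];
}
#endif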
82
Dependencies/Python/include/internal/pycore_bytes_methods.h
vendored
Normal file
@@ -0,0 +1,82 @@
#ifndef Py_LIMITED_API
#ifndef Py_BYTES_CTYPE_H
#define Py_BYTES_CTYPE_H

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

/*
 * The internal implementation behind PyBytes (bytes) and PyByteArray (bytearray)
 * methods of the given names; they operate on ASCII byte strings.
 */
extern PyObject* _Py_bytes_isspace(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isalpha(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isalnum(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isascii(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isdigit(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_islower(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_isupper(const char *cptr, Py_ssize_t len);
extern PyObject* _Py_bytes_istitle(const char *cptr, Py_ssize_t len);

/* These store their len-sized answer in the given preallocated *result arg. */
extern void _Py_bytes_lower(char *result, const char *cptr, Py_ssize_t len);
extern void _Py_bytes_upper(char *result, const char *cptr, Py_ssize_t len);
extern void _Py_bytes_title(char *result, const char *s, Py_ssize_t len);
extern void _Py_bytes_capitalize(char *result, const char *s, Py_ssize_t len);
extern void _Py_bytes_swapcase(char *result, const char *s, Py_ssize_t len);

extern PyObject *_Py_bytes_find(const char *str, Py_ssize_t len, PyObject *sub,
                                Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_index(const char *str, Py_ssize_t len, PyObject *sub,
                                 Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_rfind(const char *str, Py_ssize_t len, PyObject *sub,
                                 Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_rindex(const char *str, Py_ssize_t len, PyObject *sub,
                                  Py_ssize_t start, Py_ssize_t end);
extern PyObject *_Py_bytes_count(const char *str, Py_ssize_t len, PyObject *sub,
                                 Py_ssize_t start, Py_ssize_t end);
extern int _Py_bytes_contains(const char *str, Py_ssize_t len, PyObject *arg);
extern PyObject *_Py_bytes_startswith(const char *str, Py_ssize_t len,
                                      PyObject *subobj, Py_ssize_t start,
                                      Py_ssize_t end);
extern PyObject *_Py_bytes_endswith(const char *str, Py_ssize_t len,
                                    PyObject *subobj, Py_ssize_t start,
                                    Py_ssize_t end);

/* The maketrans() static method. */
extern PyObject* _Py_bytes_maketrans(Py_buffer *frm, Py_buffer *to);

/* Shared __doc__ strings. */
extern const char _Py_isspace__doc__[];
extern const char _Py_isalpha__doc__[];
extern const char _Py_isalnum__doc__[];
extern const char _Py_isascii__doc__[];
extern const char _Py_isdigit__doc__[];
extern const char _Py_islower__doc__[];
extern const char _Py_isupper__doc__[];
extern const char _Py_istitle__doc__[];
extern const char _Py_lower__doc__[];
extern const char _Py_upper__doc__[];
extern const char _Py_title__doc__[];
extern const char _Py_capitalize__doc__[];
extern const char _Py_swapcase__doc__[];
extern const char _Py_count__doc__[];
extern const char _Py_find__doc__[];
extern const char _Py_index__doc__[];
extern const char _Py_rfind__doc__[];
extern const char _Py_rindex__doc__[];
extern const char _Py_startswith__doc__[];
extern const char _Py_endswith__doc__[];
extern const char _Py_maketrans__doc__[];
extern const char _Py_expandtabs__doc__[];
extern const char _Py_ljust__doc__[];
extern const char _Py_rjust__doc__[];
extern const char _Py_center__doc__[];
extern const char _Py_zfill__doc__[];

/* this is needed because some docs are shared from the .o, not static */
#define PyDoc_STRVAR_shared(name,str) const char name[] = PyDoc_STR(str)
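
/* Illustrative usage sketch (not part of the original header): the macro
   above is how the shared doc strings declared here get their single
   definition in one .c file. The doc text below is a made-up placeholder. */
#if 0
PyDoc_STRVAR_shared(_Py_isspace__doc__,
"B.isspace() -> bool\n\
\n\
Return True if all characters in B are whitespace.");
#endif
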
#endif /* !Py_BYTES_CTYPE_H */
#endif /* !Py_LIMITED_API */
148
Dependencies/Python/include/internal/pycore_bytesobject.h
vendored
Normal file
@@ -0,0 +1,148 @@
#ifndef Py_INTERNAL_BYTESOBJECT_H
#define Py_INTERNAL_BYTESOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

extern PyObject* _PyBytes_FormatEx(
    const char *format,
    Py_ssize_t format_len,
    PyObject *args,
    int use_bytearray);

extern PyObject* _PyBytes_FromHex(
    PyObject *string,
    int use_bytearray);

// Helper for PyBytes_DecodeEscape that detects invalid escape chars.
// Export for test_peg_generator.
PyAPI_FUNC(PyObject*) _PyBytes_DecodeEscape(const char *, Py_ssize_t,
                                            const char *, const char **);


// Substring Search.
//
// Returns the index of the first occurrence of
// a substring ("needle") in a larger text ("haystack").
// If the needle is not found, return -1.
// If the needle is found, add offset to the index.
//
// Export for 'mmap' shared extension.
PyAPI_FUNC(Py_ssize_t)
_PyBytes_Find(const char *haystack, Py_ssize_t len_haystack,
              const char *needle, Py_ssize_t len_needle,
              Py_ssize_t offset);

// Same as above, but search right-to-left.
// Export for 'mmap' shared extension.
PyAPI_FUNC(Py_ssize_t)
_PyBytes_ReverseFind(const char *haystack, Py_ssize_t len_haystack,
                     const char *needle, Py_ssize_t len_needle,
                     Py_ssize_t offset);


// Helper function to implement the repeat and inplace repeat methods on a
// buffer.
//
// len_dest is assumed to be an integer multiple of len_src.
// If src equals dest, then assume the operation is in place.
//
// This method repeatedly doubles the number of bytes copied to reduce
// the number of invocations of memcpy.
//
// Export for 'array' shared extension.
PyAPI_FUNC(void)
_PyBytes_Repeat(char* dest, Py_ssize_t len_dest,
                const char* src, Py_ssize_t len_src);
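
/* Illustrative sketch (not part of the original header): the doubling
   strategy the comment above describes, written as a standalone helper.
   After seeding one copy of src, each memcpy doubles the filled prefix,
   so only O(log(len_dest/len_src)) calls are made. */
#if 0
static void
repeat_by_doubling(char *dest, Py_ssize_t len_dest,
                   const char *src, Py_ssize_t len_src)
{
    if (len_dest == 0 || len_src == 0) {
        return;
    }
    if (dest != src) {
        memcpy(dest, src, (size_t)len_src);      /* seed the first copy */
    }
    Py_ssize_t copied = len_src;
    while (copied < len_dest) {
        Py_ssize_t n = (copied <= len_dest - copied) ? copied
                                                     : len_dest - copied;
        memcpy(dest + copied, dest, (size_t)n);  /* double the filled prefix */
        copied += n;
    }
}
#endif
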
/* --- _PyBytesWriter ----------------------------------------------------- */

/* The _PyBytesWriter structure is big: it contains an embedded "stack buffer".
   A _PyBytesWriter variable must be declared at the end of variables in a
   function to optimize the memory allocation on the stack. */
typedef struct {
    /* bytes, bytearray or NULL (when the small buffer is used) */
    PyObject *buffer;

    /* Number of allocated bytes. */
    Py_ssize_t allocated;

    /* Minimum number of allocated bytes,
       incremented by _PyBytesWriter_Prepare() */
    Py_ssize_t min_size;

    /* If non-zero, use a bytearray instead of a bytes object for buffer. */
    int use_bytearray;

    /* If non-zero, overallocate the buffer (default: 0).
       This flag must be zero if use_bytearray is non-zero. */
    int overallocate;

    /* Stack buffer */
    int use_small_buffer;
    char small_buffer[512];
} _PyBytesWriter;

/* Initialize a bytes writer

   By default, the overallocation is disabled. Set the overallocate attribute
   to control the allocation of the buffer.

   Export _PyBytesWriter API for '_pickle' shared extension. */
PyAPI_FUNC(void) _PyBytesWriter_Init(_PyBytesWriter *writer);

/* Get the buffer content and reset the writer.
   Return a bytes object, or a bytearray object if use_bytearray is non-zero.
   Raise an exception and return NULL on error. */
PyAPI_FUNC(PyObject *) _PyBytesWriter_Finish(_PyBytesWriter *writer,
                                             void *str);

/* Deallocate memory of a writer (clear its internal buffer). */
PyAPI_FUNC(void) _PyBytesWriter_Dealloc(_PyBytesWriter *writer);

/* Allocate the buffer to write size bytes.
   Return the pointer to the beginning of buffer data.
   Raise an exception and return NULL on error. */
PyAPI_FUNC(void*) _PyBytesWriter_Alloc(_PyBytesWriter *writer,
                                       Py_ssize_t size);

/* Ensure that the buffer is large enough to write *size* bytes.
   Add size to the writer minimum size (min_size attribute).

   str is the current pointer inside the buffer.
   Return the updated current pointer inside the buffer.
   Raise an exception and return NULL on error. */
PyAPI_FUNC(void*) _PyBytesWriter_Prepare(_PyBytesWriter *writer,
                                         void *str,
                                         Py_ssize_t size);

/* Resize the buffer to make it larger.
   The new buffer may be larger than size bytes because of overallocation.
   Return the updated current pointer inside the buffer.
   Raise an exception and return NULL on error.

   Note: size must be greater than the number of allocated bytes in the writer.

   This function doesn't use the writer minimum size (min_size attribute).

   See also _PyBytesWriter_Prepare().
   */
PyAPI_FUNC(void*) _PyBytesWriter_Resize(_PyBytesWriter *writer,
                                        void *str,
                                        Py_ssize_t size);

/* Write bytes.
   Raise an exception and return NULL on error. */
PyAPI_FUNC(void*) _PyBytesWriter_WriteBytes(_PyBytesWriter *writer,
                                            void *str,
                                            const void *bytes,
                                            Py_ssize_t size);
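
/* Illustrative usage sketch (not part of the original header): the typical
   writer life cycle is Init, Alloc (or Prepare), write through the returned
   pointer, then Finish; Dealloc is for the error path. */
#if 0
static PyObject *
make_abc_bytes(void)
{
    char *str;
    _PyBytesWriter writer;   /* declared last, as the comment above advises */

    _PyBytesWriter_Init(&writer);
    str = _PyBytesWriter_Alloc(&writer, 3);
    if (str == NULL) {
        return NULL;
    }
    memcpy(str, "abc", 3);
    str += 3;                /* keep the "current pointer" up to date */
    return _PyBytesWriter_Finish(&writer, str);
}
#endif
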
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_BYTESOBJECT_H */
205
Dependencies/Python/include/internal/pycore_call.h
vendored
Normal file
@@ -0,0 +1,205 @@
#ifndef Py_INTERNAL_CALL_H
#define Py_INTERNAL_CALL_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_identifier.h"    // _Py_Identifier
#include "pycore_pystate.h"       // _PyThreadState_GET()

/* Suggested size (number of positional arguments) for arrays of PyObject*
   allocated on a C stack to avoid allocating memory on the heap. Such an
   array is used to pass positional arguments to call functions of the
   PyObject_Vectorcall() family.

   The size is chosen to not abuse the C stack and so limit the risk of stack
   overflow. The size is also chosen to allow using the small stack for most
   function calls of the Python standard library. On 64-bit CPU, it allocates
   40 bytes on the stack. */
#define _PY_FASTCALL_SMALL_STACK 5


// Export for 'math' shared extension, used via _PyObject_VectorcallTstate()
// static inline function.
PyAPI_FUNC(PyObject*) _Py_CheckFunctionResult(
    PyThreadState *tstate,
    PyObject *callable,
    PyObject *result,
    const char *where);

extern PyObject* _PyObject_Call_Prepend(
    PyThreadState *tstate,
    PyObject *callable,
    PyObject *obj,
    PyObject *args,
    PyObject *kwargs);

extern PyObject* _PyObject_VectorcallDictTstate(
    PyThreadState *tstate,
    PyObject *callable,
    PyObject *const *args,
    size_t nargsf,
    PyObject *kwargs);

extern PyObject* _PyObject_Call(
    PyThreadState *tstate,
    PyObject *callable,
    PyObject *args,
    PyObject *kwargs);

extern PyObject * _PyObject_CallMethodFormat(
    PyThreadState *tstate,
    PyObject *callable,
    const char *format,
    ...);

// Export for 'array' shared extension
PyAPI_FUNC(PyObject*) _PyObject_CallMethod(
    PyObject *obj,
    PyObject *name,
    const char *format, ...);

extern PyObject* _PyObject_CallMethodIdObjArgs(
    PyObject *obj,
    _Py_Identifier *name,
    ...);

static inline PyObject *
_PyObject_VectorcallMethodId(
    _Py_Identifier *name, PyObject *const *args,
    size_t nargsf, PyObject *kwnames)
{
    PyObject *oname = _PyUnicode_FromId(name); /* borrowed */
    if (!oname) {
        return _Py_NULL;
    }
    return PyObject_VectorcallMethod(oname, args, nargsf, kwnames);
}

static inline PyObject *
_PyObject_CallMethodIdNoArgs(PyObject *self, _Py_Identifier *name)
{
    size_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET;
    return _PyObject_VectorcallMethodId(name, &self, nargsf, _Py_NULL);
}

static inline PyObject *
_PyObject_CallMethodIdOneArg(PyObject *self, _Py_Identifier *name, PyObject *arg)
{
    PyObject *args[2] = {self, arg};
    size_t nargsf = 2 | PY_VECTORCALL_ARGUMENTS_OFFSET;
    assert(arg != NULL);
    return _PyObject_VectorcallMethodId(name, args, nargsf, _Py_NULL);
}


/* === Vectorcall protocol (PEP 590) ============================= */

// Call callable using tp_call. Arguments are like PyObject_Vectorcall(),
// except that nargs is plainly the number of arguments without flags.
//
// Export for 'math' shared extension, used via _PyObject_VectorcallTstate()
// static inline function.
PyAPI_FUNC(PyObject*) _PyObject_MakeTpCall(
    PyThreadState *tstate,
    PyObject *callable,
    PyObject *const *args, Py_ssize_t nargs,
    PyObject *keywords);

// Static inline variant of public PyVectorcall_Function().
static inline vectorcallfunc
_PyVectorcall_FunctionInline(PyObject *callable)
{
    assert(callable != NULL);

    PyTypeObject *tp = Py_TYPE(callable);
    if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) {
        return NULL;
    }
    assert(PyCallable_Check(callable));

    Py_ssize_t offset = tp->tp_vectorcall_offset;
    assert(offset > 0);

    vectorcallfunc ptr;
    memcpy(&ptr, (char *) callable + offset, sizeof(ptr));
    return ptr;
}


/* Call the callable object 'callable' with the "vectorcall" calling
   convention.

   args is a C array for positional arguments.

   nargsf is the number of positional arguments plus optionally the flag
   PY_VECTORCALL_ARGUMENTS_OFFSET which means that the caller is allowed to
   modify args[-1].

   kwnames is a tuple of keyword names. The values of the keyword arguments
   are stored in "args" after the positional arguments (note that the number
   of keyword arguments does not change nargsf). kwnames can also be NULL if
   there are no keyword arguments.

   kwnames must only contain strings and all keys must be unique.

   Return the result on success. Raise an exception and return NULL on
   error. */
static inline PyObject *
_PyObject_VectorcallTstate(PyThreadState *tstate, PyObject *callable,
                           PyObject *const *args, size_t nargsf,
                           PyObject *kwnames)
{
    vectorcallfunc func;
    PyObject *res;

    assert(kwnames == NULL || PyTuple_Check(kwnames));
    assert(args != NULL || PyVectorcall_NARGS(nargsf) == 0);

    func = _PyVectorcall_FunctionInline(callable);
    if (func == NULL) {
        Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
        return _PyObject_MakeTpCall(tstate, callable, args, nargs, kwnames);
    }
    res = func(callable, args, nargsf, kwnames);
    return _Py_CheckFunctionResult(tstate, callable, res, NULL);
}
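
/* Illustrative usage sketch (not part of the original header): calling
   func(x, y) through the vectorcall helper above, assuming an attached
   thread state and borrowed references for x and y. */
#if 0
static PyObject *
call_with_two_args(PyObject *func, PyObject *x, PyObject *y)
{
    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *args[2] = {x, y};
    /* No PY_VECTORCALL_ARGUMENTS_OFFSET here: the callee may not touch
       args[-1], which would be out of bounds for this stack array. */
    return _PyObject_VectorcallTstate(tstate, func, args, 2, NULL);
}
#endif
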
static inline PyObject *
_PyObject_CallNoArgsTstate(PyThreadState *tstate, PyObject *func) {
    return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
}


// Private static inline function variant of public PyObject_CallNoArgs()
static inline PyObject *
_PyObject_CallNoArgs(PyObject *func) {
    EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_API, func);
    PyThreadState *tstate = _PyThreadState_GET();
    return _PyObject_VectorcallTstate(tstate, func, NULL, 0, NULL);
}


extern PyObject *const *
_PyStack_UnpackDict(PyThreadState *tstate,
                    PyObject *const *args, Py_ssize_t nargs,
                    PyObject *kwargs, PyObject **p_kwnames);

extern void _PyStack_UnpackDict_Free(
    PyObject *const *stack,
    Py_ssize_t nargs,
    PyObject *kwnames);

extern void _PyStack_UnpackDict_FreeNoDecRef(
    PyObject *const *stack,
    PyObject *kwnames);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CALL_H */
17
Dependencies/Python/include/internal/pycore_capsule.h
vendored
Normal file
@@ -0,0 +1,17 @@
#ifndef Py_INTERNAL_PYCAPSULE_H
#define Py_INTERNAL_PYCAPSULE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// Export for '_socket' shared extension
PyAPI_FUNC(int) _PyCapsule_SetTraverse(PyObject *op, traverseproc traverse_func, inquiry clear_func);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYCAPSULE_H */
48
Dependencies/Python/include/internal/pycore_cell.h
vendored
Normal file
@@ -0,0 +1,48 @@
#ifndef Py_INTERNAL_CELL_H
#define Py_INTERNAL_CELL_H

#include "pycore_critical_section.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// Sets the cell contents to `value` and returns the previous contents.
// Steals a reference to `value`.
static inline PyObject *
PyCell_SwapTakeRef(PyCellObject *cell, PyObject *value)
{
    PyObject *old_value;
    Py_BEGIN_CRITICAL_SECTION(cell);
    old_value = cell->ob_ref;
    cell->ob_ref = value;
    Py_END_CRITICAL_SECTION();
    return old_value;
}

static inline void
PyCell_SetTakeRef(PyCellObject *cell, PyObject *value)
{
    PyObject *old_value = PyCell_SwapTakeRef(cell, value);
    Py_XDECREF(old_value);
}

// Gets the cell contents. Returns a new reference.
static inline PyObject *
PyCell_GetRef(PyCellObject *cell)
{
    PyObject *res;
    Py_BEGIN_CRITICAL_SECTION(cell);
    res = Py_XNewRef(cell->ob_ref);
    Py_END_CRITICAL_SECTION();
    return res;
}

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CELL_H */
303
Dependencies/Python/include/internal/pycore_ceval.h
vendored
Normal file
@@ -0,0 +1,303 @@
#ifndef Py_INTERNAL_CEVAL_H
#define Py_INTERNAL_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "dynamic_annotations.h"  // _Py_ANNOTATE_RWLOCK_CREATE

#include "pycore_interp.h"        // PyInterpreterState.eval_frame
#include "pycore_pystate.h"       // _PyThreadState_GET()

/* Forward declarations */
struct pyruntimestate;
struct _ceval_runtime_state;

// Export for '_lsprof' shared extension
PyAPI_FUNC(int) _PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);

extern int _PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg);

extern int _PyEval_SetOpcodeTrace(PyFrameObject *f, bool enable);

// Helper to look up a builtin object
// Export for 'array' shared extension
PyAPI_FUNC(PyObject*) _PyEval_GetBuiltin(PyObject *);

extern PyObject* _PyEval_GetBuiltinId(_Py_Identifier *);

extern void _PyEval_SetSwitchInterval(unsigned long microseconds);
extern unsigned long _PyEval_GetSwitchInterval(void);

// Export for '_queue' shared extension
PyAPI_FUNC(int) _PyEval_MakePendingCalls(PyThreadState *);

#ifndef Py_DEFAULT_RECURSION_LIMIT
# define Py_DEFAULT_RECURSION_LIMIT 1000
#endif

extern void _Py_FinishPendingCalls(PyThreadState *tstate);
extern void _PyEval_InitState(PyInterpreterState *);
extern void _PyEval_SignalReceived(void);

// bitwise flags:
#define _Py_PENDING_MAINTHREADONLY 1
#define _Py_PENDING_RAWFREE 2

typedef int _Py_add_pending_call_result;
#define _Py_ADD_PENDING_SUCCESS 0
#define _Py_ADD_PENDING_FULL -1

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(_Py_add_pending_call_result) _PyEval_AddPendingCall(
    PyInterpreterState *interp,
    _Py_pending_call_func func,
    void *arg,
    int flags);
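
/* Illustrative usage sketch (not part of the original header): scheduling a
   callback through the pending-call machinery. The callback type
   _Py_pending_call_func is defined in pycore_ceval_state.h; returning 0
   signals success. */
#if 0
static int
my_pending_callback(void *arg)
{
    /* Runs later, only in the main thread, because the call below passes
       _Py_PENDING_MAINTHREADONLY. */
    return 0;
}

static int
schedule_my_callback(PyInterpreterState *interp)
{
    _Py_add_pending_call_result res = _PyEval_AddPendingCall(
        interp, my_pending_callback, NULL, _Py_PENDING_MAINTHREADONLY);
    return (res == _Py_ADD_PENDING_SUCCESS) ? 0 : -1;
}
#endif
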
#ifdef HAVE_FORK
extern PyStatus _PyEval_ReInitThreads(PyThreadState *tstate);
#endif

// Used by sys.call_tracing()
extern PyObject* _PyEval_CallTracing(PyObject *func, PyObject *args);

// Used by sys.get_asyncgen_hooks()
extern PyObject* _PyEval_GetAsyncGenFirstiter(void);
extern PyObject* _PyEval_GetAsyncGenFinalizer(void);

// Used by sys.set_asyncgen_hooks()
extern int _PyEval_SetAsyncGenFirstiter(PyObject *);
extern int _PyEval_SetAsyncGenFinalizer(PyObject *);

// Used by sys.get_coroutine_origin_tracking_depth()
// and sys.set_coroutine_origin_tracking_depth()
extern int _PyEval_GetCoroutineOriginTrackingDepth(void);
extern int _PyEval_SetCoroutineOriginTrackingDepth(int depth);

extern void _PyEval_Fini(void);


extern PyObject* _PyEval_GetBuiltins(PyThreadState *tstate);
extern PyObject* _PyEval_BuiltinsFromGlobals(
    PyThreadState *tstate,
    PyObject *globals);

// Trampoline API

typedef struct {
    // Callback to initialize the trampoline state
    void* (*init_state)(void);
    // Callback to register every trampoline being created
    void (*write_state)(void* state, const void *code_addr,
                        unsigned int code_size, PyCodeObject* code);
    // Callback to free the trampoline state
    int (*free_state)(void* state);
} _PyPerf_Callbacks;

extern int _PyPerfTrampoline_SetCallbacks(_PyPerf_Callbacks *);
extern void _PyPerfTrampoline_GetCallbacks(_PyPerf_Callbacks *);
extern int _PyPerfTrampoline_Init(int activate);
extern int _PyPerfTrampoline_Fini(void);
extern void _PyPerfTrampoline_FreeArenas(void);
extern int _PyIsPerfTrampolineActive(void);
extern PyStatus _PyPerfTrampoline_AfterFork_Child(void);
#ifdef PY_HAVE_PERF_TRAMPOLINE
extern _PyPerf_Callbacks _Py_perfmap_callbacks;
extern _PyPerf_Callbacks _Py_perfmap_jit_callbacks;
#endif

static inline PyObject*
_PyEval_EvalFrame(PyThreadState *tstate, struct _PyInterpreterFrame *frame, int throwflag)
{
    EVAL_CALL_STAT_INC(EVAL_CALL_TOTAL);
    if (tstate->interp->eval_frame == NULL) {
        return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
    }
    return tstate->interp->eval_frame(tstate, frame, throwflag);
}

extern PyObject*
_PyEval_Vector(PyThreadState *tstate,
               PyFunctionObject *func, PyObject *locals,
               PyObject* const* args, size_t argcount,
               PyObject *kwnames);

extern int _PyEval_ThreadsInitialized(void);
extern void _PyEval_InitGIL(PyThreadState *tstate, int own_gil);
extern void _PyEval_FiniGIL(PyInterpreterState *interp);

extern void _PyEval_AcquireLock(PyThreadState *tstate);

extern void _PyEval_ReleaseLock(PyInterpreterState *, PyThreadState *,
                                int final_release);

#ifdef Py_GIL_DISABLED
// Returns 0 or 1 if the GIL for the given thread's interpreter is disabled or
// enabled, respectively.
//
// The enabled state of the GIL will not change while one or more threads are
// attached.
static inline int
_PyEval_IsGILEnabled(PyThreadState *tstate)
{
    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    return _Py_atomic_load_int_relaxed(&gil->enabled) != 0;
}

// Enable or disable the GIL used by the interpreter that owns tstate, which
// must be the current thread. This may affect other interpreters, if the GIL
// is shared. All three functions will be no-ops (and return 0) if the
// interpreter's `enable_gil' config is not _PyConfig_GIL_DEFAULT.
//
// Every call to _PyEval_EnableGILTransient() must be paired with exactly one
// call to either _PyEval_EnableGILPermanent() or
// _PyEval_DisableGIL(). _PyEval_EnableGILPermanent() and _PyEval_DisableGIL()
// must only be called while the GIL is enabled from a call to
// _PyEval_EnableGILTransient().
//
// _PyEval_EnableGILTransient() returns 1 if it enabled the GIL, or 0 if the
// GIL was already enabled, whether transiently or permanently. The caller will
// hold the GIL upon return.
//
// _PyEval_EnableGILPermanent() returns 1 if it permanently enabled the GIL
// (which must already be enabled), or 0 if it was already permanently
// enabled. Once _PyEval_EnableGILPermanent() has been called once, all
// subsequent calls to any of the three functions will be no-ops.
//
// _PyEval_DisableGIL() returns 1 if it disabled the GIL, or 0 if the GIL was
// kept enabled because of another request, whether transient or permanent.
//
// All three functions must be called by an attached thread (this implies that
// if the GIL is enabled, the current thread must hold it).
extern int _PyEval_EnableGILTransient(PyThreadState *tstate);
extern int _PyEval_EnableGILPermanent(PyThreadState *tstate);
extern int _PyEval_DisableGIL(PyThreadState *state);
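
/* Illustrative pairing sketch (not part of the original header), following
   the contract documented above: one transient enable, later balanced by
   exactly one disable, regardless of either call's return value. */
#if 0
static void
run_with_gil_temporarily(PyThreadState *tstate)
{
    (void)_PyEval_EnableGILTransient(tstate);
    /* ... code that must run with the GIL held ... */
    (void)_PyEval_DisableGIL(tstate);
}
#endif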
#endif

extern void _PyEval_DeactivateOpCache(void);


/* --- _Py_EnterRecursiveCall() ----------------------------------------- */

#ifdef USE_STACKCHECK
/* With USE_STACKCHECK macro defined, trigger stack checks in
   _Py_CheckRecursiveCall() on every 64th call to _Py_EnterRecursiveCall. */
static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
    return (tstate->c_recursion_remaining-- < 0
            || (tstate->c_recursion_remaining & 63) == 0);
}
#else
static inline int _Py_MakeRecCheck(PyThreadState *tstate) {
    return tstate->c_recursion_remaining-- < 0;
}
#endif

// Export for '_json' shared extension, used via _Py_EnterRecursiveCall()
// static inline function.
PyAPI_FUNC(int) _Py_CheckRecursiveCall(
    PyThreadState *tstate,
    const char *where);

int _Py_CheckRecursiveCallPy(
    PyThreadState *tstate);

static inline int _Py_EnterRecursiveCallTstate(PyThreadState *tstate,
                                               const char *where) {
    return (_Py_MakeRecCheck(tstate) && _Py_CheckRecursiveCall(tstate, where));
}

static inline void _Py_EnterRecursiveCallTstateUnchecked(PyThreadState *tstate) {
    assert(tstate->c_recursion_remaining > 0);
    tstate->c_recursion_remaining--;
}

static inline int _Py_EnterRecursiveCall(const char *where) {
    PyThreadState *tstate = _PyThreadState_GET();
    return _Py_EnterRecursiveCallTstate(tstate, where);
}

static inline void _Py_LeaveRecursiveCallTstate(PyThreadState *tstate) {
    tstate->c_recursion_remaining++;
}

static inline void _Py_LeaveRecursiveCall(void) {
    PyThreadState *tstate = _PyThreadState_GET();
    _Py_LeaveRecursiveCallTstate(tstate);
}
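
/* Illustrative usage sketch (not part of the original header): every
   successful enter must be balanced by a leave; the `where` text is
   appended to the recursion-limit error message. */
#if 0
static int
visit_recursively(PyObject *node)
{
    if (_Py_EnterRecursiveCall(" while visiting nodes")) {
        return -1;  /* RecursionError has been set */
    }
    /* ... recurse into child nodes here ... */
    _Py_LeaveRecursiveCall();
    return 0;
}
#endif
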
extern struct _PyInterpreterFrame* _PyEval_GetFrame(void);

PyAPI_FUNC(PyObject *)_Py_MakeCoro(PyFunctionObject *func);

/* Handle signals, pending calls, GIL drop request
   and asynchronous exception */
PyAPI_FUNC(int) _Py_HandlePending(PyThreadState *tstate);

extern PyObject * _PyEval_GetFrameLocals(void);

typedef PyObject *(*conversion_func)(PyObject *);

PyAPI_DATA(const binaryfunc) _PyEval_BinaryOps[];
PyAPI_DATA(const conversion_func) _PyEval_ConversionFuncs[];

PyAPI_FUNC(int) _PyEval_CheckExceptStarTypeValid(PyThreadState *tstate, PyObject* right);
PyAPI_FUNC(int) _PyEval_CheckExceptTypeValid(PyThreadState *tstate, PyObject* right);
PyAPI_FUNC(int) _PyEval_ExceptionGroupMatch(PyObject* exc_value, PyObject *match_type, PyObject **match, PyObject **rest);
PyAPI_FUNC(void) _PyEval_FormatAwaitableError(PyThreadState *tstate, PyTypeObject *type, int oparg);
PyAPI_FUNC(void) _PyEval_FormatExcCheckArg(PyThreadState *tstate, PyObject *exc, const char *format_str, PyObject *obj);
PyAPI_FUNC(void) _PyEval_FormatExcUnbound(PyThreadState *tstate, PyCodeObject *co, int oparg);
PyAPI_FUNC(void) _PyEval_FormatKwargsError(PyThreadState *tstate, PyObject *func, PyObject *kwargs);
PyAPI_FUNC(PyObject *)_PyEval_MatchClass(PyThreadState *tstate, PyObject *subject, PyObject *type, Py_ssize_t nargs, PyObject *kwargs);
PyAPI_FUNC(PyObject *)_PyEval_MatchKeys(PyThreadState *tstate, PyObject *map, PyObject *keys);
PyAPI_FUNC(int) _PyEval_UnpackIterable(PyThreadState *tstate, PyObject *v, int argcnt, int argcntafter, PyObject **sp);
PyAPI_FUNC(void) _PyEval_MonitorRaise(PyThreadState *tstate, _PyInterpreterFrame *frame, _Py_CODEUNIT *instr);
PyAPI_FUNC(void) _PyEval_FrameClearAndPop(PyThreadState *tstate, _PyInterpreterFrame *frame);


/* Bits that can be set in PyThreadState.eval_breaker */
#define _PY_GIL_DROP_REQUEST_BIT (1U << 0)
#define _PY_SIGNALS_PENDING_BIT (1U << 1)
#define _PY_CALLS_TO_DO_BIT (1U << 2)
#define _PY_ASYNC_EXCEPTION_BIT (1U << 3)
#define _PY_GC_SCHEDULED_BIT (1U << 4)
#define _PY_EVAL_PLEASE_STOP_BIT (1U << 5)
#define _PY_EVAL_EXPLICIT_MERGE_BIT (1U << 6)

/* Reserve a few bits for future use */
#define _PY_EVAL_EVENTS_BITS 8
#define _PY_EVAL_EVENTS_MASK ((1 << _PY_EVAL_EVENTS_BITS)-1)

static inline void
_Py_set_eval_breaker_bit(PyThreadState *tstate, uintptr_t bit)
{
    _Py_atomic_or_uintptr(&tstate->eval_breaker, bit);
}

static inline void
_Py_unset_eval_breaker_bit(PyThreadState *tstate, uintptr_t bit)
{
    _Py_atomic_and_uintptr(&tstate->eval_breaker, ~bit);
}

static inline int
_Py_eval_breaker_bit_is_set(PyThreadState *tstate, uintptr_t bit)
{
    uintptr_t b = _Py_atomic_load_uintptr_relaxed(&tstate->eval_breaker);
    return (b & bit) != 0;
}

// Free-threaded builds use these functions to set or unset a bit on all
// threads in the given interpreter.
void _Py_set_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);
void _Py_unset_eval_breaker_bit_all(PyInterpreterState *interp, uintptr_t bit);


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CEVAL_H */
134
Dependencies/Python/include/internal/pycore_ceval_state.h
vendored
Normal file
@@ -0,0 +1,134 @@
#ifndef Py_INTERNAL_CEVAL_STATE_H
#define Py_INTERNAL_CEVAL_STATE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_lock.h"  // PyMutex
#include "pycore_gil.h"   // struct _gil_runtime_state


typedef int (*_Py_pending_call_func)(void *);

struct _pending_call {
    _Py_pending_call_func func;
    void *arg;
    int flags;
};

#define PENDINGCALLSARRAYSIZE 300

#define MAXPENDINGCALLS PENDINGCALLSARRAYSIZE
/* For interpreter-level pending calls, we want to avoid spending too
   much time on pending calls in any one thread, so we apply a limit. */
#if MAXPENDINGCALLS > 100
# define MAXPENDINGCALLSLOOP 100
#else
# define MAXPENDINGCALLSLOOP MAXPENDINGCALLS
#endif

/* We keep the number small to preserve as much compatibility
   as possible with earlier versions. */
#define MAXPENDINGCALLS_MAIN 32
/* For the main thread, we want to make sure all pending calls are
   run at once, for the sake of prompt signal handling. This is
   unlikely to cause any problems since there should be very few
   pending calls for the main thread. */
#define MAXPENDINGCALLSLOOP_MAIN 0

struct _pending_calls {
    PyThreadState *handling_thread;
    PyMutex mutex;
    /* Request for running pending calls. */
    int32_t npending;
    /* The maximum allowed number of pending calls.
       If the queue fills up to this point then _PyEval_AddPendingCall()
       will return _Py_ADD_PENDING_FULL. */
    int32_t max;
    /* We don't want a flood of pending calls to interrupt any one thread
       for too long, so we keep a limit on the number handled per pass.
       A value of 0 means there is no limit (other than the maximum
       size of the list of pending calls). */
    int32_t maxloop;
    struct _pending_call calls[PENDINGCALLSARRAYSIZE];
    int first;
    int next;
};


typedef enum {
    PERF_STATUS_FAILED = -1,  // Perf trampoline is in an invalid state
    PERF_STATUS_NO_INIT = 0,  // Perf trampoline is not initialized
    PERF_STATUS_OK = 1,       // Perf trampoline is ready to be executed
} perf_status_t;

#ifdef PY_HAVE_PERF_TRAMPOLINE
struct code_arena_st;

struct trampoline_api_st {
    void* (*init_state)(void);
    void (*write_state)(void* state, const void *code_addr,
                        unsigned int code_size, PyCodeObject* code);
    int (*free_state)(void* state);
    void *state;
    Py_ssize_t code_padding;
};
#endif


struct _ceval_runtime_state {
    struct {
#ifdef PY_HAVE_PERF_TRAMPOLINE
        perf_status_t status;
        int perf_trampoline_type;
        Py_ssize_t extra_code_index;
        struct code_arena_st *code_arena;
        struct trampoline_api_st trampoline_api;
        FILE *map_file;
        Py_ssize_t persist_after_fork;
#else
        int _not_used;
#endif
    } perf;
    /* Pending calls to be made only on the main thread. */
    // The signal machinery falls back on this
    // so it must be especially stable and efficient.
    // For example, we use a preallocated array
    // for the list of pending calls.
    struct _pending_calls pending_mainthread;
    PyMutex sys_trace_profile_mutex;
};


#ifdef PY_HAVE_PERF_TRAMPOLINE
# define _PyEval_RUNTIME_PERF_INIT \
    { \
        .status = PERF_STATUS_NO_INIT, \
        .extra_code_index = -1, \
        .persist_after_fork = 0, \
    }
#else
# define _PyEval_RUNTIME_PERF_INIT {0}
#endif


struct _ceval_state {
    /* This variable holds the global instrumentation version. When a thread is
       running, this value is overlaid onto PyThreadState.eval_breaker so that
       changes in the instrumentation version will trigger the eval breaker. */
    uintptr_t instrumentation_version;
    int recursion_limit;
    struct _gil_runtime_state *gil;
    int own_gil;
    struct _pending_calls pending;
};


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CEVAL_STATE_H */
596
Dependencies/Python/include/internal/pycore_code.h
vendored
Normal file
@@ -0,0 +1,596 @@
#ifndef Py_INTERNAL_CODE_H
#define Py_INTERNAL_CODE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_lock.h"     // PyMutex
#include "pycore_backoff.h"  // _Py_BackoffCounter


/* Each instruction in a code object is a fixed-width value,
 * currently 2 bytes: 1-byte opcode + 1-byte oparg. The EXTENDED_ARG
 * opcode allows for larger values but the current limit is 3 uses
 * of EXTENDED_ARG (see Python/compile.c), for a maximum
 * 32-bit value. This aligns with the note in Python/compile.c
 * (compiler_addop_i_line) indicating that the max oparg value is
 * 2**32 - 1, rather than INT_MAX.
 */

typedef union {
    uint16_t cache;
    struct {
        uint8_t code;
        uint8_t arg;
    } op;
    _Py_BackoffCounter counter;  // First cache entry of specializable op
} _Py_CODEUNIT;

#define _PyCode_CODE(CO) _Py_RVALUE((_Py_CODEUNIT *)(CO)->co_code_adaptive)
#define _PyCode_NBYTES(CO) (Py_SIZE(CO) * (Py_ssize_t)sizeof(_Py_CODEUNIT))


/* These macros only remain defined for compatibility. */
#define _Py_OPCODE(word) ((word).op.code)
#define _Py_OPARG(word) ((word).op.arg)

static inline _Py_CODEUNIT
_py_make_codeunit(uint8_t opcode, uint8_t oparg)
{
    // No designated initialisers because of C++ compat
    _Py_CODEUNIT word;
    word.op.code = opcode;
    word.op.arg = oparg;
    return word;
}

static inline void
_py_set_opcode(_Py_CODEUNIT *word, uint8_t opcode)
{
    word->op.code = opcode;
}

#define _Py_MAKE_CODEUNIT(opcode, oparg) _py_make_codeunit((opcode), (oparg))
#define _Py_SET_OPCODE(word, opcode) _py_set_opcode(&(word), (opcode))


// We hide some of the newer PyCodeObject fields behind macros.
// This helps with backporting certain changes to 3.12.
#define _PyCode_HAS_EXECUTORS(CODE) \
    (CODE->co_executors != NULL)
#define _PyCode_HAS_INSTRUMENTATION(CODE) \
    (CODE->_co_instrumentation_version > 0)

struct _py_code_state {
    PyMutex mutex;
    // Interned constants from code objects. Used by the free-threaded build.
    struct _Py_hashtable_t *constants;
};

extern PyStatus _PyCode_Init(PyInterpreterState *interp);
extern void _PyCode_Fini(PyInterpreterState *interp);

#define CODE_MAX_WATCHERS 8

/* PEP 659
 * Specialization and quickening structs and helper functions
 */


// Inline caches. If you change the number of cache entries for an instruction,
// you must *also* update the number of cache entries in Lib/opcode.py and bump
// the magic number in Lib/importlib/_bootstrap_external.py!

#define CACHE_ENTRIES(cache) (sizeof(cache)/sizeof(_Py_CODEUNIT))

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t module_keys_version;
    uint16_t builtin_keys_version;
    uint16_t index;
} _PyLoadGlobalCache;

#define INLINE_CACHE_ENTRIES_LOAD_GLOBAL CACHE_ENTRIES(_PyLoadGlobalCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyBinaryOpCache;

#define INLINE_CACHE_ENTRIES_BINARY_OP CACHE_ENTRIES(_PyBinaryOpCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyUnpackSequenceCache;

#define INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE \
    CACHE_ENTRIES(_PyUnpackSequenceCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyCompareOpCache;

#define INLINE_CACHE_ENTRIES_COMPARE_OP CACHE_ENTRIES(_PyCompareOpCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyBinarySubscrCache;

#define INLINE_CACHE_ENTRIES_BINARY_SUBSCR CACHE_ENTRIES(_PyBinarySubscrCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PySuperAttrCache;

#define INLINE_CACHE_ENTRIES_LOAD_SUPER_ATTR CACHE_ENTRIES(_PySuperAttrCache)

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t version[2];
    uint16_t index;
} _PyAttrCache;

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t type_version[2];
    union {
        uint16_t keys_version[2];
        uint16_t dict_offset;
    };
    uint16_t descr[4];
} _PyLoadMethodCache;


// MUST be the max(_PyAttrCache, _PyLoadMethodCache)
#define INLINE_CACHE_ENTRIES_LOAD_ATTR CACHE_ENTRIES(_PyLoadMethodCache)

#define INLINE_CACHE_ENTRIES_STORE_ATTR CACHE_ENTRIES(_PyAttrCache)

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t func_version[2];
} _PyCallCache;

#define INLINE_CACHE_ENTRIES_CALL CACHE_ENTRIES(_PyCallCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyStoreSubscrCache;

#define INLINE_CACHE_ENTRIES_STORE_SUBSCR CACHE_ENTRIES(_PyStoreSubscrCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyForIterCache;

#define INLINE_CACHE_ENTRIES_FOR_ITER CACHE_ENTRIES(_PyForIterCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PySendCache;

#define INLINE_CACHE_ENTRIES_SEND CACHE_ENTRIES(_PySendCache)

typedef struct {
    _Py_BackoffCounter counter;
    uint16_t version[2];
} _PyToBoolCache;

#define INLINE_CACHE_ENTRIES_TO_BOOL CACHE_ENTRIES(_PyToBoolCache)

typedef struct {
    _Py_BackoffCounter counter;
} _PyContainsOpCache;

#define INLINE_CACHE_ENTRIES_CONTAINS_OP CACHE_ENTRIES(_PyContainsOpCache)

// Borrowed references to common callables:
struct callable_cache {
    PyObject *isinstance;
    PyObject *len;
    PyObject *list_append;
    PyObject *object__getattribute__;
};

/* "Locals plus" for a code object is the set of locals + cell vars +
 * free vars. This relates to variable names as well as offsets into
 * the "fast locals" storage array of execution frames. The compiler
 * builds the list of names, their offsets, and the corresponding
 * kind of local.
 *
 * Those kinds represent the source of the initial value and the
 * variable's scope (as related to closures). A "local" is an
 * argument or other variable defined in the current scope. A "free"
 * variable is one that is defined in an outer scope and comes from
 * the function's closure. A "cell" variable is a local that escapes
 * into an inner function as part of a closure, and thus must be
 * wrapped in a cell. Any "local" can also be a "cell", but the
 * "free" kind is mutually exclusive with both.
 */

// Note that these all fit within a byte, as do combinations.
// Later, we will use the smaller numbers to differentiate the different
// kinds of locals (e.g. pos-only arg, varkwargs, local-only).
#define CO_FAST_HIDDEN 0x10
#define CO_FAST_LOCAL 0x20
#define CO_FAST_CELL 0x40
#define CO_FAST_FREE 0x80

typedef unsigned char _PyLocals_Kind;

static inline _PyLocals_Kind
_PyLocals_GetKind(PyObject *kinds, int i)
{
    assert(PyBytes_Check(kinds));
    assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
    char *ptr = PyBytes_AS_STRING(kinds);
    return (_PyLocals_Kind)(ptr[i]);
}

static inline void
_PyLocals_SetKind(PyObject *kinds, int i, _PyLocals_Kind kind)
{
    assert(PyBytes_Check(kinds));
    assert(0 <= i && i < PyBytes_GET_SIZE(kinds));
    char *ptr = PyBytes_AS_STRING(kinds);
    ptr[i] = (char) kind;
}


struct _PyCodeConstructor {
    /* metadata */
    PyObject *filename;
    PyObject *name;
    PyObject *qualname;
    int flags;

    /* the code */
    PyObject *code;
    int firstlineno;
    PyObject *linetable;

    /* used by the code */
    PyObject *consts;
    PyObject *names;

    /* mapping frame offsets to information */
    PyObject *localsplusnames;  // Tuple of strings
    PyObject *localspluskinds;  // Bytes object, one byte per variable

    /* args (within varnames) */
    int argcount;
    int posonlyargcount;
    // XXX Replace argcount with posorkwargcount (argcount - posonlyargcount).
    int kwonlyargcount;

    /* needed to create the frame */
    int stacksize;

    /* used by the eval loop */
    PyObject *exceptiontable;
};

// Using an "arguments struct" like this is helpful for maintainability
// in a case such as this with many parameters. It does bear a risk:
// if the struct changes and callers are not updated properly then the
// compiler will not catch problems (like a missing argument). This can
// cause hard-to-debug problems. The risk is mitigated by the use of
// check_code() in codeobject.c. However, we may decide to switch
// back to a regular function signature. Regardless, this approach
// wouldn't be appropriate if this weren't a strictly internal API.
// (See the comments in https://github.com/python/cpython/pull/26258.)
extern int _PyCode_Validate(struct _PyCodeConstructor *);
extern PyCodeObject* _PyCode_New(struct _PyCodeConstructor *);


/* Private API */

/* Getters for internal PyCodeObject data. */
extern PyObject* _PyCode_GetVarnames(PyCodeObject *);
extern PyObject* _PyCode_GetCellvars(PyCodeObject *);
extern PyObject* _PyCode_GetFreevars(PyCodeObject *);
extern PyObject* _PyCode_GetCode(PyCodeObject *);

/** API for initializing the line number tables. */
extern int _PyCode_InitAddressRange(PyCodeObject* co, PyCodeAddressRange *bounds);

/** Out of process API for initializing the location table. */
extern void _PyLineTable_InitAddressRange(
    const char *linetable,
    Py_ssize_t length,
    int firstlineno,
    PyCodeAddressRange *range);

/** API for traversing the line number table. */
extern int _PyLineTable_NextAddressRange(PyCodeAddressRange *range);
extern int _PyLineTable_PreviousAddressRange(PyCodeAddressRange *range);

/** API for executors */
extern void _PyCode_Clear_Executors(PyCodeObject *code);

#ifdef Py_GIL_DISABLED
// gh-115999 tracks progress on addressing this.
#define ENABLE_SPECIALIZATION 0
#else
#define ENABLE_SPECIALIZATION 1
#endif

/* Specialization functions */

extern void _Py_Specialize_LoadSuperAttr(PyObject *global_super, PyObject *cls,
                                         _Py_CODEUNIT *instr, int load_method);
extern void _Py_Specialize_LoadAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                    PyObject *name);
extern void _Py_Specialize_StoreAttr(PyObject *owner, _Py_CODEUNIT *instr,
                                     PyObject *name);
extern void _Py_Specialize_LoadGlobal(PyObject *globals, PyObject *builtins,
                                      _Py_CODEUNIT *instr, PyObject *name);
extern void _Py_Specialize_BinarySubscr(PyObject *sub, PyObject *container,
                                        _Py_CODEUNIT *instr);
extern void _Py_Specialize_StoreSubscr(PyObject *container, PyObject *sub,
                                       _Py_CODEUNIT *instr);
extern void _Py_Specialize_Call(PyObject *callable, _Py_CODEUNIT *instr,
                                int nargs);
extern void _Py_Specialize_BinaryOp(PyObject *lhs, PyObject *rhs, _Py_CODEUNIT *instr,
                                    int oparg, PyObject **locals);
extern void _Py_Specialize_CompareOp(PyObject *lhs, PyObject *rhs,
                                     _Py_CODEUNIT *instr, int oparg);
extern void _Py_Specialize_UnpackSequence(PyObject *seq, _Py_CODEUNIT *instr,
                                          int oparg);
extern void _Py_Specialize_ForIter(PyObject *iter, _Py_CODEUNIT *instr, int oparg);
extern void _Py_Specialize_Send(PyObject *receiver, _Py_CODEUNIT *instr);
extern void _Py_Specialize_ToBool(PyObject *value, _Py_CODEUNIT *instr);
extern void _Py_Specialize_ContainsOp(PyObject *value, _Py_CODEUNIT *instr);

#ifdef Py_STATS

#include "pycore_bitutils.h"  // _Py_bit_length

#define STAT_INC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name++; } while (0)
#define STAT_DEC(opname, name) do { if (_Py_stats) _Py_stats->opcode_stats[opname].specialization.name--; } while (0)
#define OPCODE_EXE_INC(opname) do { if (_Py_stats) _Py_stats->opcode_stats[opname].execution_count++; } while (0)
#define CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.name++; } while (0)
#define OBJECT_STAT_INC(name) do { if (_Py_stats) _Py_stats->object_stats.name++; } while (0)
#define OBJECT_STAT_INC_COND(name, cond) \
    do { if (_Py_stats && cond) _Py_stats->object_stats.name++; } while (0)
#define EVAL_CALL_STAT_INC(name) do { if (_Py_stats) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) \
    do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0)
#define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0)
#define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0)
#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0)
#define UOP_PAIR_INC(uopcode, lastuop) \
    do { \
        if (lastuop && _Py_stats) { \
            _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++; \
        } \
        lastuop = uopcode; \
    } while (0)
#define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0)
#define OPT_ERROR_IN_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.error_in_opcode[opname]++; } while (0)
#define OPT_HIST(length, name) \
    do { \
        if (_Py_stats) { \
            int bucket = _Py_bit_length(length >= 1 ? length - 1 : 0); \
            bucket = (bucket >= _Py_UOP_HIST_SIZE) ? _Py_UOP_HIST_SIZE - 1 : bucket; \
            _Py_stats->optimization_stats.name[bucket]++; \
        } \
    } while (0)
#define RARE_EVENT_STAT_INC(name) do { if (_Py_stats) _Py_stats->rare_event_stats.name++; } while (0)

// Export for '_opcode' shared extension
PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void);

#else
#define STAT_INC(opname, name) ((void)0)
#define STAT_DEC(opname, name) ((void)0)
#define OPCODE_EXE_INC(opname) ((void)0)
#define CALL_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC(name) ((void)0)
#define OBJECT_STAT_INC_COND(name, cond) ((void)0)
#define EVAL_CALL_STAT_INC(name) ((void)0)
#define EVAL_CALL_STAT_INC_IF_FUNCTION(name, callable) ((void)0)
#define GC_STAT_ADD(gen, name, n) ((void)0)
#define OPT_STAT_INC(name) ((void)0)
#define UOP_STAT_INC(opname, name) ((void)0)
#define UOP_PAIR_INC(uopcode, lastuop) ((void)0)
#define OPT_UNSUPPORTED_OPCODE(opname) ((void)0)
#define OPT_ERROR_IN_OPCODE(opname) ((void)0)
#define OPT_HIST(length, name) ((void)0)
#define RARE_EVENT_STAT_INC(name) ((void)0)
#endif  // !Py_STATS

// Utility functions for reading/writing 32/64-bit values in the inline caches.
// Great care should be taken to ensure that these functions remain correct and
// performant! They should compile to just "move" instructions on all supported
// compilers and platforms.

// We use memcpy to let the C compiler handle unaligned accesses and endianness
// issues for us. It also seems to produce better code than manual copying for
// most compilers (see https://blog.regehr.org/archives/959 for more info).

static inline void
write_u32(uint16_t *p, uint32_t val)
{
    memcpy(p, &val, sizeof(val));
}

static inline void
write_u64(uint16_t *p, uint64_t val)
{
    memcpy(p, &val, sizeof(val));
}

static inline void
write_obj(uint16_t *p, PyObject *val)
{
    memcpy(p, &val, sizeof(val));
}

static inline uint16_t
read_u16(uint16_t *p)
{
    return *p;
}

static inline uint32_t
read_u32(uint16_t *p)
{
    uint32_t val;
    memcpy(&val, p, sizeof(val));
    return val;
}

static inline uint64_t
read_u64(uint16_t *p)
{
    uint64_t val;
    memcpy(&val, p, sizeof(val));
    return val;
}

static inline PyObject *
read_obj(uint16_t *p)
{
    PyObject *val;
    memcpy(&val, p, sizeof(val));
    return val;
}

/* See Objects/exception_handling_notes.txt for details.
 */
static inline unsigned char *
parse_varint(unsigned char *p, int *result) {
    int val = p[0] & 63;
    while (p[0] & 64) {
        p++;
        val = (val << 6) | (p[0] & 63);
    }
    *result = val;
    return p+1;
}

static inline int
write_varint(uint8_t *ptr, unsigned int val)
{
    int written = 1;
    while (val >= 64) {
        *ptr++ = 64 | (val & 63);
        val >>= 6;
        written++;
    }
    *ptr = (uint8_t)val;
    return written;
}

static inline int
write_signed_varint(uint8_t *ptr, int val)
{
    unsigned int uval;
    if (val < 0) {
        // (unsigned int)(-val) has undefined behavior for INT_MIN
        uval = ((0 - (unsigned int)val) << 1) | 1;
    }
    else {
        uval = (unsigned int)val << 1;
    }
    return write_varint(ptr, uval);
}
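
/* Illustrative sketch (not part of the original header): decoding with
   parse_varint() above. The first byte carries the most significant 6-bit
   chunk (bit 6 set means "continue"), so {64|5, 3} decodes to (5 << 6) | 3.
   Note that write_varint() above emits the least significant chunk first,
   so the two helpers serve different table formats. */
#if 0
static void
parse_varint_example(void)
{
    unsigned char seq[2] = {64 | 5, 3};
    int value;
    unsigned char *end = parse_varint(seq, &value);
    assert(value == ((5 << 6) | 3));  /* == 323 */
    assert(end == seq + 2);
}
#endif
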
static inline int
|
||||
write_location_entry_start(uint8_t *ptr, int code, int length)
|
||||
{
|
||||
assert((code & 15) == code);
|
||||
*ptr = 128 | (uint8_t)(code << 3) | (uint8_t)(length - 1);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
/** Counters
|
||||
* The first 16-bit value in each inline cache is a counter.
|
||||
*
|
||||
* When counting executions until the next specialization attempt,
|
||||
* exponential backoff is used to reduce the number of specialization failures.
|
||||
* See pycore_backoff.h for more details.
|
||||
* On a specialization failure, the backoff counter is restarted.
|
||||
*/
|
||||
|
||||
#include "pycore_backoff.h"
|
||||
|
||||
// A value of 1 means that we attempt to specialize the *second* time each
|
||||
// instruction is executed. Executing twice is a much better indicator of
|
||||
// "hotness" than executing once, but additional warmup delays only prevent
|
||||
// specialization. Most types stabilize by the second execution, too:
|
||||
#define ADAPTIVE_WARMUP_VALUE 1
|
||||
#define ADAPTIVE_WARMUP_BACKOFF 1
|
||||
|
||||
// A value of 52 means that we attempt to re-specialize after 53 misses (a prime
|
||||
// number, useful for avoiding artifacts if every nth value is a different type
|
||||
// or something). Setting the backoff to 0 means that the counter is reset to
|
||||
// the same state as a warming-up instruction (value == 1, backoff == 1) after
|
||||
// deoptimization. This isn't strictly necessary, but it is bit easier to reason
|
||||
// about when thinking about the opcode transitions as a state machine:
|
||||
#define ADAPTIVE_COOLDOWN_VALUE 52
|
||||
#define ADAPTIVE_COOLDOWN_BACKOFF 0
|
||||
|
||||
// Can't assert this in pycore_backoff.h because of header order dependencies
|
||||
#if COLD_EXIT_INITIAL_VALUE <= ADAPTIVE_COOLDOWN_VALUE
|
||||
# error "Cold exit value should be larger than adaptive cooldown value"
|
||||
#endif
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
adaptive_counter_bits(uint16_t value, uint16_t backoff) {
|
||||
return make_backoff_counter(value, backoff);
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
adaptive_counter_warmup(void) {
|
||||
return adaptive_counter_bits(ADAPTIVE_WARMUP_VALUE,
|
||||
ADAPTIVE_WARMUP_BACKOFF);
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
adaptive_counter_cooldown(void) {
|
||||
return adaptive_counter_bits(ADAPTIVE_COOLDOWN_VALUE,
|
||||
ADAPTIVE_COOLDOWN_BACKOFF);
|
||||
}
|
||||
|
||||
static inline _Py_BackoffCounter
|
||||
adaptive_counter_backoff(_Py_BackoffCounter counter) {
|
||||
return restart_backoff_counter(counter);
|
||||
}
|
||||
|
||||
|
||||
/* Comparison bit masks. */
|
||||
|
||||
/* Note this evaluates its arguments twice each */
|
||||
#define COMPARISON_BIT(x, y) (1 << (2 * ((x) >= (y)) + ((x) <= (y))))
|
||||
|
||||
/*
|
||||
* The following bits are chosen so that the value of
|
||||
* COMPARSION_BIT(left, right)
|
||||
* masked by the values below will be non-zero if the
|
||||
* comparison is true, and zero if it is false */
|
||||
|
||||
/* This is for values that are unordered, ie. NaN, not types that are unordered, e.g. sets */
|
||||
#define COMPARISON_UNORDERED 1
|
||||
|
||||
#define COMPARISON_LESS_THAN 2
|
||||
#define COMPARISON_GREATER_THAN 4
|
||||
#define COMPARISON_EQUALS 8
|
||||
|
||||
#define COMPARISON_NOT_EQUALS (COMPARISON_UNORDERED | COMPARISON_LESS_THAN | COMPARISON_GREATER_THAN)
|
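/* Illustrative sketch (not part of this header): COMPARISON_BIT(x, y) yields
   COMPARISON_LESS_THAN for x < y, COMPARISON_GREATER_THAN for x > y,
   COMPARISON_EQUALS for x == y, and COMPARISON_UNORDERED when neither
   x >= y nor x <= y holds (e.g. a NaN operand). */
static inline int
example_is_less_or_equal(double left, double right)
{
    // Non-zero exactly when "left <= right" is true.
    return (COMPARISON_BIT(left, right)
            & (COMPARISON_LESS_THAN | COMPARISON_EQUALS)) != 0;
}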
||||
|
||||
extern int _Py_Instrument(PyCodeObject *co, PyInterpreterState *interp);
|
||||
|
||||
extern int _Py_GetBaseOpcode(PyCodeObject *code, int offset);
|
||||
|
||||
extern int _PyInstruction_GetLength(PyCodeObject *code, int offset);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CODE_H */
|
||||
86
Dependencies/Python/include/internal/pycore_codecs.h
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
#ifndef Py_INTERNAL_CODECS_H
|
||||
#define Py_INTERNAL_CODECS_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_lock.h" // PyMutex
|
||||
|
||||
/* Initialize codecs-related state for the given interpreter, including
|
||||
registering the first codec search function. Must be called before any other
|
||||
PyCodec-related functions, and while only one thread is active. */
|
||||
extern PyStatus _PyCodec_InitRegistry(PyInterpreterState *interp);
|
||||
|
||||
/* Finalize codecs-related state for the given interpreter. No PyCodec-related
|
||||
functions other than PyCodec_Unregister() may be called after this. */
|
||||
extern void _PyCodec_Fini(PyInterpreterState *interp);
|
||||
|
||||
extern PyObject* _PyCodec_Lookup(const char *encoding);
|
||||
|
||||
/* Text codec specific encoding and decoding API.
|
||||
|
||||
Checks the encoding against a list of codecs which do not
|
||||
implement a str<->bytes encoding before attempting the
|
||||
operation.
|
||||
|
||||
Please note that these APIs are internal and should not
|
||||
be used in Python C extensions.
|
||||
|
||||
XXX (ncoghlan): should we make these, or something like them, public
|
||||
in Python 3.5+?
|
||||
|
||||
*/
|
||||
extern PyObject* _PyCodec_LookupTextEncoding(
|
||||
const char *encoding,
|
||||
const char *alternate_command);
|
||||
|
||||
extern PyObject* _PyCodec_EncodeText(
|
||||
PyObject *object,
|
||||
const char *encoding,
|
||||
const char *errors);
|
||||
|
||||
extern PyObject* _PyCodec_DecodeText(
|
||||
PyObject *object,
|
||||
const char *encoding,
|
||||
const char *errors);
|
||||
|
||||
/* These two aren't actually text encoding specific, but _io.TextIOWrapper
|
||||
* is the only current API consumer.
|
||||
*/
|
||||
extern PyObject* _PyCodecInfo_GetIncrementalDecoder(
|
||||
PyObject *codec_info,
|
||||
const char *errors);
|
||||
|
||||
extern PyObject* _PyCodecInfo_GetIncrementalEncoder(
|
||||
PyObject *codec_info,
|
||||
const char *errors);
|
||||
|
||||
// Per-interpreter state used by codecs.c.
|
||||
struct codecs_state {
|
||||
// A list of callable objects used to search for codecs.
|
||||
PyObject *search_path;
|
||||
|
||||
// A dict mapping codec names to codecs returned from a callable in
|
||||
// search_path.
|
||||
PyObject *search_cache;
|
||||
|
||||
// A dict mapping error handling strategies to functions to implement them.
|
||||
PyObject *error_registry;
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
// Used to safely delete a specific item from search_path.
|
||||
PyMutex search_path_mutex;
|
||||
#endif
|
||||
|
||||
// Whether or not the rest of the state is initialized.
|
||||
int initialized;
|
||||
};
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CODECS_H */
|
||||
118
Dependencies/Python/include/internal/pycore_compile.h
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
#ifndef Py_INTERNAL_COMPILE_H
|
||||
#define Py_INTERNAL_COMPILE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_symtable.h" // _Py_SourceLocation
|
||||
#include "pycore_instruction_sequence.h"
|
||||
|
||||
struct _arena; // Type defined in pycore_pyarena.h
|
||||
struct _mod; // Type defined in pycore_ast.h
|
||||
|
||||
// Export for 'test_peg_generator' shared extension
|
||||
PyAPI_FUNC(PyCodeObject*) _PyAST_Compile(
|
||||
struct _mod *mod,
|
||||
PyObject *filename,
|
||||
PyCompilerFlags *flags,
|
||||
int optimize,
|
||||
struct _arena *arena);
|
||||
|
||||
/* AST optimizations */
|
||||
extern int _PyCompile_AstOptimize(
|
||||
struct _mod *mod,
|
||||
PyObject *filename,
|
||||
PyCompilerFlags *flags,
|
||||
int optimize,
|
||||
struct _arena *arena);
|
||||
|
||||
struct _Py_SourceLocation;
|
||||
|
||||
extern int _PyAST_Optimize(
|
||||
struct _mod *,
|
||||
struct _arena *arena,
|
||||
int optimize,
|
||||
int ff_features);
|
||||
|
||||
|
||||
typedef struct {
|
||||
PyObject *u_name;
|
||||
PyObject *u_qualname; /* dot-separated qualified name (lazy) */
|
||||
|
||||
/* The following fields are dicts that map objects to
|
||||
the index of them in co_XXX. The index is used as
|
||||
the argument for opcodes that refer to those collections.
|
||||
*/
|
||||
PyObject *u_consts; /* all constants */
|
||||
PyObject *u_names; /* all names */
|
||||
PyObject *u_varnames; /* local variables */
|
||||
PyObject *u_cellvars; /* cell variables */
|
||||
PyObject *u_freevars; /* free variables */
|
||||
PyObject *u_fasthidden; /* dict; keys are names that are fast-locals only
|
||||
temporarily within an inlined comprehension. When
|
||||
value is True, treat as fast-local. */
|
||||
|
||||
Py_ssize_t u_argcount; /* number of arguments for block */
|
||||
Py_ssize_t u_posonlyargcount; /* number of positional only arguments for block */
|
||||
Py_ssize_t u_kwonlyargcount; /* number of keyword only arguments for block */
|
||||
|
||||
int u_firstlineno; /* the first lineno of the block */
|
||||
} _PyCompile_CodeUnitMetadata;
|
||||
|
||||
|
||||
/* Utility for a number of growing arrays used in the compiler */
|
||||
int _PyCompile_EnsureArrayLargeEnough(
|
||||
int idx,
|
||||
void **array,
|
||||
int *alloc,
|
||||
int default_alloc,
|
||||
size_t item_size);
|
||||
|
||||
int _PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj);
|
||||
|
||||
|
||||
// Export for '_opcode' extension module
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeIsValid(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasArg(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasConst(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasName(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasJump(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasFree(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasLocal(int opcode);
|
||||
PyAPI_FUNC(int) _PyCompile_OpcodeHasExc(int opcode);
|
||||
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_GetUnaryIntrinsicName(int index);
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_GetBinaryIntrinsicName(int index);
|
||||
|
||||
/* Access compiler internals for unit testing */
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_CleanDoc(PyObject *doc);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_CodeGen(
|
||||
PyObject *ast,
|
||||
PyObject *filename,
|
||||
PyCompilerFlags *flags,
|
||||
int optimize,
|
||||
int compile_mode);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyCompile_OptimizeCfg(
|
||||
PyObject *instructions,
|
||||
PyObject *consts,
|
||||
int nlocals);
|
||||
|
||||
// Export for '_testinternalcapi' shared extension
|
||||
PyAPI_FUNC(PyCodeObject*)
|
||||
_PyCompile_Assemble(_PyCompile_CodeUnitMetadata *umd, PyObject *filename,
|
||||
PyObject *instructions);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_COMPILE_H */
|
||||
25
Dependencies/Python/include/internal/pycore_complexobject.h
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
#ifndef Py_INTERNAL_COMPLEXOBJECT_H
|
||||
#define Py_INTERNAL_COMPLEXOBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_unicodeobject.h" // _PyUnicodeWriter
|
||||
|
||||
/* Format the object based on the format_spec, as defined in PEP 3101
|
||||
(Advanced String Formatting). */
|
||||
extern int _PyComplex_FormatAdvancedWriter(
|
||||
_PyUnicodeWriter *writer,
|
||||
PyObject *obj,
|
||||
PyObject *format_spec,
|
||||
Py_ssize_t start,
|
||||
Py_ssize_t end);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif // !Py_INTERNAL_COMPLEXOBJECT_H
|
||||
93
Dependencies/Python/include/internal/pycore_condvar.h
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
#ifndef Py_INTERNAL_CONDVAR_H
|
||||
#define Py_INTERNAL_CONDVAR_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_pythread.h" // _POSIX_THREADS
|
||||
|
||||
|
||||
#ifdef _POSIX_THREADS
|
||||
/*
|
||||
* POSIX support
|
||||
*/
|
||||
#define Py_HAVE_CONDVAR
|
||||
|
||||
#ifdef HAVE_PTHREAD_H
|
||||
# include <pthread.h> // pthread_mutex_t
|
||||
#endif
|
||||
|
||||
#define PyMUTEX_T pthread_mutex_t
|
||||
#define PyCOND_T pthread_cond_t
|
||||
|
||||
#elif defined(NT_THREADS)
|
||||
/*
|
||||
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
|
||||
*
|
||||
* Emulated condition variables that work on XP and later, plus
|
||||
* native support on Vista and onwards.
|
||||
*/
|
||||
#define Py_HAVE_CONDVAR
|
||||
|
||||
/* include windows if it hasn't been done before */
|
||||
#ifndef WIN32_LEAN_AND_MEAN
|
||||
# define WIN32_LEAN_AND_MEAN
|
||||
#endif
|
||||
#include <windows.h> // CRITICAL_SECTION
|
||||
|
||||
/* options */
|
||||
/* emulated condition variables are provided for those that want
|
||||
* to target Windows XP or earlier. Modify this macro to enable them.
|
||||
*/
|
||||
#ifndef _PY_EMULATED_WIN_CV
|
||||
#define _PY_EMULATED_WIN_CV 0 /* use non-emulated condition variables */
|
||||
#endif
|
||||
|
||||
/* fall back to emulation if targeting earlier than Vista */
|
||||
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
|
||||
#undef _PY_EMULATED_WIN_CV
|
||||
#define _PY_EMULATED_WIN_CV 1
|
||||
#endif
|
||||
|
||||
#if _PY_EMULATED_WIN_CV
|
||||
|
||||
typedef CRITICAL_SECTION PyMUTEX_T;
|
||||
|
||||
/* The ConditionVariable object. From XP onwards it is easily emulated
|
||||
with a Semaphore.
|
||||
Semaphores are available on Windows XP (2003 server) and later.
|
||||
We use a Semaphore rather than an auto-reset event, because although
|
||||
an auto-reset event might appear to solve the lost-wakeup bug (race
|
||||
condition between releasing the outer lock and waiting) because it
|
||||
maintains state even though a wait hasn't happened, there is still
|
||||
a lost wakeup problem if more than one thread is interrupted in the
|
||||
critical place. A semaphore solves that, because its state is
|
||||
counted, not Boolean.
|
||||
Because it is ok to signal a condition variable with no one
|
||||
waiting, we need to keep track of the number of
|
||||
waiting threads. Otherwise, the semaphore's state could rise
|
||||
without bound. This also helps reduce the number of "spurious wakeups"
|
||||
that would otherwise happen.
|
||||
*/
|
||||
|
||||
typedef struct _PyCOND_T
|
||||
{
|
||||
HANDLE sem;
|
||||
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
|
||||
} PyCOND_T;
|
||||
|
||||
#else /* !_PY_EMULATED_WIN_CV */
|
||||
|
||||
/* Use native Windows primitives if build target is Vista or higher */
|
||||
|
||||
/* SRWLOCK is faster and better than CriticalSection */
|
||||
typedef SRWLOCK PyMUTEX_T;
|
||||
|
||||
typedef CONDITION_VARIABLE PyCOND_T;
|
||||
|
||||
#endif /* _PY_EMULATED_WIN_CV */
|
||||
|
||||
#endif /* _POSIX_THREADS, NT_THREADS */
|
||||
|
||||
#endif /* Py_INTERNAL_CONDVAR_H */
|
||||
61
Dependencies/Python/include/internal/pycore_context.h
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
#ifndef Py_INTERNAL_CONTEXT_H
|
||||
#define Py_INTERNAL_CONTEXT_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_freelist.h" // _PyFreeListState
|
||||
#include "pycore_hamt.h" // PyHamtObject
|
||||
|
||||
|
||||
extern PyTypeObject _PyContextTokenMissing_Type;
|
||||
|
||||
/* runtime lifecycle */
|
||||
|
||||
PyStatus _PyContext_Init(PyInterpreterState *);
|
||||
|
||||
|
||||
/* other API */
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
} _PyContextTokenMissing;
|
||||
|
||||
struct _pycontextobject {
|
||||
PyObject_HEAD
|
||||
PyContext *ctx_prev;
|
||||
PyHamtObject *ctx_vars;
|
||||
PyObject *ctx_weakreflist;
|
||||
int ctx_entered;
|
||||
};
|
||||
|
||||
|
||||
struct _pycontextvarobject {
|
||||
PyObject_HEAD
|
||||
PyObject *var_name;
|
||||
PyObject *var_default;
|
||||
#ifndef Py_GIL_DISABLED
|
||||
PyObject *var_cached;
|
||||
uint64_t var_cached_tsid;
|
||||
uint64_t var_cached_tsver;
|
||||
#endif
|
||||
Py_hash_t var_hash;
|
||||
};
|
||||
|
||||
|
||||
struct _pycontexttokenobject {
|
||||
PyObject_HEAD
|
||||
PyContext *tok_ctx;
|
||||
PyContextVar *tok_var;
|
||||
PyObject *tok_oldval;
|
||||
int tok_used;
|
||||
};
|
||||
|
||||
|
||||
// _testinternalcapi.hamt() used by tests.
|
||||
// Export for '_testcapi' shared extension
|
||||
PyAPI_FUNC(PyObject*) _PyContext_NewHamtForTests(void);
|
||||
|
||||
|
||||
#endif /* !Py_INTERNAL_CONTEXT_H */
|
||||
233
Dependencies/Python/include/internal/pycore_critical_section.h
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
#ifndef Py_INTERNAL_CRITICAL_SECTION_H
|
||||
#define Py_INTERNAL_CRITICAL_SECTION_H
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_lock.h" // PyMutex
|
||||
#include "pycore_pystate.h" // _PyThreadState_GET()
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
// Tagged pointers to critical sections use the two least significant bits to
|
||||
// mark if the pointed-to critical section is inactive and whether it is a
|
||||
// PyCriticalSection2 object.
|
||||
#define _Py_CRITICAL_SECTION_INACTIVE 0x1
|
||||
#define _Py_CRITICAL_SECTION_TWO_MUTEXES 0x2
|
||||
#define _Py_CRITICAL_SECTION_MASK 0x3
|
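/* Illustrative sketch (not from this header): recovering the critical
   section pointer and flag bits from a tagged tstate->critical_section
   value; the function name is an assumption. */
static inline PyCriticalSection *
example_untag_critical_section(uintptr_t tag)
{
    // The low two bits are flags; the remaining bits are the pointer.
    return (PyCriticalSection *)(tag & ~(uintptr_t)_Py_CRITICAL_SECTION_MASK);
}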
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
# define Py_BEGIN_CRITICAL_SECTION_MUT(mutex) \
|
||||
{ \
|
||||
PyCriticalSection _py_cs; \
|
||||
_PyCriticalSection_BeginMutex(&_py_cs, mutex)
|
||||
|
||||
# define Py_BEGIN_CRITICAL_SECTION2_MUT(m1, m2) \
|
||||
{ \
|
||||
PyCriticalSection2 _py_cs2; \
|
||||
_PyCriticalSection2_BeginMutex(&_py_cs2, m1, m2)
|
||||
|
||||
// Specialized version of critical section locking to safely use
|
||||
// PySequence_Fast APIs without the GIL. For performance, the argument *to*
|
||||
// PySequence_Fast() is provided to the macro, not the *result* of
|
||||
// PySequence_Fast(), which would require an extra test to determine if the
|
||||
// lock must be acquired.
|
||||
# define Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) \
|
||||
{ \
|
||||
PyObject *_orig_seq = _PyObject_CAST(original); \
|
||||
const bool _should_lock_cs = PyList_CheckExact(_orig_seq); \
|
||||
PyCriticalSection _cs; \
|
||||
if (_should_lock_cs) { \
|
||||
_PyCriticalSection_Begin(&_cs, _orig_seq); \
|
||||
}
|
||||
|
||||
# define Py_END_CRITICAL_SECTION_SEQUENCE_FAST() \
|
||||
if (_should_lock_cs) { \
|
||||
PyCriticalSection_End(&_cs); \
|
||||
} \
|
||||
}
|
||||
|
||||
// Asserts that the mutex is locked. The mutex must be held by the
|
||||
// top-most critical section otherwise there's the possibility
|
||||
// that the mutex would be swapped out in some code paths.
|
||||
#define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex) \
|
||||
_PyCriticalSection_AssertHeld(mutex)
|
||||
|
||||
// Asserts that the mutex for the given object is locked. The mutex must
|
||||
// be held by the top-most critical section otherwise there's the
|
||||
// possibility that the mutex would be swapped out in some code paths.
|
||||
#ifdef Py_DEBUG
|
||||
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op) \
|
||||
if (Py_REFCNT(op) != 1) { \
|
||||
_Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(&_PyObject_CAST(op)->ob_mutex); \
|
||||
}
|
||||
|
||||
#else /* Py_DEBUG */
|
||||
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)
|
||||
|
||||
#endif /* Py_DEBUG */
|
||||
|
||||
#else /* !Py_GIL_DISABLED */
|
||||
// The critical section APIs are no-ops with the GIL.
|
||||
# define Py_BEGIN_CRITICAL_SECTION_MUT(mut) {
|
||||
# define Py_BEGIN_CRITICAL_SECTION2_MUT(m1, m2) {
|
||||
# define Py_BEGIN_CRITICAL_SECTION_SEQUENCE_FAST(original) {
|
||||
# define Py_END_CRITICAL_SECTION_SEQUENCE_FAST() }
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex)
|
||||
# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op)
|
||||
#endif /* !Py_GIL_DISABLED */
|
||||
|
||||
// Resumes the top-most critical section.
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection_Resume(PyThreadState *tstate);
|
||||
|
||||
// (private) slow path for locking the mutex
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection_BeginSlow(PyCriticalSection *c, PyMutex *m);
|
||||
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection2_BeginSlow(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2,
|
||||
int is_m1_locked);
|
||||
|
||||
PyAPI_FUNC(void)
|
||||
_PyCriticalSection_SuspendAll(PyThreadState *tstate);
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
static inline int
|
||||
_PyCriticalSection_IsActive(uintptr_t tag)
|
||||
{
|
||||
return tag != 0 && (tag & _Py_CRITICAL_SECTION_INACTIVE) == 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_BeginMutex(PyCriticalSection *c, PyMutex *m)
|
||||
{
|
||||
if (PyMutex_LockFast(&m->_bits)) {
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
c->_cs_mutex = m;
|
||||
c->_cs_prev = tstate->critical_section;
|
||||
tstate->critical_section = (uintptr_t)c;
|
||||
}
|
||||
else {
|
||||
_PyCriticalSection_BeginSlow(c, m);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_Begin(PyCriticalSection *c, PyObject *op)
|
||||
{
|
||||
_PyCriticalSection_BeginMutex(c, &op->ob_mutex);
|
||||
}
|
||||
#define PyCriticalSection_Begin _PyCriticalSection_Begin
|
||||
|
||||
// Removes the top-most critical section from the thread's stack of critical
|
||||
// sections. If the new top-most critical section is inactive, then it is
|
||||
// resumed.
|
||||
static inline void
|
||||
_PyCriticalSection_Pop(PyCriticalSection *c)
|
||||
{
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
uintptr_t prev = c->_cs_prev;
|
||||
tstate->critical_section = prev;
|
||||
|
||||
if ((prev & _Py_CRITICAL_SECTION_INACTIVE) != 0) {
|
||||
_PyCriticalSection_Resume(tstate);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection_End(PyCriticalSection *c)
|
||||
{
|
||||
PyMutex_Unlock(c->_cs_mutex);
|
||||
_PyCriticalSection_Pop(c);
|
||||
}
|
||||
#define PyCriticalSection_End _PyCriticalSection_End
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection2_BeginMutex(PyCriticalSection2 *c, PyMutex *m1, PyMutex *m2)
|
||||
{
|
||||
if (m1 == m2) {
|
||||
// If the two mutex arguments are the same, treat this as a critical
|
||||
// section with a single mutex.
|
||||
c->_cs_mutex2 = NULL;
|
||||
_PyCriticalSection_BeginMutex(&c->_cs_base, m1);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((uintptr_t)m2 < (uintptr_t)m1) {
|
||||
// Sort the mutexes so that the lower address is locked first.
|
||||
// The exact order does not matter, but we need to acquire the mutexes
|
||||
// in a consistent order to avoid lock ordering deadlocks.
|
||||
PyMutex *tmp = m1;
|
||||
m1 = m2;
|
||||
m2 = tmp;
|
||||
}
|
||||
|
||||
if (PyMutex_LockFast(&m1->_bits)) {
|
||||
if (PyMutex_LockFast(&m2->_bits)) {
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
c->_cs_base._cs_mutex = m1;
|
||||
c->_cs_mutex2 = m2;
|
||||
c->_cs_base._cs_prev = tstate->critical_section;
|
||||
|
||||
uintptr_t p = (uintptr_t)c | _Py_CRITICAL_SECTION_TWO_MUTEXES;
|
||||
tstate->critical_section = p;
|
||||
}
|
||||
else {
|
||||
_PyCriticalSection2_BeginSlow(c, m1, m2, 1);
|
||||
}
|
||||
}
|
||||
else {
|
||||
_PyCriticalSection2_BeginSlow(c, m1, m2, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection2_Begin(PyCriticalSection2 *c, PyObject *a, PyObject *b)
|
||||
{
|
||||
_PyCriticalSection2_BeginMutex(c, &a->ob_mutex, &b->ob_mutex);
|
||||
}
|
||||
#define PyCriticalSection2_Begin _PyCriticalSection2_Begin
|
||||
|
||||
static inline void
|
||||
_PyCriticalSection2_End(PyCriticalSection2 *c)
|
||||
{
|
||||
if (c->_cs_mutex2) {
|
||||
PyMutex_Unlock(c->_cs_mutex2);
|
||||
}
|
||||
PyMutex_Unlock(c->_cs_base._cs_mutex);
|
||||
_PyCriticalSection_Pop(&c->_cs_base);
|
||||
}
|
||||
#define PyCriticalSection2_End _PyCriticalSection2_End
|
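/* Illustrative usage sketch (not from this header): holding two objects'
   per-object locks at once, e.g. for a cross-object update; the function
   name and the mutation placeholder are assumptions. */
static inline void
example_lock_object_pair(PyObject *a, PyObject *b)
{
    PyCriticalSection2 cs;
    _PyCriticalSection2_Begin(&cs, a, b);
    /* ... both a and b may be mutated safely here ... */
    _PyCriticalSection2_End(&cs);
}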
||||
|
||||
static inline void
|
||||
_PyCriticalSection_AssertHeld(PyMutex *mutex)
|
||||
{
|
||||
#ifdef Py_DEBUG
|
||||
PyThreadState *tstate = _PyThreadState_GET();
|
||||
uintptr_t prev = tstate->critical_section;
|
||||
if (prev & _Py_CRITICAL_SECTION_TWO_MUTEXES) {
|
||||
PyCriticalSection2 *cs = (PyCriticalSection2 *)(prev & ~_Py_CRITICAL_SECTION_MASK);
|
||||
assert(cs != NULL && (cs->_cs_base._cs_mutex == mutex || cs->_cs_mutex2 == mutex));
|
||||
}
|
||||
else {
|
||||
PyCriticalSection *cs = (PyCriticalSection *)(tstate->critical_section & ~_Py_CRITICAL_SECTION_MASK);
|
||||
assert(cs != NULL && cs->_cs_mutex == mutex);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* Py_GIL_DISABLED */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CRITICAL_SECTION_H */
|
||||
340
Dependencies/Python/include/internal/pycore_crossinterp.h
vendored
Normal file
@@ -0,0 +1,340 @@
|
||||
#ifndef Py_INTERNAL_CROSSINTERP_H
|
||||
#define Py_INTERNAL_CROSSINTERP_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_lock.h" // PyMutex
|
||||
#include "pycore_pyerrors.h"
|
||||
|
||||
/**************/
|
||||
/* exceptions */
|
||||
/**************/
|
||||
|
||||
PyAPI_DATA(PyObject *) PyExc_InterpreterError;
|
||||
PyAPI_DATA(PyObject *) PyExc_InterpreterNotFoundError;
|
||||
|
||||
|
||||
/***************************/
|
||||
/* cross-interpreter calls */
|
||||
/***************************/
|
||||
|
||||
typedef int (*_Py_simple_func)(void *);
|
||||
extern int _Py_CallInInterpreter(
|
||||
PyInterpreterState *interp,
|
||||
_Py_simple_func func,
|
||||
void *arg);
|
||||
extern int _Py_CallInInterpreterAndRawFree(
|
||||
PyInterpreterState *interp,
|
||||
_Py_simple_func func,
|
||||
void *arg);
|
||||
|
||||
|
||||
/**************************/
|
||||
/* cross-interpreter data */
|
||||
/**************************/
|
||||
|
||||
typedef struct _xid _PyCrossInterpreterData;
|
||||
typedef PyObject *(*xid_newobjectfunc)(_PyCrossInterpreterData *);
|
||||
typedef void (*xid_freefunc)(void *);
|
||||
|
||||
// _PyCrossInterpreterData is similar to Py_buffer as an effectively
|
||||
// opaque struct that holds data outside the object machinery. This
|
||||
// is necessary to pass safely between interpreters in the same process.
|
||||
struct _xid {
|
||||
// data is the cross-interpreter-safe derivation of a Python object
|
||||
// (see _PyObject_GetCrossInterpreterData). It will be NULL if the
|
||||
// new_object func (below) encodes the data.
|
||||
void *data;
|
||||
// obj is the Python object from which the data was derived. This
|
||||
// is non-NULL only if the data remains bound to the object in some
|
||||
// way, such that the object must be "released" (via a decref) when
|
||||
// the data is released. In that case the code that sets the field,
|
||||
// likely a registered "crossinterpdatafunc", is responsible for
|
||||
// ensuring it owns the reference (i.e. incref).
|
||||
PyObject *obj;
|
||||
// interp is the ID of the owning interpreter of the original
|
||||
// object. It corresponds to the active interpreter when
|
||||
// _PyObject_GetCrossInterpreterData() was called. This should only
|
||||
// be set by the cross-interpreter machinery.
|
||||
//
|
||||
// We use the ID rather than the PyInterpreterState to avoid issues
|
||||
// with deleted interpreters. Note that IDs are never re-used, so
|
||||
// each one will always correspond to a specific interpreter
|
||||
// (whether still alive or not).
|
||||
int64_t interpid;
|
||||
// new_object is a function that returns a new object in the current
|
||||
// interpreter given the data. The resulting object (a new
|
||||
// reference) will be equivalent to the original object. This field
|
||||
// is required.
|
||||
xid_newobjectfunc new_object;
|
||||
// free is called when the data is released. If it is NULL then
|
||||
// nothing will be done to free the data. For some types this is
|
||||
// okay (e.g. bytes) and for those types this field should be set
|
||||
// to NULL. However, for most the data was allocated just for
|
||||
// cross-interpreter use, so it must be freed when
|
||||
// _PyCrossInterpreterData_Release is called or the memory will
|
||||
// leak. In that case, at the very least this field should be set
|
||||
// to PyMem_RawFree (the default if not explicitly set to NULL).
|
||||
// The call will happen with the original interpreter activated.
|
||||
xid_freefunc free;
|
||||
};
|
||||
|
||||
PyAPI_FUNC(_PyCrossInterpreterData *) _PyCrossInterpreterData_New(void);
|
||||
PyAPI_FUNC(void) _PyCrossInterpreterData_Free(_PyCrossInterpreterData *data);
|
||||
|
||||
#define _PyCrossInterpreterData_DATA(DATA) ((DATA)->data)
|
||||
#define _PyCrossInterpreterData_OBJ(DATA) ((DATA)->obj)
|
||||
#define _PyCrossInterpreterData_INTERPID(DATA) ((DATA)->interpid)
|
||||
// Users should not need getters for "new_object" or "free".
|
||||
|
||||
|
||||
/* defining cross-interpreter data */
|
||||
|
||||
PyAPI_FUNC(void) _PyCrossInterpreterData_Init(
|
||||
_PyCrossInterpreterData *data,
|
||||
PyInterpreterState *interp, void *shared, PyObject *obj,
|
||||
xid_newobjectfunc new_object);
|
||||
PyAPI_FUNC(int) _PyCrossInterpreterData_InitWithSize(
|
||||
_PyCrossInterpreterData *,
|
||||
PyInterpreterState *interp, const size_t, PyObject *,
|
||||
xid_newobjectfunc);
|
||||
PyAPI_FUNC(void) _PyCrossInterpreterData_Clear(
|
||||
PyInterpreterState *, _PyCrossInterpreterData *);
|
||||
|
||||
// Normally the Init* functions are sufficient. The only time
|
||||
// additional initialization might be needed is to set the "free" func,
|
||||
// though that should be infrequent.
|
||||
#define _PyCrossInterpreterData_SET_FREE(DATA, FUNC) \
|
||||
do { \
|
||||
(DATA)->free = (FUNC); \
|
||||
} while (0)
|
||||
// Additionally, some shareable types are essentially light wrappers
|
||||
// around other shareable types. The crossinterpdatafunc of the wrapper
|
||||
// can often be implemented by calling the wrapped object's
|
||||
// crossinterpdatafunc and then changing the "new_object" function.
|
||||
// We have _PyCrossInterpreterData_SET_NEW_OBJECT() here for that,
|
||||
// but it might be better to have a function like
|
||||
// _PyCrossInterpreterData_AdaptToWrapper() instead.
|
||||
#define _PyCrossInterpreterData_SET_NEW_OBJECT(DATA, FUNC) \
|
||||
do { \
|
||||
(DATA)->new_object = (FUNC); \
|
||||
} while (0)
|
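/* Illustrative usage sketch (assumed names, not from this header): a wrapper
   type's crossinterpdatafunc might delegate to the wrapped object and then
   retarget only the rebuild step:

       wrapped_getdata(tstate, wrapped_obj, data);
       _PyCrossInterpreterData_SET_NEW_OBJECT(data, wrapper_new_object);

   where wrapped_getdata() and wrapper_new_object() are hypothetical. */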
||||
|
||||
|
||||
/* using cross-interpreter data */
|
||||
|
||||
PyAPI_FUNC(int) _PyObject_CheckCrossInterpreterData(PyObject *);
|
||||
PyAPI_FUNC(int) _PyObject_GetCrossInterpreterData(PyObject *, _PyCrossInterpreterData *);
|
||||
PyAPI_FUNC(PyObject *) _PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *);
|
||||
PyAPI_FUNC(int) _PyCrossInterpreterData_Release(_PyCrossInterpreterData *);
|
||||
PyAPI_FUNC(int) _PyCrossInterpreterData_ReleaseAndRawFree(_PyCrossInterpreterData *);
|
||||
|
||||
|
||||
/* cross-interpreter data registry */
|
||||
|
||||
// For now we use a global registry of shareable classes. An
|
||||
// alternative would be to add a tp_* slot for a class's
|
||||
// crossinterpdatafunc. It would be simpler and more efficient.
|
||||
|
||||
typedef int (*crossinterpdatafunc)(PyThreadState *tstate, PyObject *,
|
||||
_PyCrossInterpreterData *);
|
||||
|
||||
struct _xidregitem;
|
||||
|
||||
struct _xidregitem {
|
||||
struct _xidregitem *prev;
|
||||
struct _xidregitem *next;
|
||||
/* This can be a dangling pointer, but only if weakref is set. */
|
||||
PyTypeObject *cls;
|
||||
/* This is NULL for builtin types. */
|
||||
PyObject *weakref;
|
||||
size_t refcount;
|
||||
crossinterpdatafunc getdata;
|
||||
};
|
||||
|
||||
struct _xidregistry {
|
||||
int global; /* builtin types or heap types */
|
||||
int initialized;
|
||||
PyMutex mutex;
|
||||
struct _xidregitem *head;
|
||||
};
|
||||
|
||||
PyAPI_FUNC(int) _PyCrossInterpreterData_RegisterClass(PyTypeObject *, crossinterpdatafunc);
|
||||
PyAPI_FUNC(int) _PyCrossInterpreterData_UnregisterClass(PyTypeObject *);
|
||||
PyAPI_FUNC(crossinterpdatafunc) _PyCrossInterpreterData_Lookup(PyObject *);
|
||||
|
||||
|
||||
/*****************************/
|
||||
/* runtime state & lifecycle */
|
||||
/*****************************/
|
||||
|
||||
struct _xi_runtime_state {
|
||||
// builtin types
|
||||
// XXX Remove this field once we have a tp_* slot.
|
||||
struct _xidregistry registry;
|
||||
};
|
||||
|
||||
struct _xi_state {
|
||||
// heap types
|
||||
// XXX Remove this field once we have a tp_* slot.
|
||||
struct _xidregistry registry;
|
||||
|
||||
// heap types
|
||||
PyObject *PyExc_NotShareableError;
|
||||
};
|
||||
|
||||
extern PyStatus _PyXI_Init(PyInterpreterState *interp);
|
||||
extern void _PyXI_Fini(PyInterpreterState *interp);
|
||||
|
||||
extern PyStatus _PyXI_InitTypes(PyInterpreterState *interp);
|
||||
extern void _PyXI_FiniTypes(PyInterpreterState *interp);
|
||||
|
||||
#define _PyInterpreterState_GetXIState(interp) (&(interp)->xi)
|
||||
|
||||
|
||||
/***************************/
|
||||
/* short-term data sharing */
|
||||
/***************************/
|
||||
|
||||
// Ultimately we'd like to preserve enough information about the
|
||||
// exception and traceback that we could re-constitute (or at least
|
||||
// simulate, a la traceback.TracebackException), and even chain, a copy
|
||||
// of the exception in the calling interpreter.
|
||||
|
||||
typedef struct _excinfo {
|
||||
struct _excinfo_type {
|
||||
PyTypeObject *builtin;
|
||||
const char *name;
|
||||
const char *qualname;
|
||||
const char *module;
|
||||
} type;
|
||||
const char *msg;
|
||||
const char *errdisplay;
|
||||
} _PyXI_excinfo;
|
||||
|
||||
PyAPI_FUNC(int) _PyXI_InitExcInfo(_PyXI_excinfo *info, PyObject *exc);
|
||||
PyAPI_FUNC(PyObject *) _PyXI_FormatExcInfo(_PyXI_excinfo *info);
|
||||
PyAPI_FUNC(PyObject *) _PyXI_ExcInfoAsObject(_PyXI_excinfo *info);
|
||||
PyAPI_FUNC(void) _PyXI_ClearExcInfo(_PyXI_excinfo *info);
|
||||
|
||||
|
||||
typedef enum error_code {
|
||||
_PyXI_ERR_NO_ERROR = 0,
|
||||
_PyXI_ERR_UNCAUGHT_EXCEPTION = -1,
|
||||
_PyXI_ERR_OTHER = -2,
|
||||
_PyXI_ERR_NO_MEMORY = -3,
|
||||
_PyXI_ERR_ALREADY_RUNNING = -4,
|
||||
_PyXI_ERR_MAIN_NS_FAILURE = -5,
|
||||
_PyXI_ERR_APPLY_NS_FAILURE = -6,
|
||||
_PyXI_ERR_NOT_SHAREABLE = -7,
|
||||
} _PyXI_errcode;
|
||||
|
||||
|
||||
typedef struct _sharedexception {
|
||||
// The originating interpreter.
|
||||
PyInterpreterState *interp;
|
||||
// The kind of error to propagate.
|
||||
_PyXI_errcode code;
|
||||
// The exception information to propagate, if applicable.
|
||||
// This is populated only for some error codes,
|
||||
// but always for _PyXI_ERR_UNCAUGHT_EXCEPTION.
|
||||
_PyXI_excinfo uncaught;
|
||||
} _PyXI_error;
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyXI_ApplyError(_PyXI_error *err);
|
||||
|
||||
|
||||
typedef struct xi_session _PyXI_session;
|
||||
typedef struct _sharedns _PyXI_namespace;
|
||||
|
||||
PyAPI_FUNC(void) _PyXI_FreeNamespace(_PyXI_namespace *ns);
|
||||
PyAPI_FUNC(_PyXI_namespace *) _PyXI_NamespaceFromNames(PyObject *names);
|
||||
PyAPI_FUNC(int) _PyXI_FillNamespaceFromDict(
|
||||
_PyXI_namespace *ns,
|
||||
PyObject *nsobj,
|
||||
_PyXI_session *session);
|
||||
PyAPI_FUNC(int) _PyXI_ApplyNamespace(
|
||||
_PyXI_namespace *ns,
|
||||
PyObject *nsobj,
|
||||
PyObject *dflt);
|
||||
|
||||
|
||||
// A cross-interpreter session involves entering an interpreter
|
||||
// (_PyXI_Enter()), doing some work with it, and finally exiting
|
||||
// that interpreter (_PyXI_Exit()).
|
||||
//
|
||||
// At the boundaries of the session, both entering and exiting,
|
||||
// data may be exchanged between the previous interpreter and the
|
||||
// target one in a thread-safe way that does not violate the
|
||||
// isolation between interpreters. This includes setting objects
|
||||
// in the target's __main__ module on the way in, and capturing
|
||||
// uncaught exceptions on the way out.
|
||||
struct xi_session {
|
||||
// Once a session has been entered, this is the tstate that was
|
||||
// current before the session. If it is different from init_tstate,
|
||||
// then we must have switched interpreters. Either way, this will
|
||||
// be the current tstate once we exit the session.
|
||||
PyThreadState *prev_tstate;
|
||||
// Once a session has been entered, this is the current tstate.
|
||||
// It must be current when the session exits.
|
||||
PyThreadState *init_tstate;
|
||||
// This is true if init_tstate needs cleanup during exit.
|
||||
int own_init_tstate;
|
||||
|
||||
// This is true if, while entering the session, init_thread took
|
||||
// "ownership" of the interpreter's __main__ module. This means
|
||||
// it is the only thread that is allowed to run code there.
|
||||
// (Caveat: for now, users may still run exec() against the
|
||||
// __main__ module's dict, though that isn't advisable.)
|
||||
int running;
|
||||
// This is a cached reference to the __dict__ of the entered
|
||||
// interpreter's __main__ module. It is looked up at the
|
||||
// beginning of the session as a convenience.
|
||||
PyObject *main_ns;
|
||||
|
||||
// This is set if the interpreter is entered and raised an exception
|
||||
// that needs to be handled in some special way during exit.
|
||||
_PyXI_errcode *error_override;
|
||||
// This is set if exit captured an exception to propagate.
|
||||
_PyXI_error *error;
|
||||
|
||||
// -- pre-allocated memory --
|
||||
_PyXI_error _error;
|
||||
_PyXI_errcode _error_override;
|
||||
};
|
||||
|
||||
PyAPI_FUNC(int) _PyXI_Enter(
|
||||
_PyXI_session *session,
|
||||
PyInterpreterState *interp,
|
||||
PyObject *nsupdates);
|
||||
PyAPI_FUNC(void) _PyXI_Exit(_PyXI_session *session);
|
||||
|
||||
PyAPI_FUNC(PyObject *) _PyXI_ApplyCapturedException(_PyXI_session *session);
|
||||
PyAPI_FUNC(int) _PyXI_HasCapturedException(_PyXI_session *session);
|
||||
|
||||
|
||||
/*************/
|
||||
/* other API */
|
||||
/*************/
|
||||
|
||||
// Export for _testinternalcapi shared extension
|
||||
PyAPI_FUNC(PyInterpreterState *) _PyXI_NewInterpreter(
|
||||
PyInterpreterConfig *config,
|
||||
long *maybe_whence,
|
||||
PyThreadState **p_tstate,
|
||||
PyThreadState **p_save_tstate);
|
||||
PyAPI_FUNC(void) _PyXI_EndInterpreter(
|
||||
PyInterpreterState *interp,
|
||||
PyThreadState *tstate,
|
||||
PyThreadState **p_save_tstate);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_CROSSINTERP_H */
|
||||
28
Dependencies/Python/include/internal/pycore_descrobject.h
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef Py_INTERNAL_DESCROBJECT_H
|
||||
#define Py_INTERNAL_DESCROBJECT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
PyObject *prop_get;
|
||||
PyObject *prop_set;
|
||||
PyObject *prop_del;
|
||||
PyObject *prop_doc;
|
||||
PyObject *prop_name;
|
||||
int getter_doc;
|
||||
} propertyobject;
|
||||
|
||||
typedef propertyobject _PyPropertyObject;
|
||||
|
||||
extern PyTypeObject _PyMethodWrapper_Type;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DESCROBJECT_H */
|
||||
340
Dependencies/Python/include/internal/pycore_dict.h
vendored
Normal file
@@ -0,0 +1,340 @@
|
||||
#ifndef Py_INTERNAL_DICT_H
|
||||
#define Py_INTERNAL_DICT_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_freelist.h" // _PyFreeListState
|
||||
#include "pycore_identifier.h" // _Py_Identifier
|
||||
#include "pycore_object.h" // PyManagedDictPointer
|
||||
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_LOAD_SSIZE_ACQUIRE
|
||||
|
||||
// Unsafe flavor of PyDict_GetItemWithError(): no error checking
|
||||
extern PyObject* _PyDict_GetItemWithError(PyObject *dp, PyObject *key);
|
||||
|
||||
// Delete an item from a dict if a predicate is true
|
||||
// Returns -1 on error, 1 if the item was deleted, 0 otherwise
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_DelItemIf(PyObject *mp, PyObject *key,
|
||||
int (*predicate)(PyObject *value, void *arg),
|
||||
void *arg);
|
||||
|
||||
// "KnownHash" variants
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_SetItem_KnownHash(PyObject *mp, PyObject *key,
|
||||
PyObject *item, Py_hash_t hash);
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_DelItem_KnownHash(PyObject *mp, PyObject *key,
|
||||
Py_hash_t hash);
|
||||
extern int _PyDict_Contains_KnownHash(PyObject *, PyObject *, Py_hash_t);
|
||||
|
||||
// "Id" variants
|
||||
extern PyObject* _PyDict_GetItemIdWithError(PyObject *dp,
|
||||
_Py_Identifier *key);
|
||||
extern int _PyDict_ContainsId(PyObject *, _Py_Identifier *);
|
||||
extern int _PyDict_SetItemId(PyObject *dp, _Py_Identifier *key, PyObject *item);
|
||||
extern int _PyDict_DelItemId(PyObject *mp, _Py_Identifier *key);
|
||||
|
||||
extern int _PyDict_Next(
|
||||
PyObject *mp, Py_ssize_t *pos, PyObject **key, PyObject **value, Py_hash_t *hash);
|
||||
|
||||
extern int _PyDict_HasOnlyStringKeys(PyObject *mp);
|
||||
|
||||
extern void _PyDict_MaybeUntrack(PyObject *mp);
|
||||
|
||||
// Export for '_ctypes' shared extension
|
||||
PyAPI_FUNC(Py_ssize_t) _PyDict_SizeOf(PyDictObject *);
|
||||
|
||||
#define _PyDict_HasSplitTable(d) ((d)->ma_values != NULL)
|
||||
|
||||
/* Like PyDict_Merge, but override can be 0, 1 or 2. If override is 0,
|
||||
the first occurrence of a key wins; if override is 1, the last occurrence
|
||||
of a key wins; if override is 2, a KeyError with the conflicting key as
|
||||
argument is raised.
|
||||
*/
|
||||
PyAPI_FUNC(int) _PyDict_MergeEx(PyObject *mp, PyObject *other, int override);
|
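/* Illustrative call (assumed dict objects): _PyDict_MergeEx(dst, src, 1)
   merges src into dst with src's values winning on key collisions, matching
   dict.update(); override 0 keeps dst's existing values instead. */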
||||
|
||||
extern void _PyDict_DebugMallocStats(FILE *out);
|
||||
|
||||
|
||||
/* _PyDictView */
|
||||
|
||||
typedef struct {
|
||||
PyObject_HEAD
|
||||
PyDictObject *dv_dict;
|
||||
} _PyDictViewObject;
|
||||
|
||||
extern PyObject* _PyDictView_New(PyObject *, PyTypeObject *);
|
||||
extern PyObject* _PyDictView_Intersect(PyObject* self, PyObject *other);
|
||||
|
||||
/* other API */
|
||||
|
||||
typedef struct {
|
||||
/* Cached hash code of me_key. */
|
||||
Py_hash_t me_hash;
|
||||
PyObject *me_key;
|
||||
PyObject *me_value; /* This field is only meaningful for combined tables */
|
||||
} PyDictKeyEntry;
|
||||
|
||||
typedef struct {
|
||||
PyObject *me_key; /* The key must be Unicode and have hash. */
|
||||
PyObject *me_value; /* This field is only meaningful for combined tables */
|
||||
} PyDictUnicodeEntry;
|
||||
|
||||
extern PyDictKeysObject *_PyDict_NewKeysForClass(void);
|
||||
extern PyObject *_PyDict_FromKeys(PyObject *, PyObject *, PyObject *);
|
||||
|
||||
/* Gets a version number unique to the current state of the keys of dict, if possible.
|
||||
* Returns the version number, or zero if it was not possible to get a version number. */
|
||||
extern uint32_t _PyDictKeys_GetVersionForCurrentState(
|
||||
PyInterpreterState *interp, PyDictKeysObject *dictkeys);
|
||||
|
||||
extern size_t _PyDict_KeysSize(PyDictKeysObject *keys);
|
||||
|
||||
extern void _PyDictKeys_DecRef(PyDictKeysObject *keys);
|
||||
|
||||
/* _Py_dict_lookup() returns index of entry which can be used like DK_ENTRIES(dk)[index].
|
||||
* -1 when no entry found, -3 when compare raises error.
|
||||
*/
|
||||
extern Py_ssize_t _Py_dict_lookup(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr);
|
||||
extern Py_ssize_t _Py_dict_lookup_threadsafe(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr);
|
||||
|
||||
extern Py_ssize_t _PyDict_LookupIndex(PyDictObject *, PyObject *);
|
||||
extern Py_ssize_t _PyDictKeys_StringLookup(PyDictKeysObject* dictkeys, PyObject *key);
|
||||
PyAPI_FUNC(PyObject *)_PyDict_LoadGlobal(PyDictObject *, PyDictObject *, PyObject *);
|
||||
|
||||
/* Consumes references to key and value */
|
||||
PyAPI_FUNC(int) _PyDict_SetItem_Take2(PyDictObject *op, PyObject *key, PyObject *value);
|
||||
extern int _PyDict_SetItem_LockHeld(PyDictObject *dict, PyObject *name, PyObject *value);
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_SetItem_KnownHash_LockHeld(PyDictObject *mp, PyObject *key,
|
||||
PyObject *value, Py_hash_t hash);
|
||||
// Export for '_asyncio' shared extension
|
||||
PyAPI_FUNC(int) _PyDict_GetItemRef_KnownHash_LockHeld(PyDictObject *op, PyObject *key, Py_hash_t hash, PyObject **result);
|
||||
extern int _PyDict_GetItemRef_KnownHash(PyDictObject *op, PyObject *key, Py_hash_t hash, PyObject **result);
|
||||
extern int _PyDict_GetItemRef_Unicode_LockHeld(PyDictObject *op, PyObject *key, PyObject **result);
|
||||
extern int _PyObjectDict_SetItem(PyTypeObject *tp, PyObject *obj, PyObject **dictptr, PyObject *name, PyObject *value);
|
||||
|
||||
extern int _PyDict_Pop_KnownHash(
|
||||
PyDictObject *dict,
|
||||
PyObject *key,
|
||||
Py_hash_t hash,
|
||||
PyObject **result);
|
||||
|
||||
#define DKIX_EMPTY (-1)
|
||||
#define DKIX_DUMMY (-2) /* Used internally */
|
||||
#define DKIX_ERROR (-3)
|
||||
#define DKIX_KEY_CHANGED (-4) /* Used internally */
|
||||
|
||||
typedef enum {
|
||||
DICT_KEYS_GENERAL = 0,
|
||||
DICT_KEYS_UNICODE = 1,
|
||||
DICT_KEYS_SPLIT = 2
|
||||
} DictKeysKind;
|
||||
|
||||
/* See dictobject.c for actual layout of DictKeysObject */
|
||||
struct _dictkeysobject {
|
||||
Py_ssize_t dk_refcnt;
|
||||
|
||||
/* Size of the hash table (dk_indices). It must be a power of 2. */
|
||||
uint8_t dk_log2_size;
|
||||
|
||||
/* Size of the hash table (dk_indices) by bytes. */
|
||||
uint8_t dk_log2_index_bytes;
|
||||
|
||||
/* Kind of keys */
|
||||
uint8_t dk_kind;
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
/* Lock used to protect shared keys */
|
||||
PyMutex dk_mutex;
|
||||
#endif
|
||||
|
||||
/* Version number -- Reset to 0 by any modification to keys */
|
||||
uint32_t dk_version;
|
||||
|
||||
/* Number of usable entries in dk_entries. */
|
||||
Py_ssize_t dk_usable;
|
||||
|
||||
/* Number of used entries in dk_entries. */
|
||||
Py_ssize_t dk_nentries;
|
||||
|
||||
|
||||
/* Actual hash table of dk_size entries. It holds indices into dk_entries,
|
||||
or DKIX_EMPTY(-1) or DKIX_DUMMY(-2).
|
||||
|
||||
Indices must be: 0 <= index < USABLE_FRACTION(dk_size).
|
||||
|
||||
The size in bytes of an index depends on dk_size:
|
||||
|
||||
- 1 byte if dk_size <= 0xff (char*)
|
||||
- 2 bytes if dk_size <= 0xffff (int16_t*)
|
||||
- 4 bytes if dk_size <= 0xffffffff (int32_t*)
|
||||
- 8 bytes otherwise (int64_t*)
|
||||
|
||||
Dynamically sized, SIZEOF_VOID_P is minimum. */
|
||||
char dk_indices[]; /* char is required to avoid strict aliasing. */
|
||||
|
||||
/* "PyDictKeyEntry or PyDictUnicodeEntry dk_entries[USABLE_FRACTION(DK_SIZE(dk))];" array follows:
|
||||
see the DK_ENTRIES() / DK_UNICODE_ENTRIES() functions below */
|
||||
};
|
||||
|
||||
/* This must be no more than 250, for the prefix size to fit in one byte. */
|
||||
#define SHARED_KEYS_MAX_SIZE 30
|
||||
#define NEXT_LOG2_SHARED_KEYS_MAX_SIZE 6
|
||||
|
||||
/* Layout of dict values:
|
||||
*
|
||||
* The PyObject *values are preceded by an array of bytes holding
|
||||
* the insertion order and size.
|
||||
* [-1] = prefix size. [-2] = used size. size[-2-n...] = insertion order.
|
||||
*/
|
||||
struct _dictvalues {
|
||||
uint8_t capacity;
|
||||
uint8_t size;
|
||||
uint8_t embedded;
|
||||
uint8_t valid;
|
||||
PyObject *values[1];
|
||||
};
|
||||
|
||||
#define DK_LOG_SIZE(dk) _Py_RVALUE((dk)->dk_log2_size)
|
||||
#if SIZEOF_VOID_P > 4
|
||||
#define DK_SIZE(dk) (((int64_t)1)<<DK_LOG_SIZE(dk))
|
||||
#else
|
||||
#define DK_SIZE(dk) (1<<DK_LOG_SIZE(dk))
|
||||
#endif
|
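/* Worked example (illustrative): dk_log2_size == 10 gives a 1024-slot index
   table; 1024 > 0xff, so each index occupies 2 bytes, dk_log2_index_bytes
   == 11, and the entry array starts 2048 bytes into dk_indices. */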
||||
|
||||
static inline void* _DK_ENTRIES(PyDictKeysObject *dk) {
|
||||
int8_t *indices = (int8_t*)(dk->dk_indices);
|
||||
size_t index = (size_t)1 << dk->dk_log2_index_bytes;
|
||||
return (&indices[index]);
|
||||
}
|
||||
|
||||
static inline PyDictKeyEntry* DK_ENTRIES(PyDictKeysObject *dk) {
|
||||
assert(dk->dk_kind == DICT_KEYS_GENERAL);
|
||||
return (PyDictKeyEntry*)_DK_ENTRIES(dk);
|
||||
}
|
||||
static inline PyDictUnicodeEntry* DK_UNICODE_ENTRIES(PyDictKeysObject *dk) {
|
||||
assert(dk->dk_kind != DICT_KEYS_GENERAL);
|
||||
return (PyDictUnicodeEntry*)_DK_ENTRIES(dk);
|
||||
}
|
||||
|
||||
#define DK_IS_UNICODE(dk) ((dk)->dk_kind != DICT_KEYS_GENERAL)
|
||||
|
||||
#define DICT_VERSION_INCREMENT (1 << (DICT_MAX_WATCHERS + DICT_WATCHED_MUTATION_BITS))
|
||||
#define DICT_WATCHER_MASK ((1 << DICT_MAX_WATCHERS) - 1)
|
||||
#define DICT_WATCHER_AND_MODIFICATION_MASK ((1 << (DICT_MAX_WATCHERS + DICT_WATCHED_MUTATION_BITS)) - 1)
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
#define THREAD_LOCAL_DICT_VERSION_COUNT 256
|
||||
#define THREAD_LOCAL_DICT_VERSION_BATCH (THREAD_LOCAL_DICT_VERSION_COUNT * DICT_VERSION_INCREMENT)
|
||||
|
||||
static inline uint64_t
|
||||
dict_next_version(PyInterpreterState *interp)
|
||||
{
|
||||
PyThreadState *tstate = PyThreadState_GET();
|
||||
uint64_t cur_progress = (tstate->dict_global_version &
|
||||
(THREAD_LOCAL_DICT_VERSION_BATCH - 1));
|
||||
if (cur_progress == 0) {
|
||||
uint64_t next = _Py_atomic_add_uint64(&interp->dict_state.global_version,
|
||||
THREAD_LOCAL_DICT_VERSION_BATCH);
|
||||
tstate->dict_global_version = next;
|
||||
}
|
||||
return tstate->dict_global_version += DICT_VERSION_INCREMENT;
|
||||
}
|
||||
|
||||
#define DICT_NEXT_VERSION(INTERP) dict_next_version(INTERP)
|
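/* Illustrative arithmetic for the batching above: with
   THREAD_LOCAL_DICT_VERSION_COUNT == 256, each thread reserves a block of
   256 version increments from the shared counter in one atomic add, then
   hands them out locally, so contention occurs once per 256 modifications. */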
||||
|
||||
#else
|
||||
#define DICT_NEXT_VERSION(INTERP) \
|
||||
((INTERP)->dict_state.global_version += DICT_VERSION_INCREMENT)
|
||||
#endif
|
||||
|
||||
void
|
||||
_PyDict_SendEvent(int watcher_bits,
|
||||
PyDict_WatchEvent event,
|
||||
PyDictObject *mp,
|
||||
PyObject *key,
|
||||
PyObject *value);
|
||||
|
||||
static inline uint64_t
|
||||
_PyDict_NotifyEvent(PyInterpreterState *interp,
|
||||
PyDict_WatchEvent event,
|
||||
PyDictObject *mp,
|
||||
PyObject *key,
|
||||
PyObject *value)
|
||||
{
|
||||
assert(Py_REFCNT((PyObject*)mp) > 0);
|
||||
int watcher_bits = mp->ma_version_tag & DICT_WATCHER_MASK;
|
||||
if (watcher_bits) {
|
||||
RARE_EVENT_STAT_INC(watched_dict_modification);
|
||||
_PyDict_SendEvent(watcher_bits, event, mp, key, value);
|
||||
}
|
||||
return DICT_NEXT_VERSION(interp) | (mp->ma_version_tag & DICT_WATCHER_AND_MODIFICATION_MASK);
|
||||
}
|
||||
|
||||
extern PyDictObject *_PyObject_MaterializeManagedDict(PyObject *obj);
|
||||
|
||||
PyAPI_FUNC(PyObject *)_PyDict_FromItems(
|
||||
PyObject *const *keys, Py_ssize_t keys_offset,
|
||||
PyObject *const *values, Py_ssize_t values_offset,
|
||||
Py_ssize_t length);
|
||||
|
||||
static inline uint8_t *
|
||||
get_insertion_order_array(PyDictValues *values)
|
||||
{
|
||||
return (uint8_t *)&values->values[values->capacity];
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyDictValues_AddToInsertionOrder(PyDictValues *values, Py_ssize_t ix)
|
||||
{
|
||||
assert(ix < SHARED_KEYS_MAX_SIZE);
|
||||
int size = values->size;
|
||||
uint8_t *array = get_insertion_order_array(values);
|
||||
assert(size < values->capacity);
|
||||
assert(((uint8_t)ix) == ix);
|
||||
array[size] = (uint8_t)ix;
|
||||
values->size = size+1;
|
||||
}
|
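/* Illustrative counterpart sketch (not from this header): reading slot
   indices back in insertion order; the function name is an assumption. */
static inline uint8_t
example_nth_insertion_index(PyDictValues *values, int n)
{
    assert(n >= 0 && n < values->size);
    return get_insertion_order_array(values)[n];
}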
||||
|
||||
static inline size_t
|
||||
shared_keys_usable_size(PyDictKeysObject *keys)
|
||||
{
|
||||
// dk_usable will decrease for each instance that is created and each
|
||||
// value that is added. dk_nentries will increase for each value that
|
||||
// is added. We want to always return the right value or larger.
|
||||
// We therefore increase dk_nentries first and we decrease dk_usable
|
||||
// second, and conversely here we read dk_usable first and dk_nentries
|
||||
// second (to avoid the case where we read entries before the increment
|
||||
// and read usable after the decrement)
|
||||
Py_ssize_t dk_usable = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(keys->dk_usable);
|
||||
Py_ssize_t dk_nentries = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(keys->dk_nentries);
|
||||
return dk_nentries + dk_usable;
|
||||
}
|
||||
|
||||
static inline size_t
|
||||
_PyInlineValuesSize(PyTypeObject *tp)
|
||||
{
|
||||
PyDictKeysObject *keys = ((PyHeapTypeObject*)tp)->ht_cached_keys;
|
||||
assert(keys != NULL);
|
||||
size_t size = shared_keys_usable_size(keys);
|
||||
size_t prefix_size = _Py_SIZE_ROUND_UP(size, sizeof(PyObject *));
|
||||
assert(prefix_size < 256);
|
||||
return prefix_size + (size + 1) * sizeof(PyObject *);
|
||||
}
|
||||
|
||||
int
|
||||
_PyDict_DetachFromObject(PyDictObject *dict, PyObject *obj);
|
||||
|
||||
PyDictObject *_PyObject_MaterializeManagedDict_LockHeld(PyObject *);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DICT_H */
|
||||
32
Dependencies/Python/include/internal/pycore_dict_state.h
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
#ifndef Py_INTERNAL_DICT_STATE_H
|
||||
#define Py_INTERNAL_DICT_STATE_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#define DICT_MAX_WATCHERS 8
|
||||
#define DICT_WATCHED_MUTATION_BITS 4
|
||||
|
||||
struct _Py_dict_state {
|
||||
/* Global counter used to set the ma_version_tag field of dictionaries.
|
||||
* It is incremented each time that a dictionary is created and each
|
||||
* time that a dictionary is modified. */
|
||||
uint64_t global_version;
|
||||
uint32_t next_keys_version;
|
||||
PyDict_WatchCallback watchers[DICT_MAX_WATCHERS];
|
||||
};
|
||||
|
||||
#define _dict_state_INIT \
|
||||
{ \
|
||||
.next_keys_version = 2, \
|
||||
}
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DICT_STATE_H */
|
||||
75
Dependencies/Python/include/internal/pycore_dtoa.h
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
#ifndef Py_INTERNAL_DTOA_H
|
||||
#define Py_INTERNAL_DTOA_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_pymath.h" // _PY_SHORT_FLOAT_REPR
|
||||
|
||||
|
||||
typedef uint32_t ULong;
|
||||
|
||||
struct
|
||||
Bigint {
|
||||
struct Bigint *next;
|
||||
int k, maxwds, sign, wds;
|
||||
ULong x[1];
|
||||
};
|
||||
|
||||
#if defined(Py_USING_MEMORY_DEBUGGER) || _PY_SHORT_FLOAT_REPR == 0
|
||||
|
||||
struct _dtoa_state {
|
||||
int _not_used;
|
||||
};
|
||||
#define _dtoa_state_INIT(INTERP) \
|
||||
{0}
|
||||
|
||||
#else // !Py_USING_MEMORY_DEBUGGER && _PY_SHORT_FLOAT_REPR != 0
|
||||
|
||||
/* The size of the Bigint freelist */
|
||||
#define Bigint_Kmax 7
|
||||
|
||||
/* The size of the cached powers of 5 array */
|
||||
#define Bigint_Pow5size 8
|
||||
|
||||
#ifndef PRIVATE_MEM
|
||||
#define PRIVATE_MEM 2304
|
||||
#endif
|
||||
#define Bigint_PREALLOC_SIZE \
|
||||
((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
|
||||
|
||||
struct _dtoa_state {
|
||||
// p5s is an array of powers of 5 of the form:
|
||||
// 5**(2**(i+2)) for 0 <= i < Bigint_Pow5size
|
||||
struct Bigint *p5s[Bigint_Pow5size];
|
||||
// XXX This should be freed during runtime fini.
|
||||
struct Bigint *freelist[Bigint_Kmax+1];
|
||||
double preallocated[Bigint_PREALLOC_SIZE];
|
||||
double *preallocated_next;
|
||||
};
|
||||
#define _dtoa_state_INIT(INTERP) \
|
||||
{ \
|
||||
.preallocated_next = (INTERP)->dtoa.preallocated, \
|
||||
}
|
||||
|
||||
#endif // !Py_USING_MEMORY_DEBUGGER
|
||||
|
||||
|
||||
extern double _Py_dg_strtod(const char *str, char **ptr);
|
||||
extern char* _Py_dg_dtoa(double d, int mode, int ndigits,
|
||||
int *decpt, int *sign, char **rve);
|
||||
extern void _Py_dg_freedtoa(char *s);
|
||||
|
||||
|
||||
extern PyStatus _PyDtoa_Init(PyInterpreterState *interp);
|
||||
extern void _PyDtoa_Fini(PyInterpreterState *interp);
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif /* !Py_INTERNAL_DTOA_H */
|
||||
30
Dependencies/Python/include/internal/pycore_emscripten_signal.h
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
#ifndef Py_EMSCRIPTEN_SIGNAL_H
|
||||
#define Py_EMSCRIPTEN_SIGNAL_H
|
||||
|
||||
#if defined(__EMSCRIPTEN__)
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
void
|
||||
_Py_CheckEmscriptenSignals(void);
|
||||
|
||||
void
|
||||
_Py_CheckEmscriptenSignalsPeriodically(void);
|
||||
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS() _Py_CheckEmscriptenSignals()
|
||||
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY() _Py_CheckEmscriptenSignalsPeriodically()
|
||||
|
||||
extern int Py_EMSCRIPTEN_SIGNAL_HANDLING;
|
||||
extern int _Py_emscripten_signal_clock;
|
||||
|
||||
#else
|
||||
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS()
|
||||
#define _Py_CHECK_EMSCRIPTEN_SIGNALS_PERIODICALLY()
|
||||
|
||||
#endif // defined(__EMSCRIPTEN__)
|
||||
|
||||
#endif // ndef Py_EMSCRIPTEN_SIGNAL_H
|
||||
81
Dependencies/Python/include/internal/pycore_emscripten_trampoline.h
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
#ifndef Py_EMSCRIPTEN_TRAMPOLINE_H
|
||||
#define Py_EMSCRIPTEN_TRAMPOLINE_H
|
||||
|
||||
#include "pycore_runtime.h" // _PyRuntimeState
|
||||
|
||||
/**
|
||||
* C function call trampolines to mitigate bad function pointer casts.
|
||||
*
|
||||
* Section 6.3.2.3, paragraph 8 of the C standard reads:
|
||||
*
|
||||
* A pointer to a function of one type may be converted to a pointer to a
|
||||
* function of another type and back again; the result shall compare equal to
|
||||
* the original pointer. If a converted pointer is used to call a function
|
||||
* whose type is not compatible with the pointed-to type, the behavior is
|
||||
* undefined.
|
||||
*
|
||||
* Typical native ABIs ignore additional arguments or fill in missing values
|
||||
* with 0/NULL in function pointer casts. Compilers do not warn when a
|
||||
* function pointer is explicitly cast to an incompatible type.
|
||||
*
|
||||
* Bad fpcasts are an issue in WebAssembly. WASM's indirect_call has strict
|
||||
* function signature checks. Argument count, types, and return type must match.
|
||||
*
|
||||
* Third party code unintentionally rely on problematic fpcasts. The call
|
||||
* trampoline mitigates common occurrences of bad fpcasts on Emscripten.
|
||||
*/
|
||||
|
||||
#if defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
|
||||
|
||||
void _Py_EmscriptenTrampoline_Init(_PyRuntimeState *runtime);
|
||||
|
||||
PyObject*
|
||||
_PyEM_TrampolineCall_JavaScript(PyCFunctionWithKeywords func,
|
||||
PyObject* self,
|
||||
PyObject* args,
|
||||
PyObject* kw);
|
||||
|
||||
PyObject*
|
||||
_PyEM_TrampolineCall_Reflection(PyCFunctionWithKeywords func,
|
||||
PyObject* self,
|
||||
PyObject* args,
|
||||
PyObject* kw);
|
||||
|
||||
#define _PyEM_TrampolineCall(meth, self, args, kw) \
|
||||
((_PyRuntime.wasm_type_reflection_available) ? \
|
||||
(_PyEM_TrampolineCall_Reflection((PyCFunctionWithKeywords)(meth), (self), (args), (kw))) : \
|
||||
(_PyEM_TrampolineCall_JavaScript((PyCFunctionWithKeywords)(meth), (self), (args), (kw))))
|
||||
|
||||
#define _PyCFunction_TrampolineCall(meth, self, args) \
|
||||
_PyEM_TrampolineCall( \
|
||||
(*(PyCFunctionWithKeywords)(void(*)(void))(meth)), (self), (args), NULL)
|
||||
|
||||
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
|
||||
_PyEM_TrampolineCall((meth), (self), (args), (kw))
|
||||
|
||||
#define descr_set_trampoline_call(set, obj, value, closure) \
|
||||
((int)_PyEM_TrampolineCall((PyCFunctionWithKeywords)(set), (obj), (value), (PyObject*)(closure)))
|
||||
|
||||
#define descr_get_trampoline_call(get, obj, closure) \
|
||||
_PyEM_TrampolineCall((PyCFunctionWithKeywords)(get), (obj), (PyObject*)(closure), NULL)
|
||||
|
||||
|
||||
#else // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
|
||||
|
||||
#define _Py_EmscriptenTrampoline_Init(runtime)
|
||||
|
||||
#define _PyCFunction_TrampolineCall(meth, self, args) \
|
||||
(meth)((self), (args))
|
||||
|
||||
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
|
||||
(meth)((self), (args), (kw))
|
||||
|
||||
#define descr_set_trampoline_call(set, obj, value, closure) \
|
||||
(set)((obj), (value), (closure))
|
||||
|
||||
#define descr_get_trampoline_call(get, obj, closure) \
|
||||
(get)((obj), (closure))
|
||||
|
||||
#endif // defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE)
|
||||
|
||||
#endif // ndef Py_EMSCRIPTEN_SIGNAL_H
|
||||
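A concrete sketch of the problem the comment above describes (illustrative only; my_noargs_impl and call_noargs are hypothetical names). A METH_NOARGS implementation takes two arguments, but call machinery has historically invoked everything through a three-argument PyCFunctionWithKeywords pointer; native ABIs ignore the extra slot, while WASM's indirect_call traps on the mismatch, so the call is routed through the trampoline instead:

static PyObject *
my_noargs_impl(PyObject *self, PyObject *ignored)   /* hypothetical METH_NOARGS */
{
    Py_RETURN_NONE;
}

static PyObject *
call_noargs(PyObject *self, PyObject *args)
{
    /* Under Emscripten this expands to a signature-correct dispatch
       through JavaScript; elsewhere it is a plain direct call. */
    return _PyCFunction_TrampolineCall((PyCFunction)my_noargs_impl, self, args);
}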
40
Dependencies/Python/include/internal/pycore_exceptions.h
vendored
Normal file
@@ -0,0 +1,40 @@
#ifndef Py_INTERNAL_EXCEPTIONS_H
#define Py_INTERNAL_EXCEPTIONS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif


/* runtime lifecycle */

extern PyStatus _PyExc_InitState(PyInterpreterState *);
extern PyStatus _PyExc_InitGlobalObjects(PyInterpreterState *);
extern int _PyExc_InitTypes(PyInterpreterState *);
extern void _PyExc_Fini(PyInterpreterState *);


/* other API */

struct _Py_exc_state {
    // The dict mapping from errno codes to OSError subclasses
    PyObject *errnomap;
    PyBaseExceptionObject *memerrors_freelist;
    int memerrors_numfree;
#ifdef Py_GIL_DISABLED
    PyMutex memerrors_lock;
#endif
    // The ExceptionGroup type
    PyObject *PyExc_ExceptionGroup;
};

extern void _PyExc_ClearExceptionGroupType(PyInterpreterState *);


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_EXCEPTIONS_H */
99
Dependencies/Python/include/internal/pycore_faulthandler.h
vendored
Normal file
@@ -0,0 +1,99 @@
#ifndef Py_INTERNAL_FAULTHANDLER_H
#define Py_INTERNAL_FAULTHANDLER_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#ifdef HAVE_SIGACTION
#  include <signal.h>   // sigaction
#endif


#ifndef MS_WINDOWS
   /* register() is useless on Windows, because only SIGSEGV, SIGABRT and
      SIGILL can be handled by the process, and these signals can only be used
      with enable(), not using register() */
#  define FAULTHANDLER_USER
#endif


#ifdef HAVE_SIGACTION
   /* Using an alternative stack requires sigaltstack()
      and sigaction() SA_ONSTACK */
#  ifdef HAVE_SIGALTSTACK
#    define FAULTHANDLER_USE_ALT_STACK
#  endif
typedef struct sigaction _Py_sighandler_t;
#else
typedef PyOS_sighandler_t _Py_sighandler_t;
#endif  // HAVE_SIGACTION


#ifdef FAULTHANDLER_USER
struct faulthandler_user_signal {
    int enabled;
    PyObject *file;
    int fd;
    int all_threads;
    int chain;
    _Py_sighandler_t previous;
    PyInterpreterState *interp;
};
#endif /* FAULTHANDLER_USER */


struct _faulthandler_runtime_state {
    struct {
        int enabled;
        PyObject *file;
        int fd;
        int all_threads;
        PyInterpreterState *interp;
#ifdef MS_WINDOWS
        void *exc_handler;
#endif
    } fatal_error;

    struct {
        PyObject *file;
        int fd;
        PY_TIMEOUT_T timeout_us;   /* timeout in microseconds */
        int repeat;
        PyInterpreterState *interp;
        int exit;
        char *header;
        size_t header_len;
        /* The main thread always holds this lock. It is only released when
           faulthandler_thread() is interrupted before this thread exits, or at
           Python exit. */
        PyThread_type_lock cancel_event;
        /* released by child thread when joined */
        PyThread_type_lock running;
    } thread;

#ifdef FAULTHANDLER_USER
    struct faulthandler_user_signal *user_signals;
#endif

#ifdef FAULTHANDLER_USE_ALT_STACK
    stack_t stack;
    stack_t old_stack;
#endif
};

#define _faulthandler_runtime_state_INIT \
    { \
        .fatal_error = { \
            .fd = -1, \
        }, \
    }


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FAULTHANDLER_H */
335
Dependencies/Python/include/internal/pycore_fileutils.h
vendored
Normal file
@@ -0,0 +1,335 @@
#ifndef Py_INTERNAL_FILEUTILS_H
#define Py_INTERNAL_FILEUTILS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include <locale.h>   // struct lconv


/* A routine to check if a file descriptor can be select()-ed. */
#ifdef _MSC_VER
   /* On Windows, any socket fd can be select()-ed, no matter how high */
#  define _PyIsSelectable_fd(FD) (1)
#else
#  define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE)
#endif

struct _fileutils_state {
    int force_ascii;
};

typedef enum {
    _Py_ERROR_UNKNOWN=0,
    _Py_ERROR_STRICT,
    _Py_ERROR_SURROGATEESCAPE,
    _Py_ERROR_REPLACE,
    _Py_ERROR_IGNORE,
    _Py_ERROR_BACKSLASHREPLACE,
    _Py_ERROR_SURROGATEPASS,
    _Py_ERROR_XMLCHARREFREPLACE,
    _Py_ERROR_OTHER
} _Py_error_handler;

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(_Py_error_handler) _Py_GetErrorHandler(const char *errors);

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(int) _Py_DecodeLocaleEx(
    const char *arg,
    wchar_t **wstr,
    size_t *wlen,
    const char **reason,
    int current_locale,
    _Py_error_handler errors);

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(int) _Py_EncodeLocaleEx(
    const wchar_t *text,
    char **str,
    size_t *error_pos,
    const char **reason,
    int current_locale,
    _Py_error_handler errors);

extern char* _Py_EncodeLocaleRaw(
    const wchar_t *text,
    size_t *error_pos);

extern PyObject* _Py_device_encoding(int);

#if defined(MS_WINDOWS) || defined(__APPLE__)
   /* On Windows, the count parameter of read() is an int (bpo-9015, bpo-9611).
      On macOS 10.13, read() and write() with more than INT_MAX bytes
      fail with EINVAL (bpo-24658). */
#  define _PY_READ_MAX  INT_MAX
#  define _PY_WRITE_MAX INT_MAX
#else
   /* write() should truncate the input to PY_SSIZE_T_MAX bytes,
      but it's safer to do it ourselves to have portable behaviour */
#  define _PY_READ_MAX  PY_SSIZE_T_MAX
#  define _PY_WRITE_MAX PY_SSIZE_T_MAX
#endif

#ifdef MS_WINDOWS
struct _Py_stat_struct {
    uint64_t st_dev;
    uint64_t st_ino;
    unsigned short st_mode;
    int st_nlink;
    int st_uid;
    int st_gid;
    unsigned long st_rdev;
    __int64 st_size;
    time_t st_atime;
    int st_atime_nsec;
    time_t st_mtime;
    int st_mtime_nsec;
    time_t st_ctime;
    int st_ctime_nsec;
    time_t st_birthtime;
    int st_birthtime_nsec;
    unsigned long st_file_attributes;
    unsigned long st_reparse_tag;
    uint64_t st_ino_high;
};
#else
#  define _Py_stat_struct stat
#endif

// Export for 'mmap' shared extension
PyAPI_FUNC(int) _Py_fstat(
    int fd,
    struct _Py_stat_struct *status);

// Export for 'mmap' shared extension
PyAPI_FUNC(int) _Py_fstat_noraise(
    int fd,
    struct _Py_stat_struct *status);

// Export for '_tkinter' shared extension
PyAPI_FUNC(int) _Py_stat(
    PyObject *path,
    struct stat *status);

// Export for 'select' shared extension (Solaris newDevPollObject())
PyAPI_FUNC(int) _Py_open(
    const char *pathname,
    int flags);

// Export for '_posixsubprocess' shared extension
PyAPI_FUNC(int) _Py_open_noraise(
    const char *pathname,
    int flags);

extern FILE* _Py_wfopen(
    const wchar_t *path,
    const wchar_t *mode);

extern Py_ssize_t _Py_read(
    int fd,
    void *buf,
    size_t count);

// Export for 'select' shared extension (Solaris devpoll_flush())
PyAPI_FUNC(Py_ssize_t) _Py_write(
    int fd,
    const void *buf,
    size_t count);

// Export for '_posixsubprocess' shared extension
PyAPI_FUNC(Py_ssize_t) _Py_write_noraise(
    int fd,
    const void *buf,
    size_t count);

#ifdef HAVE_READLINK
extern int _Py_wreadlink(
    const wchar_t *path,
    wchar_t *buf,
    /* Number of characters of 'buf' buffer
       including the trailing NUL character */
    size_t buflen);
#endif

#ifdef HAVE_REALPATH
extern wchar_t* _Py_wrealpath(
    const wchar_t *path,
    wchar_t *resolved_path,
    /* Number of characters of 'resolved_path' buffer
       including the trailing NUL character */
    size_t resolved_path_len);
#endif

extern wchar_t* _Py_wgetcwd(
    wchar_t *buf,
    /* Number of characters of 'buf' buffer
       including the trailing NUL character */
    size_t buflen);

extern int _Py_get_inheritable(int fd);

// Export for '_socket' shared extension
PyAPI_FUNC(int) _Py_set_inheritable(int fd, int inheritable,
                                    int *atomic_flag_works);

// Export for '_posixsubprocess' shared extension
PyAPI_FUNC(int) _Py_set_inheritable_async_safe(int fd, int inheritable,
                                               int *atomic_flag_works);

// Export for '_socket' shared extension
PyAPI_FUNC(int) _Py_dup(int fd);

extern int _Py_get_blocking(int fd);

extern int _Py_set_blocking(int fd, int blocking);

#ifdef MS_WINDOWS
extern void* _Py_get_osfhandle_noraise(int fd);

// Export for '_testconsole' shared extension
PyAPI_FUNC(void*) _Py_get_osfhandle(int fd);

extern int _Py_open_osfhandle_noraise(void *handle, int flags);

extern int _Py_open_osfhandle(void *handle, int flags);
#endif  /* MS_WINDOWS */

// This is used after getting NULL back from Py_DecodeLocale().
#define DECODE_LOCALE_ERR(NAME, LEN) \
    ((LEN) == (size_t)-2) \
        ? _PyStatus_ERR("cannot decode " NAME) \
        : _PyStatus_NO_MEMORY()

extern int _Py_HasFileSystemDefaultEncodeErrors;

extern int _Py_DecodeUTF8Ex(
    const char *arg,
    Py_ssize_t arglen,
    wchar_t **wstr,
    size_t *wlen,
    const char **reason,
    _Py_error_handler errors);

extern int _Py_EncodeUTF8Ex(
    const wchar_t *text,
    char **str,
    size_t *error_pos,
    const char **reason,
    int raw_malloc,
    _Py_error_handler errors);

extern wchar_t* _Py_DecodeUTF8_surrogateescape(
    const char *arg,
    Py_ssize_t arglen,
    size_t *wlen);

extern int
_Py_wstat(const wchar_t *, struct stat *);

extern int _Py_GetForceASCII(void);

/* Reset "force ASCII" mode (if it was initialized).

   This function should be called when Python changes the LC_CTYPE locale,
   so the "force ASCII" mode can be detected again on the new locale
   encoding. */
extern void _Py_ResetForceASCII(void);


extern int _Py_GetLocaleconvNumeric(
    struct lconv *lc,
    PyObject **decimal_point,
    PyObject **thousands_sep);

// Export for '_posixsubprocess' (on macOS)
PyAPI_FUNC(void) _Py_closerange(int first, int last);

extern wchar_t* _Py_GetLocaleEncoding(void);
extern PyObject* _Py_GetLocaleEncodingObject(void);

#ifdef HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION
extern int _Py_LocaleUsesNonUnicodeWchar(void);

extern wchar_t* _Py_DecodeNonUnicodeWchar(
    const wchar_t* native,
    Py_ssize_t size);

extern int _Py_EncodeNonUnicodeWchar_InPlace(
    wchar_t* unicode,
    Py_ssize_t size);
#endif

extern int _Py_isabs(const wchar_t *path);
extern int _Py_abspath(const wchar_t *path, wchar_t **abspath_p);
#ifdef MS_WINDOWS
extern int _PyOS_getfullpathname(const wchar_t *path, wchar_t **abspath_p);
#endif
extern wchar_t* _Py_join_relfile(const wchar_t *dirname,
                                 const wchar_t *relfile);
extern int _Py_add_relfile(wchar_t *dirname,
                           const wchar_t *relfile,
                           size_t bufsize);
extern size_t _Py_find_basename(const wchar_t *filename);

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(wchar_t*) _Py_normpath(wchar_t *path, Py_ssize_t size);

extern wchar_t *_Py_normpath_and_size(wchar_t *path, Py_ssize_t size, Py_ssize_t *length);

// The Windows Games API family does not provide these functions,
// so provide our own implementations. Remove them in case they get added
// to the Games API family.
#if defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP)
#include <winerror.h>   // HRESULT

extern HRESULT PathCchSkipRoot(const wchar_t *pszPath, const wchar_t **ppszRootEnd);
#endif /* defined(MS_WINDOWS_GAMES) && !defined(MS_WINDOWS_DESKTOP) */

extern void _Py_skiproot(const wchar_t *path, Py_ssize_t size, Py_ssize_t *drvsize, Py_ssize_t *rootsize);

// Macros to protect CRT calls against instant termination when passed an
// invalid parameter (bpo-23524). IPH stands for Invalid Parameter Handler.
// Usage:
//
//      _Py_BEGIN_SUPPRESS_IPH
//      ...
//      _Py_END_SUPPRESS_IPH
#if defined _MSC_VER && _MSC_VER >= 1900

#  include <stdlib.h>   // _set_thread_local_invalid_parameter_handler()

extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;
#  define _Py_BEGIN_SUPPRESS_IPH \
    { _invalid_parameter_handler _Py_old_handler = \
      _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);
#  define _Py_END_SUPPRESS_IPH \
      _set_thread_local_invalid_parameter_handler(_Py_old_handler); }
#else
#  define _Py_BEGIN_SUPPRESS_IPH
#  define _Py_END_SUPPRESS_IPH
#endif /* _MSC_VER >= 1900 */

// Export for 'select' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_FileDescriptor_Converter(PyObject *, void *);

// Export for test_peg_generator
PyAPI_FUNC(char*) _Py_UniversalNewlineFgetsWithSize(char *, int, FILE*, PyObject *, size_t*);

extern int _PyFile_Flush(PyObject *);

#ifndef MS_WINDOWS
extern int _Py_GetTicksPerSecond(long *ticks_per_second);
#endif

// Export for '_testcapi' shared extension
PyAPI_FUNC(int) _Py_IsValidFD(int fd);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FILEUTILS_H */
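A brief usage sketch for DECODE_LOCALE_ERR (a hypothetical caller, not vendored code): Py_DecodeLocale() reports failure by returning NULL and setting its size output to an error code, and the macro turns that code into a PyStatus.

static PyStatus
decode_arg(const char *arg, wchar_t **out)   /* hypothetical helper */
{
    size_t wlen;
    wchar_t *w = Py_DecodeLocale(arg, &wlen);   /* NULL on failure */
    if (w == NULL) {
        /* wlen is (size_t)-2 for a decoding error, (size_t)-1 for OOM */
        return DECODE_LOCALE_ERR("command line argument", wlen);
    }
    *out = w;
    return _PyStatus_OK();
}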
98
Dependencies/Python/include/internal/pycore_fileutils_windows.h
vendored
Normal file
@@ -0,0 +1,98 @@
#ifndef Py_INTERNAL_FILEUTILS_WINDOWS_H
#define Py_INTERNAL_FILEUTILS_WINDOWS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#ifdef MS_WINDOWS

#if !defined(NTDDI_WIN10_NI) || !(NTDDI_VERSION >= NTDDI_WIN10_NI)
typedef struct _FILE_STAT_BASIC_INFORMATION {
    LARGE_INTEGER FileId;
    LARGE_INTEGER CreationTime;
    LARGE_INTEGER LastAccessTime;
    LARGE_INTEGER LastWriteTime;
    LARGE_INTEGER ChangeTime;
    LARGE_INTEGER AllocationSize;
    LARGE_INTEGER EndOfFile;
    ULONG FileAttributes;
    ULONG ReparseTag;
    ULONG NumberOfLinks;
    ULONG DeviceType;
    ULONG DeviceCharacteristics;
    ULONG Reserved;
    LARGE_INTEGER VolumeSerialNumber;
    FILE_ID_128 FileId128;
} FILE_STAT_BASIC_INFORMATION;

typedef enum _FILE_INFO_BY_NAME_CLASS {
    FileStatByNameInfo,
    FileStatLxByNameInfo,
    FileCaseSensitiveByNameInfo,
    FileStatBasicByNameInfo,
    MaximumFileInfoByNameClass
} FILE_INFO_BY_NAME_CLASS;
#endif

typedef BOOL (WINAPI *PGetFileInformationByName)(
    PCWSTR FileName,
    FILE_INFO_BY_NAME_CLASS FileInformationClass,
    PVOID FileInfoBuffer,
    ULONG FileInfoBufferSize
);

static inline BOOL _Py_GetFileInformationByName(
    PCWSTR FileName,
    FILE_INFO_BY_NAME_CLASS FileInformationClass,
    PVOID FileInfoBuffer,
    ULONG FileInfoBufferSize
) {
    static PGetFileInformationByName GetFileInformationByName = NULL;
    static int GetFileInformationByName_init = -1;

    if (GetFileInformationByName_init < 0) {
        HMODULE hMod = LoadLibraryW(L"api-ms-win-core-file-l2-1-4");
        GetFileInformationByName_init = 0;
        if (hMod) {
            GetFileInformationByName = (PGetFileInformationByName)GetProcAddress(
                hMod, "GetFileInformationByName");
            if (GetFileInformationByName) {
                GetFileInformationByName_init = 1;
            } else {
                FreeLibrary(hMod);
            }
        }
    }

    if (GetFileInformationByName_init <= 0) {
        SetLastError(ERROR_NOT_SUPPORTED);
        return FALSE;
    }
    return GetFileInformationByName(FileName, FileInformationClass, FileInfoBuffer, FileInfoBufferSize);
}

static inline BOOL _Py_GetFileInformationByName_ErrorIsTrustworthy(int error)
{
    switch (error) {
        case ERROR_FILE_NOT_FOUND:
        case ERROR_PATH_NOT_FOUND:
        case ERROR_NOT_READY:
        case ERROR_BAD_NET_NAME:
        case ERROR_BAD_NETPATH:
        case ERROR_BAD_PATHNAME:
        case ERROR_INVALID_NAME:
        case ERROR_FILENAME_EXCED_RANGE:
            return TRUE;
        case ERROR_NOT_SUPPORTED:
            return FALSE;
    }
    return FALSE;
}

#endif

#endif
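The inline wrapper above performs lazy one-time binding: the first call loads the API set DLL and resolves the entry point, and later calls reuse the cached pointer or fail fast with ERROR_NOT_SUPPORTED. A usage sketch (the path and helper name are hypothetical):

static BOOL
stat_by_name_example(void)
{
    FILE_STAT_BASIC_INFORMATION info;
    if (_Py_GetFileInformationByName(L"C:\\Temp\\example.txt",
                                     FileStatBasicByNameInfo,
                                     &info, sizeof(info))) {
        return TRUE;    /* info.EndOfFile, info.FileAttributes, ... are valid */
    }
    if (_Py_GetFileInformationByName_ErrorIsTrustworthy(GetLastError())) {
        return FALSE;   /* the error genuinely describes the file (e.g. not found) */
    }
    /* API set unavailable: fall back to CreateFileW + handle-based queries. */
    return FALSE;
}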
62
Dependencies/Python/include/internal/pycore_floatobject.h
vendored
Normal file
@@ -0,0 +1,62 @@
#ifndef Py_INTERNAL_FLOATOBJECT_H
#define Py_INTERNAL_FLOATOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_freelist.h"        // _PyFreeListState
#include "pycore_unicodeobject.h"   // _PyUnicodeWriter

/* runtime lifecycle */

extern void _PyFloat_InitState(PyInterpreterState *);
extern PyStatus _PyFloat_InitTypes(PyInterpreterState *);
extern void _PyFloat_FiniType(PyInterpreterState *);


/* other API */

enum _py_float_format_type {
    _py_float_format_unknown,
    _py_float_format_ieee_big_endian,
    _py_float_format_ieee_little_endian,
};

struct _Py_float_runtime_state {
    enum _py_float_format_type float_format;
    enum _py_float_format_type double_format;
};


PyAPI_FUNC(void) _PyFloat_ExactDealloc(PyObject *op);

extern void _PyFloat_DebugMallocStats(FILE* out);


/* Format the object based on the format_spec, as defined in PEP 3101
   (Advanced String Formatting). */
extern int _PyFloat_FormatAdvancedWriter(
    _PyUnicodeWriter *writer,
    PyObject *obj,
    PyObject *format_spec,
    Py_ssize_t start,
    Py_ssize_t end);

extern PyObject* _Py_string_to_number_with_underscores(
    const char *str, Py_ssize_t len, const char *what, PyObject *obj, void *arg,
    PyObject *(*innerfunc)(const char *, Py_ssize_t, void *));

extern double _Py_parse_inf_or_nan(const char *p, char **endptr);


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FLOATOBJECT_H */
40
Dependencies/Python/include/internal/pycore_flowgraph.h
vendored
Normal file
@@ -0,0 +1,40 @@
#ifndef Py_INTERNAL_CFG_H
#define Py_INTERNAL_CFG_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_compile.h"
#include "pycore_instruction_sequence.h"
#include "pycore_opcode_utils.h"

struct _PyCfgBuilder;

int _PyCfgBuilder_UseLabel(struct _PyCfgBuilder *g, _PyJumpTargetLabel lbl);
int _PyCfgBuilder_Addop(struct _PyCfgBuilder *g, int opcode, int oparg, _Py_SourceLocation loc);

struct _PyCfgBuilder* _PyCfgBuilder_New(void);
void _PyCfgBuilder_Free(struct _PyCfgBuilder *g);
int _PyCfgBuilder_CheckSize(struct _PyCfgBuilder* g);

int _PyCfg_OptimizeCodeUnit(struct _PyCfgBuilder *g, PyObject *consts, PyObject *const_cache,
                            int nlocals, int nparams, int firstlineno);

int _PyCfg_ToInstructionSequence(struct _PyCfgBuilder *g, _PyInstructionSequence *seq);
int _PyCfg_OptimizedCfgToInstructionSequence(struct _PyCfgBuilder *g, _PyCompile_CodeUnitMetadata *umd,
                                             int code_flags, int *stackdepth, int *nlocalsplus,
                                             _PyInstructionSequence *seq);

PyCodeObject *
_PyAssemble_MakeCodeObject(_PyCompile_CodeUnitMetadata *u, PyObject *const_cache,
                           PyObject *consts, int maxdepth, _PyInstructionSequence *instrs,
                           int nlocalsplus, int code_flags, PyObject *filename);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CFG_H */
27
Dependencies/Python/include/internal/pycore_format.h
vendored
Normal file
@@ -0,0 +1,27 @@
#ifndef Py_INTERNAL_FORMAT_H
#define Py_INTERNAL_FORMAT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

/* Format codes
 * F_LJUST      '-'
 * F_SIGN       '+'
 * F_BLANK      ' '
 * F_ALT        '#'
 * F_ZERO       '0'
 */
#define F_LJUST (1<<0)
#define F_SIGN  (1<<1)
#define F_BLANK (1<<2)
#define F_ALT   (1<<3)
#define F_ZERO  (1<<4)

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FORMAT_H */
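These flags are independent bits, so a parser ORs them together while scanning the flag characters of a printf-style spec such as "%-#08d". A minimal sketch (parse_flags is a hypothetical helper, not vendored code):

static int
parse_flags(const char *spec)
{
    int flags = 0;
    for (; *spec; spec++) {
        switch (*spec) {
            case '-': flags |= F_LJUST; continue;
            case '+': flags |= F_SIGN;  continue;
            case ' ': flags |= F_BLANK; continue;
            case '#': flags |= F_ALT;   continue;
            case '0': flags |= F_ZERO;  continue;
        }
        break;   /* first non-flag character ends the flag run */
    }
    return flags;
}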
330
Dependencies/Python/include/internal/pycore_frame.h
vendored
Normal file
@@ -0,0 +1,330 @@
#ifndef Py_INTERNAL_FRAME_H
#define Py_INTERNAL_FRAME_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include <stdbool.h>
#include <stddef.h>           // offsetof()
#include "pycore_code.h"      // STATS

/* See Objects/frame_layout.md for an explanation of the frame stack
 * including explanation of the PyFrameObject and _PyInterpreterFrame
 * structs. */


struct _frame {
    PyObject_HEAD
    PyFrameObject *f_back;      /* previous frame, or NULL */
    struct _PyInterpreterFrame *f_frame; /* points to the frame data */
    PyObject *f_trace;          /* Trace function */
    int f_lineno;               /* Current line number. Only valid if non-zero */
    char f_trace_lines;         /* Emit per-line trace events? */
    char f_trace_opcodes;       /* Emit per-opcode trace events? */
    PyObject *f_extra_locals;   /* Dict for locals set by users using f_locals, could be NULL */
    /* This is purely for backwards compatibility for PyEval_GetLocals.
       PyEval_GetLocals requires a borrowed reference so the actual reference
       is stored here */
    PyObject *f_locals_cache;
    /* The frame data, if this frame object owns the frame */
    PyObject *_f_frame_data[1];
};

extern PyFrameObject* _PyFrame_New_NoTrack(PyCodeObject *code);


/* other API */

typedef enum _framestate {
    FRAME_CREATED = -3,
    FRAME_SUSPENDED = -2,
    FRAME_SUSPENDED_YIELD_FROM = -1,
    FRAME_EXECUTING = 0,
    FRAME_COMPLETED = 1,
    FRAME_CLEARED = 4
} PyFrameState;

#define FRAME_STATE_SUSPENDED(S) ((S) == FRAME_SUSPENDED || (S) == FRAME_SUSPENDED_YIELD_FROM)
#define FRAME_STATE_FINISHED(S) ((S) >= FRAME_COMPLETED)

enum _frameowner {
    FRAME_OWNED_BY_THREAD = 0,
    FRAME_OWNED_BY_GENERATOR = 1,
    FRAME_OWNED_BY_FRAME_OBJECT = 2,
    FRAME_OWNED_BY_CSTACK = 3,
};

typedef struct _PyInterpreterFrame {
    PyObject *f_executable;     /* Strong reference (code object or None) */
    struct _PyInterpreterFrame *previous;
    PyObject *f_funcobj;        /* Strong reference. Only valid if not on C stack */
    PyObject *f_globals;        /* Borrowed reference. Only valid if not on C stack */
    PyObject *f_builtins;       /* Borrowed reference. Only valid if not on C stack */
    PyObject *f_locals;         /* Strong reference, may be NULL. Only valid if not on C stack */
    PyFrameObject *frame_obj;   /* Strong reference, may be NULL. Only valid if not on C stack */
    _Py_CODEUNIT *instr_ptr;    /* Instruction currently executing (or about to begin) */
    int stacktop;               /* Offset of TOS from localsplus */
    uint16_t return_offset;     /* Only relevant during a function call */
    char owner;
    /* Locals and stack */
    PyObject *localsplus[1];
} _PyInterpreterFrame;

#define _PyInterpreterFrame_LASTI(IF) \
    ((int)((IF)->instr_ptr - _PyCode_CODE(_PyFrame_GetCode(IF))))

static inline PyCodeObject *_PyFrame_GetCode(_PyInterpreterFrame *f) {
    assert(PyCode_Check(f->f_executable));
    return (PyCodeObject *)f->f_executable;
}

static inline PyObject **_PyFrame_Stackbase(_PyInterpreterFrame *f) {
    return f->localsplus + _PyFrame_GetCode(f)->co_nlocalsplus;
}

static inline PyObject *_PyFrame_StackPeek(_PyInterpreterFrame *f) {
    assert(f->stacktop > _PyFrame_GetCode(f)->co_nlocalsplus);
    assert(f->localsplus[f->stacktop-1] != NULL);
    return f->localsplus[f->stacktop-1];
}

static inline PyObject *_PyFrame_StackPop(_PyInterpreterFrame *f) {
    assert(f->stacktop > _PyFrame_GetCode(f)->co_nlocalsplus);
    f->stacktop--;
    return f->localsplus[f->stacktop];
}

static inline void _PyFrame_StackPush(_PyInterpreterFrame *f, PyObject *value) {
    f->localsplus[f->stacktop] = value;
    f->stacktop++;
}

#define FRAME_SPECIALS_SIZE ((int)((sizeof(_PyInterpreterFrame)-1)/sizeof(PyObject *)))

static inline int
_PyFrame_NumSlotsForCodeObject(PyCodeObject *code)
{
    /* This function needs to remain in sync with the calculation of
     * co_framesize in Tools/build/deepfreeze.py */
    assert(code->co_framesize >= FRAME_SPECIALS_SIZE);
    return code->co_framesize - FRAME_SPECIALS_SIZE;
}

static inline void _PyFrame_Copy(_PyInterpreterFrame *src, _PyInterpreterFrame *dest)
{
    assert(src->stacktop >= _PyFrame_GetCode(src)->co_nlocalsplus);
    *dest = *src;
    for (int i = 1; i < src->stacktop; i++) {
        dest->localsplus[i] = src->localsplus[i];
    }
    // Don't leave a dangling pointer to the old frame when creating generators
    // and coroutines:
    dest->previous = NULL;
}

/* Consumes reference to func and locals.
   Does not initialize frame->previous, which happens
   when frame is linked into the frame stack.
 */
static inline void
_PyFrame_Initialize(
    _PyInterpreterFrame *frame, PyFunctionObject *func,
    PyObject *locals, PyCodeObject *code, int null_locals_from)
{
    frame->f_funcobj = (PyObject *)func;
    frame->f_executable = Py_NewRef(code);
    frame->f_builtins = func->func_builtins;
    frame->f_globals = func->func_globals;
    frame->f_locals = locals;
    frame->stacktop = code->co_nlocalsplus;
    frame->frame_obj = NULL;
    frame->instr_ptr = _PyCode_CODE(code);
    frame->return_offset = 0;
    frame->owner = FRAME_OWNED_BY_THREAD;

    for (int i = null_locals_from; i < code->co_nlocalsplus; i++) {
        frame->localsplus[i] = NULL;
    }
}

/* Gets the pointer to the locals array
 * that precedes this frame.
 */
static inline PyObject**
_PyFrame_GetLocalsArray(_PyInterpreterFrame *frame)
{
    return frame->localsplus;
}

/* Fetches the stack pointer, and sets stacktop to -1.
   Having stacktop <= 0 ensures that invalid
   values are not visible to the cycle GC.
   We choose -1 rather than 0 to assist debugging. */
static inline PyObject**
_PyFrame_GetStackPointer(_PyInterpreterFrame *frame)
{
    PyObject **sp = frame->localsplus + frame->stacktop;
    frame->stacktop = -1;
    return sp;
}

static inline void
_PyFrame_SetStackPointer(_PyInterpreterFrame *frame, PyObject **stack_pointer)
{
    frame->stacktop = (int)(stack_pointer - frame->localsplus);
}

/* Determine whether a frame is incomplete.
 * A frame is incomplete if it is part way through
 * creating cell objects or a generator or coroutine.
 *
 * Frames on the frame stack are incomplete until the
 * first RESUME instruction.
 * Frames owned by a generator are always complete.
 */
static inline bool
_PyFrame_IsIncomplete(_PyInterpreterFrame *frame)
{
    if (frame->owner == FRAME_OWNED_BY_CSTACK) {
        return true;
    }
    return frame->owner != FRAME_OWNED_BY_GENERATOR &&
           frame->instr_ptr < _PyCode_CODE(_PyFrame_GetCode(frame)) + _PyFrame_GetCode(frame)->_co_firsttraceable;
}

static inline _PyInterpreterFrame *
_PyFrame_GetFirstComplete(_PyInterpreterFrame *frame)
{
    while (frame && _PyFrame_IsIncomplete(frame)) {
        frame = frame->previous;
    }
    return frame;
}

static inline _PyInterpreterFrame *
_PyThreadState_GetFrame(PyThreadState *tstate)
{
    return _PyFrame_GetFirstComplete(tstate->current_frame);
}

/* For use by _PyFrame_GetFrameObject
   Do not call directly. */
PyFrameObject *
_PyFrame_MakeAndSetFrameObject(_PyInterpreterFrame *frame);

/* Gets the PyFrameObject for this frame, lazily
 * creating it if necessary.
 * Returns a borrowed reference */
static inline PyFrameObject *
_PyFrame_GetFrameObject(_PyInterpreterFrame *frame)
{
    assert(!_PyFrame_IsIncomplete(frame));
    PyFrameObject *res = frame->frame_obj;
    if (res != NULL) {
        return res;
    }
    return _PyFrame_MakeAndSetFrameObject(frame);
}

void
_PyFrame_ClearLocals(_PyInterpreterFrame *frame);

/* Clears all references in the frame.
 * If take is non-zero, then the _PyInterpreterFrame frame
 * may be transferred to the frame object it references
 * instead of being cleared. Either way
 * the caller no longer owns the references
 * in the frame.
 * take should be set to 1 for heap allocated
 * frames like the ones in generators and coroutines.
 */
void
_PyFrame_ClearExceptCode(_PyInterpreterFrame *frame);

int
_PyFrame_Traverse(_PyInterpreterFrame *frame, visitproc visit, void *arg);

bool
_PyFrame_HasHiddenLocals(_PyInterpreterFrame *frame);

PyObject *
_PyFrame_GetLocals(_PyInterpreterFrame *frame);

static inline bool
_PyThreadState_HasStackSpace(PyThreadState *tstate, int size)
{
    assert(
        (tstate->datastack_top == NULL && tstate->datastack_limit == NULL)
        ||
        (tstate->datastack_top != NULL && tstate->datastack_limit != NULL)
    );
    return tstate->datastack_top != NULL &&
        size < tstate->datastack_limit - tstate->datastack_top;
}

extern _PyInterpreterFrame *
_PyThreadState_PushFrame(PyThreadState *tstate, size_t size);

PyAPI_FUNC(void) _PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame *frame);

/* Pushes a frame without checking for space.
 * Must be guarded by _PyThreadState_HasStackSpace()
 * Consumes reference to func. */
static inline _PyInterpreterFrame *
_PyFrame_PushUnchecked(PyThreadState *tstate, PyFunctionObject *func, int null_locals_from)
{
    CALL_STAT_INC(frames_pushed);
    PyCodeObject *code = (PyCodeObject *)func->func_code;
    _PyInterpreterFrame *new_frame = (_PyInterpreterFrame *)tstate->datastack_top;
    tstate->datastack_top += code->co_framesize;
    assert(tstate->datastack_top < tstate->datastack_limit);
    _PyFrame_Initialize(new_frame, func, NULL, code, null_locals_from);
    return new_frame;
}

/* Pushes a trampoline frame without checking for space.
 * Must be guarded by _PyThreadState_HasStackSpace() */
static inline _PyInterpreterFrame *
_PyFrame_PushTrampolineUnchecked(PyThreadState *tstate, PyCodeObject *code, int stackdepth)
{
    CALL_STAT_INC(frames_pushed);
    _PyInterpreterFrame *frame = (_PyInterpreterFrame *)tstate->datastack_top;
    tstate->datastack_top += code->co_framesize;
    assert(tstate->datastack_top < tstate->datastack_limit);
    frame->f_funcobj = Py_None;
    frame->f_executable = Py_NewRef(code);
#ifdef Py_DEBUG
    frame->f_builtins = NULL;
    frame->f_globals = NULL;
#endif
    frame->f_locals = NULL;
    frame->stacktop = code->co_nlocalsplus + stackdepth;
    frame->frame_obj = NULL;
    frame->instr_ptr = _PyCode_CODE(code);
    frame->owner = FRAME_OWNED_BY_THREAD;
    frame->return_offset = 0;
    return frame;
}

static inline
PyGenObject *_PyFrame_GetGenerator(_PyInterpreterFrame *frame)
{
    assert(frame->owner == FRAME_OWNED_BY_GENERATOR);
    size_t offset_in_gen = offsetof(PyGenObject, gi_iframe);
    return (PyGenObject *)(((char *)frame) - offset_in_gen);
}

PyAPI_FUNC(_PyInterpreterFrame *)
_PyEvalFramePushAndInit(PyThreadState *tstate, PyFunctionObject *func,
                        PyObject *locals, PyObject* const* args,
                        size_t argcount, PyObject *kwnames);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FRAME_H */
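In this layout, localsplus[0 .. co_nlocalsplus) holds locals and cells, and the value stack occupies localsplus[co_nlocalsplus .. stacktop), so pushing and popping just move the stacktop offset. A small illustrative model (demo_stack is hypothetical, not vendored code):

static void
demo_stack(_PyInterpreterFrame *f)
{
    assert(f->stacktop >= _PyFrame_GetCode(f)->co_nlocalsplus);
    _PyFrame_StackPush(f, Py_NewRef(Py_None));   /* stacktop grows by one */
    PyObject *popped = _PyFrame_StackPop(f);     /* returns the same reference */
    assert(popped == Py_None);
    Py_DECREF(popped);                           /* we own the popped ref */
}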
153
Dependencies/Python/include/internal/pycore_freelist.h
vendored
Normal file
@@ -0,0 +1,153 @@
#ifndef Py_INTERNAL_FREELIST_H
#define Py_INTERNAL_FREELIST_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

// PyTuple_MAXSAVESIZE - largest tuple to save on free list
// PyTuple_MAXFREELIST - maximum number of tuples of each size to save

#ifdef WITH_FREELISTS
// with freelists
#  define PyTuple_MAXSAVESIZE 20
#  define PyTuple_NFREELISTS PyTuple_MAXSAVESIZE
#  define PyTuple_MAXFREELIST 2000
#  define PyList_MAXFREELIST 80
#  define PyDict_MAXFREELIST 80
#  define PyFloat_MAXFREELIST 100
#  define PyContext_MAXFREELIST 255
#  define _PyAsyncGen_MAXFREELIST 80
#  define _PyObjectStackChunk_MAXFREELIST 4
#else
#  define PyTuple_NFREELISTS 0
#  define PyTuple_MAXFREELIST 0
#  define PyList_MAXFREELIST 0
#  define PyDict_MAXFREELIST 0
#  define PyFloat_MAXFREELIST 0
#  define PyContext_MAXFREELIST 0
#  define _PyAsyncGen_MAXFREELIST 0
#  define _PyObjectStackChunk_MAXFREELIST 0
#endif

struct _Py_list_freelist {
#ifdef WITH_FREELISTS
    PyListObject *items[PyList_MAXFREELIST];
    int numfree;
#endif
};

struct _Py_tuple_freelist {
#if WITH_FREELISTS
    /* There is one freelist for each size from 1 to PyTuple_MAXSAVESIZE.
       The empty tuple is handled separately.

       Each tuple stored in the array is the head of the linked list
       (and the next available tuple) for that size. The actual tuple
       object is used as the linked list node, with its first item
       (ob_item[0]) pointing to the next node (i.e. the previous head).
       Each linked list is initially NULL. */
    PyTupleObject *items[PyTuple_NFREELISTS];
    int numfree[PyTuple_NFREELISTS];
#else
    char _unused;  // Empty structs are not allowed.
#endif
};

struct _Py_float_freelist {
#ifdef WITH_FREELISTS
    /* Special free list
       free_list is a singly-linked list of available PyFloatObjects,
       linked via abuse of their ob_type members. */
    int numfree;
    PyFloatObject *items;
#endif
};

struct _Py_dict_freelist {
#ifdef WITH_FREELISTS
    /* Dictionary reuse scheme to save calls to malloc and free */
    PyDictObject *items[PyDict_MAXFREELIST];
    int numfree;
#endif
};

struct _Py_dictkeys_freelist {
#ifdef WITH_FREELISTS
    /* Dictionary keys reuse scheme to save calls to malloc and free */
    PyDictKeysObject *items[PyDict_MAXFREELIST];
    int numfree;
#endif
};

struct _Py_slice_freelist {
#ifdef WITH_FREELISTS
    /* Using a cache is very effective since typically only a single slice is
       created and then deleted again. */
    PySliceObject *slice_cache;
#endif
};

struct _Py_context_freelist {
#ifdef WITH_FREELISTS
    // List of free PyContext objects
    PyContext *items;
    int numfree;
#endif
};

struct _Py_async_gen_freelist {
#ifdef WITH_FREELISTS
    /* Freelists boost performance 6-10%; they also reduce memory
       fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend
       are short-living objects that are instantiated for every
       __anext__() call. */
    struct _PyAsyncGenWrappedValue* items[_PyAsyncGen_MAXFREELIST];
    int numfree;
#endif
};

struct _Py_async_gen_asend_freelist {
#ifdef WITH_FREELISTS
    struct PyAsyncGenASend* items[_PyAsyncGen_MAXFREELIST];
    int numfree;
#endif
};

struct _PyObjectStackChunk;

struct _Py_object_stack_freelist {
    struct _PyObjectStackChunk *items;
    Py_ssize_t numfree;
};

struct _Py_object_freelists {
    struct _Py_float_freelist floats;
    struct _Py_tuple_freelist tuples;
    struct _Py_list_freelist lists;
    struct _Py_dict_freelist dicts;
    struct _Py_dictkeys_freelist dictkeys;
    struct _Py_slice_freelist slices;
    struct _Py_context_freelist contexts;
    struct _Py_async_gen_freelist async_gens;
    struct _Py_async_gen_asend_freelist async_gen_asends;
    struct _Py_object_stack_freelist object_stacks;
};

extern void _PyObject_ClearFreeLists(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyTuple_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyFloat_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyList_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PySlice_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyDict_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyAsyncGen_ClearFreeLists(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyContext_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);
extern void _PyObjectStackChunk_ClearFreeList(struct _Py_object_freelists *freelists, int is_finalization);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FREELIST_H */
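The tuple freelist comment above describes an intrusive linked list: a dead tuple's first slot doubles as the 'next' pointer, so the freelist costs no extra memory. A sketch of that trick under a WITH_FREELISTS build (tuple_freelist_push/pop are hypothetical helpers, not vendored code; 'index' is the per-size slot, i.e. tuple size minus one):

static void
tuple_freelist_push(struct _Py_tuple_freelist *fl, PyTupleObject *op, int index)
{
    op->ob_item[0] = (PyObject *)fl->items[index];  /* link to old head */
    fl->items[index] = op;                          /* op becomes new head */
    fl->numfree[index]++;
}

static PyTupleObject *
tuple_freelist_pop(struct _Py_tuple_freelist *fl, int index)
{
    PyTupleObject *op = fl->items[index];
    if (op == NULL) {
        return NULL;                                /* freelist empty */
    }
    fl->items[index] = (PyTupleObject *)op->ob_item[0];
    fl->numfree[index]--;
    return op;
}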
55
Dependencies/Python/include/internal/pycore_function.h
vendored
Normal file
@@ -0,0 +1,55 @@
#ifndef Py_INTERNAL_FUNCTION_H
#define Py_INTERNAL_FUNCTION_H
#ifdef __cplusplus
extern "C" {
#endif

#include "pycore_lock.h"

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

extern PyObject* _PyFunction_Vectorcall(
    PyObject *func,
    PyObject *const *stack,
    size_t nargsf,
    PyObject *kwnames);

#define FUNC_MAX_WATCHERS 8

#define FUNC_VERSION_CACHE_SIZE (1<<12)  /* Must be a power of 2 */

struct _func_version_cache_item {
    PyFunctionObject *func;
    PyObject *code;
};

struct _py_func_state {
#ifdef Py_GIL_DISABLED
    // Protects next_version
    PyMutex mutex;
#endif

    uint32_t next_version;
    // Borrowed references to function and code objects whose
    // func_version % FUNC_VERSION_CACHE_SIZE
    // once was equal to the index in the table.
    // They are cleared when the function or code object is deallocated.
    struct _func_version_cache_item func_version_cache[FUNC_VERSION_CACHE_SIZE];
};

extern PyFunctionObject* _PyFunction_FromConstructor(PyFrameConstructor *constr);

extern uint32_t _PyFunction_GetVersionForCurrentState(PyFunctionObject *func);
PyAPI_FUNC(void) _PyFunction_SetVersion(PyFunctionObject *func, uint32_t version);
void _PyFunction_ClearCodeByVersion(uint32_t version);
PyFunctionObject *_PyFunction_LookupByVersion(uint32_t version, PyObject **p_code);

extern PyObject *_Py_set_function_type_params(
    PyThreadState* unused, PyObject *func, PyObject *type_params);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FUNCTION_H */
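The version cache is direct-mapped: a version hashes to one slot via the modulo in the comment, and collisions simply overwrite. A lookup sketch under the assumption that PyFunctionObject carries a func_version field (lookup_version is a hypothetical helper; the real entry point is _PyFunction_LookupByVersion):

static PyFunctionObject *
lookup_version(struct _py_func_state *state, uint32_t version)
{
    struct _func_version_cache_item *slot =
        &state->func_version_cache[version % FUNC_VERSION_CACHE_SIZE];
    if (slot->func != NULL && slot->func->func_version == version) {
        return slot->func;   /* borrowed reference */
    }
    return NULL;             /* miss: evicted, cleared, or never cached */
}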
365
Dependencies/Python/include/internal/pycore_gc.h
vendored
Normal file
@@ -0,0 +1,365 @@
|
||||
#ifndef Py_INTERNAL_GC_H
|
||||
#define Py_INTERNAL_GC_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef Py_BUILD_CORE
|
||||
# error "this header requires Py_BUILD_CORE define"
|
||||
#endif
|
||||
|
||||
#include "pycore_freelist.h" // _PyFreeListState
|
||||
|
||||
/* GC information is stored BEFORE the object structure. */
|
||||
typedef struct {
|
||||
// Pointer to next object in the list.
|
||||
// 0 means the object is not tracked
|
||||
uintptr_t _gc_next;
|
||||
|
||||
// Pointer to previous object in the list.
|
||||
// Lowest two bits are used for flags documented later.
|
||||
uintptr_t _gc_prev;
|
||||
} PyGC_Head;
|
||||
|
||||
#define _PyGC_Head_UNUSED PyGC_Head
|
||||
|
||||
|
||||
/* Get an object's GC head */
|
||||
static inline PyGC_Head* _Py_AS_GC(PyObject *op) {
|
||||
char *gc = ((char*)op) - sizeof(PyGC_Head);
|
||||
return (PyGC_Head*)gc;
|
||||
}
|
||||
|
||||
/* Get the object given the GC head */
|
||||
static inline PyObject* _Py_FROM_GC(PyGC_Head *gc) {
|
||||
char *op = ((char *)gc) + sizeof(PyGC_Head);
|
||||
return (PyObject *)op;
|
||||
}
|
||||
|
||||
|
||||
/* Bit flags for ob_gc_bits (in Py_GIL_DISABLED builds)
|
||||
*
|
||||
* Setting the bits requires a relaxed store. The per-object lock must also be
|
||||
* held, except when the object is only visible to a single thread (e.g. during
|
||||
* object initialization or destruction).
|
||||
*
|
||||
* Reading the bits requires using a relaxed load, but does not require holding
|
||||
* the per-object lock.
|
||||
*/
|
||||
#ifdef Py_GIL_DISABLED
|
||||
# define _PyGC_BITS_TRACKED (1) // Tracked by the GC
|
||||
# define _PyGC_BITS_FINALIZED (2) // tp_finalize was called
|
||||
# define _PyGC_BITS_UNREACHABLE (4)
|
||||
# define _PyGC_BITS_FROZEN (8)
|
||||
# define _PyGC_BITS_SHARED (16)
|
||||
# define _PyGC_BITS_SHARED_INLINE (32)
|
||||
# define _PyGC_BITS_DEFERRED (64) // Use deferred reference counting
|
||||
#endif
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
static inline void
|
||||
_PyObject_SET_GC_BITS(PyObject *op, uint8_t new_bits)
|
||||
{
|
||||
uint8_t bits = _Py_atomic_load_uint8_relaxed(&op->ob_gc_bits);
|
||||
_Py_atomic_store_uint8_relaxed(&op->ob_gc_bits, bits | new_bits);
|
||||
}
|
||||
|
||||
static inline int
|
||||
_PyObject_HAS_GC_BITS(PyObject *op, uint8_t bits)
|
||||
{
|
||||
return (_Py_atomic_load_uint8_relaxed(&op->ob_gc_bits) & bits) != 0;
|
||||
}
|
||||
|
||||
static inline void
|
||||
_PyObject_CLEAR_GC_BITS(PyObject *op, uint8_t bits_to_clear)
|
||||
{
|
||||
uint8_t bits = _Py_atomic_load_uint8_relaxed(&op->ob_gc_bits);
|
||||
_Py_atomic_store_uint8_relaxed(&op->ob_gc_bits, bits & ~bits_to_clear);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/* True if the object is currently tracked by the GC. */
|
||||
static inline int _PyObject_GC_IS_TRACKED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_TRACKED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
return (gc->_gc_next != 0);
|
||||
#endif
|
||||
}
|
||||
#define _PyObject_GC_IS_TRACKED(op) _PyObject_GC_IS_TRACKED(_Py_CAST(PyObject*, op))
|
||||
|
||||
/* True if the object may be tracked by the GC in the future, or already is.
|
||||
This can be useful to implement some optimizations. */
|
||||
static inline int _PyObject_GC_MAY_BE_TRACKED(PyObject *obj) {
|
||||
if (!PyObject_IS_GC(obj)) {
|
||||
return 0;
|
||||
}
|
||||
if (PyTuple_CheckExact(obj)) {
|
||||
return _PyObject_GC_IS_TRACKED(obj);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
#ifdef Py_GIL_DISABLED
|
||||
|
||||
/* True if memory the object references is shared between
|
||||
* multiple threads and needs special purpose when freeing
|
||||
* those references due to the possibility of in-flight
|
||||
* lock-free reads occurring. The object is responsible
|
||||
* for calling _PyMem_FreeDelayed on the referenced
|
||||
* memory. */
|
||||
static inline int _PyObject_GC_IS_SHARED(PyObject *op) {
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_SHARED);
|
||||
}
|
||||
#define _PyObject_GC_IS_SHARED(op) _PyObject_GC_IS_SHARED(_Py_CAST(PyObject*, op))
|
||||
|
||||
static inline void _PyObject_GC_SET_SHARED(PyObject *op) {
|
||||
_PyObject_SET_GC_BITS(op, _PyGC_BITS_SHARED);
|
||||
}
|
||||
#define _PyObject_GC_SET_SHARED(op) _PyObject_GC_SET_SHARED(_Py_CAST(PyObject*, op))
|
||||
|
||||
/* True if the memory of the object is shared between multiple
|
||||
* threads and needs special purpose when freeing due to
|
||||
* the possibility of in-flight lock-free reads occurring.
|
||||
* Objects with this bit that are GC objects will automatically
|
||||
* delay-freed by PyObject_GC_Del. */
|
||||
static inline int _PyObject_GC_IS_SHARED_INLINE(PyObject *op) {
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_SHARED_INLINE);
|
||||
}
|
||||
#define _PyObject_GC_IS_SHARED_INLINE(op) \
|
||||
_PyObject_GC_IS_SHARED_INLINE(_Py_CAST(PyObject*, op))
|
||||
|
||||
static inline void _PyObject_GC_SET_SHARED_INLINE(PyObject *op) {
|
||||
_PyObject_SET_GC_BITS(op, _PyGC_BITS_SHARED_INLINE);
|
||||
}
|
||||
#define _PyObject_GC_SET_SHARED_INLINE(op) \
|
||||
_PyObject_GC_SET_SHARED_INLINE(_Py_CAST(PyObject*, op))
|
||||
|
||||
#endif
|
||||
|
||||
/* Bit flags for _gc_prev */
|
||||
/* Bit 0 is set when tp_finalize is called */
|
||||
#define _PyGC_PREV_MASK_FINALIZED (1)
|
||||
/* Bit 1 is set when the object is in generation which is GCed currently. */
|
||||
#define _PyGC_PREV_MASK_COLLECTING (2)
|
||||
/* The (N-2) most significant bits contain the real address. */
|
||||
#define _PyGC_PREV_SHIFT (2)
|
||||
#define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT)
|
||||
|
||||
/* set for debugging information */
|
||||
#define _PyGC_DEBUG_STATS (1<<0) /* print collection statistics */
|
||||
#define _PyGC_DEBUG_COLLECTABLE (1<<1) /* print collectable objects */
|
||||
#define _PyGC_DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
|
||||
#define _PyGC_DEBUG_SAVEALL (1<<5) /* save all garbage in gc.garbage */
|
||||
#define _PyGC_DEBUG_LEAK _PyGC_DEBUG_COLLECTABLE | \
|
||||
_PyGC_DEBUG_UNCOLLECTABLE | \
|
||||
_PyGC_DEBUG_SAVEALL
|
||||
|
||||
typedef enum {
|
||||
// GC was triggered by heap allocation
|
||||
_Py_GC_REASON_HEAP,
|
||||
|
||||
// GC was called during shutdown
|
||||
_Py_GC_REASON_SHUTDOWN,
|
||||
|
||||
// GC was called by gc.collect() or PyGC_Collect()
|
||||
_Py_GC_REASON_MANUAL
|
||||
} _PyGC_Reason;
|
||||
|
||||
// Lowest bit of _gc_next is used for flags only in GC.
|
||||
// But it is always 0 for normal code.
|
||||
static inline PyGC_Head* _PyGCHead_NEXT(PyGC_Head *gc) {
|
||||
uintptr_t next = gc->_gc_next;
|
||||
return (PyGC_Head*)next;
|
||||
}
|
||||
static inline void _PyGCHead_SET_NEXT(PyGC_Head *gc, PyGC_Head *next) {
|
||||
gc->_gc_next = (uintptr_t)next;
|
||||
}
|
||||
|
||||
// Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags.
|
||||
static inline PyGC_Head* _PyGCHead_PREV(PyGC_Head *gc) {
|
||||
uintptr_t prev = (gc->_gc_prev & _PyGC_PREV_MASK);
|
||||
return (PyGC_Head*)prev;
|
||||
}
|
||||
static inline void _PyGCHead_SET_PREV(PyGC_Head *gc, PyGC_Head *prev) {
|
||||
uintptr_t uprev = (uintptr_t)prev;
|
||||
assert((uprev & ~_PyGC_PREV_MASK) == 0);
|
||||
gc->_gc_prev = ((gc->_gc_prev & ~_PyGC_PREV_MASK) | uprev);
|
||||
}
|
||||
|
||||
static inline int _PyGC_FINALIZED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_FINALIZED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
return ((gc->_gc_prev & _PyGC_PREV_MASK_FINALIZED) != 0);
|
||||
#endif
|
||||
}
|
||||
static inline void _PyGC_SET_FINALIZED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
_PyObject_SET_GC_BITS(op, _PyGC_BITS_FINALIZED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
gc->_gc_prev |= _PyGC_PREV_MASK_FINALIZED;
|
||||
#endif
|
||||
}
|
||||
static inline void _PyGC_CLEAR_FINALIZED(PyObject *op) {
|
||||
#ifdef Py_GIL_DISABLED
|
||||
_PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_FINALIZED);
|
||||
#else
|
||||
PyGC_Head *gc = _Py_AS_GC(op);
|
||||
gc->_gc_prev &= ~_PyGC_PREV_MASK_FINALIZED;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
/* GC runtime state */
|
||||
|
||||
/* If we change this, we need to change the default value in the
|
||||
signature of gc.collect. */
|
||||
#define NUM_GENERATIONS 3
|
||||
/*
|
||||
NOTE: about untracking of mutable objects.
|
||||
|
||||
Certain types of container cannot participate in a reference cycle, and
|
||||
so do not need to be tracked by the garbage collector. Untracking these
|
||||
objects reduces the cost of garbage collections. However, determining
|
||||
which objects may be untracked is not free, and the costs must be
|
||||
weighed against the benefits for garbage collection.
|
||||
|
||||
There are two possible strategies for when to untrack a container:
|
||||
|
||||
i) When the container is created.
|
||||
ii) When the container is examined by the garbage collector.
|
||||
|
||||
Tuples containing only immutable objects (integers, strings etc, and
|
||||
recursively, tuples of immutable objects) do not need to be tracked.
|
||||
The interpreter creates a large number of tuples, many of which will
|
||||
not survive until garbage collection. It is therefore not worthwhile
|
||||
to untrack eligible tuples at creation time.
|
||||
|
||||
Instead, all tuples except the empty tuple are tracked when created.
|
||||
During garbage collection it is determined whether any surviving tuples
|
||||
can be untracked. A tuple can be untracked if all of its contents are
|
||||
already not tracked. Tuples are examined for untracking in all garbage
|
||||
collection cycles. It may take more than one cycle to untrack a tuple.
|
||||
|
||||
Dictionaries containing only immutable objects also do not need to be
|
||||
tracked. Dictionaries are untracked when created. If a tracked item is
|
||||
inserted into a dictionary (either as a key or value), the dictionary
|
||||
becomes tracked. During a full garbage collection (all generations),
|
||||
the collector will untrack any dictionaries whose contents are not
|
||||
tracked.
|
||||
|
||||
The module provides the python function is_tracked(obj), which returns
|
||||
the CURRENT tracking status of the object. Subsequent garbage
|
||||
collections may change the tracking status of the object.
|
||||
|
||||
Untracking of certain containers was introduced in issue #4688, and
|
||||
the algorithm was refined in response to issue #14775.
|
||||
*/
|
||||
|
||||
struct gc_generation {
    PyGC_Head head;
    int threshold; /* collection threshold */
    int count; /* count of allocations or collections of younger
                  generations */
};
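
/* Editor's note (illustrative, not part of the vendored header): generation
   0's `count` is bumped as objects are allocated; when it exceeds
   `threshold`, that generation is collected and the next-older generation's
   `count` (which counts collections of its younger generation) is bumped in
   turn. A sketch of the trigger test, under those assumptions: */
#if 0
static int
gen_should_collect(const struct gc_generation *gen)   /* hypothetical */
{
    return gen->threshold > 0 && gen->count > gen->threshold;
}
#endif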

/* Running stats per generation */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

struct _gc_runtime_state {
    /* List of objects that still need to be cleaned up, singly linked
     * via their gc headers' gc_prev pointers. */
    PyObject *trash_delete_later;
    /* Current call-stack depth of tp_dealloc calls. */
    int trash_delete_nesting;

    /* Is automatic collection enabled? */
    int enabled;
    int debug;
    /* linked lists of container objects */
    struct gc_generation generations[NUM_GENERATIONS];
    PyGC_Head *generation0;
    /* a permanent generation which won't be collected */
    struct gc_generation permanent_generation;
    struct gc_generation_stats generation_stats[NUM_GENERATIONS];
    /* true if we are currently running the collector */
    int collecting;
    /* list of uncollectable objects */
    PyObject *garbage;
    /* a list of callbacks to be invoked when collection is performed */
    PyObject *callbacks;

    /* This is the number of objects that survived the last full
       collection. It approximates the number of long lived objects
       tracked by the GC.

       (by "full collection", we mean a collection of the oldest
       generation). */
    Py_ssize_t long_lived_total;
    /* This is the number of objects that survived all "non-full"
       collections, and are awaiting to undergo a full collection for
       the first time. */
    Py_ssize_t long_lived_pending;

#ifdef Py_GIL_DISABLED
    /* gh-117783: Deferred reference counting is not fully implemented yet, so
       as a temporary measure we treat objects using deferred reference
       counting as immortal. The value may be zero, one, or a negative number:
         0: immortalize deferred RC objects once the first thread is created
         1: immortalize all deferred RC objects immediately
        <0: suppressed; don't immortalize objects */
    int immortalize;
#endif
};

#ifdef Py_GIL_DISABLED
struct _gc_thread_state {
    /* Thread-local allocation count. */
    Py_ssize_t alloc_count;
};
#endif


extern void _PyGC_InitState(struct _gc_runtime_state *);

extern Py_ssize_t _PyGC_Collect(PyThreadState *tstate, int generation,
                                _PyGC_Reason reason);
extern void _PyGC_CollectNoFail(PyThreadState *tstate);

/* Freeze objects tracked by the GC and ignore them in future collections. */
extern void _PyGC_Freeze(PyInterpreterState *interp);
/* Unfreezes objects placing them in the oldest generation */
extern void _PyGC_Unfreeze(PyInterpreterState *interp);
/* Number of frozen objects */
extern Py_ssize_t _PyGC_GetFreezeCount(PyInterpreterState *interp);

extern PyObject *_PyGC_GetObjects(PyInterpreterState *interp, int generation);
extern PyObject *_PyGC_GetReferrers(PyInterpreterState *interp, PyObject *objs);

// Functions to clear types free lists
extern void _PyGC_ClearAllFreeLists(PyInterpreterState *interp);
extern void _Py_ScheduleGC(PyThreadState *tstate);
extern void _Py_RunGC(PyThreadState *tstate);

#ifdef Py_GIL_DISABLED
// gh-117783: Immortalize objects that use deferred reference counting
extern void _PyGC_ImmortalizeDeferredObjects(PyInterpreterState *interp);
#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GC_H */
||||
32
Dependencies/Python/include/internal/pycore_genobject.h
vendored
Normal file
@@ -0,0 +1,32 @@
#ifndef Py_INTERNAL_GENOBJECT_H
#define Py_INTERNAL_GENOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_freelist.h"

PyAPI_FUNC(PyObject *)_PyGen_yf(PyGenObject *);
extern void _PyGen_Finalize(PyObject *self);

// Export for '_asyncio' shared extension
PyAPI_FUNC(int) _PyGen_SetStopIterationValue(PyObject *);

// Export for '_asyncio' shared extension
PyAPI_FUNC(int) _PyGen_FetchStopIterationValue(PyObject **);

PyAPI_FUNC(PyObject *)_PyCoro_GetAwaitableIter(PyObject *o);
extern PyObject *_PyAsyncGenValueWrapperNew(PyThreadState *state, PyObject *);

extern PyTypeObject _PyCoroWrapper_Type;
extern PyTypeObject _PyAsyncGenWrappedValue_Type;
extern PyTypeObject _PyAsyncGenAThrow_Type;

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GENOBJECT_H */
22
Dependencies/Python/include/internal/pycore_getopt.h
vendored
Normal file
@@ -0,0 +1,22 @@
#ifndef Py_INTERNAL_PYGETOPT_H
#define Py_INTERNAL_PYGETOPT_H

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

extern int _PyOS_opterr;
extern Py_ssize_t _PyOS_optind;
extern const wchar_t *_PyOS_optarg;

extern void _PyOS_ResetGetOpt(void);

typedef struct {
    const wchar_t *name;
    int has_arg;
    int val;
} _PyOS_LongOption;

extern int _PyOS_GetOpt(Py_ssize_t argc, wchar_t * const *argv, int *longindex);

#endif /* !Py_INTERNAL_PYGETOPT_H */
66
Dependencies/Python/include/internal/pycore_gil.h
vendored
Normal file
@@ -0,0 +1,66 @@
#ifndef Py_INTERNAL_GIL_H
#define Py_INTERNAL_GIL_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_condvar.h"       // PyCOND_T

#ifndef Py_HAVE_CONDVAR
# error You need either a POSIX-compatible or a Windows system!
#endif

/* Enable if you want to force the switching of threads at least
   every `interval`. */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING

struct _gil_runtime_state {
#ifdef Py_GIL_DISABLED
    /* If this GIL is disabled, enabled == 0.

       If this GIL is enabled transiently (most likely to initialize a module
       of unknown safety), enabled indicates the number of active transient
       requests.

       If this GIL is enabled permanently, enabled == INT_MAX.

       It must not be modified directly; use _PyEval_EnableGILTransiently(),
       _PyEval_EnableGILPermanently(), and _PyEval_DisableGIL()

       It is always read and written atomically, but a thread can assume its
       value will be stable as long as that thread is attached or knows that no
       other threads are attached (e.g., during a stop-the-world.). */
    int enabled;
#endif
    /* microseconds (the Python API uses seconds, though) */
    unsigned long interval;
    /* Last PyThreadState holding / having held the GIL. This helps us
       know whether anyone else was scheduled after we dropped the GIL. */
    PyThreadState* last_holder;
    /* Whether the GIL is already taken (-1 if uninitialized). This is
       atomic because it can be read without any lock taken in ceval.c. */
    int locked;
    /* Number of GIL switches since the beginning. */
    unsigned long switch_number;
    /* This condition variable allows one or several threads to wait
       until the GIL is released. In addition, the mutex also protects
       the above variables. */
    PyCOND_T cond;
    PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
    /* This condition variable helps the GIL-releasing thread wait for
       a GIL-awaiting thread to be scheduled and take the GIL. */
    PyCOND_T switch_cond;
    PyMUTEX_T switch_mutex;
#endif
};

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GIL_H */
105
Dependencies/Python/include/internal/pycore_global_objects.h
vendored
Normal file
@@ -0,0 +1,105 @@
#ifndef Py_INTERNAL_GLOBAL_OBJECTS_H
#define Py_INTERNAL_GLOBAL_OBJECTS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_context.h"         // _PyContextTokenMissing
#include "pycore_gc.h"              // _PyGC_Head_UNUSED
#include "pycore_global_strings.h"  // struct _Py_global_strings
#include "pycore_hamt.h"            // PyHamtNode_Bitmap
#include "pycore_hashtable.h"       // _Py_hashtable_t
#include "pycore_typeobject.h"      // pytype_slotdef


// These would be in pycore_long.h if it weren't for an include cycle.
#define _PY_NSMALLPOSINTS 257
#define _PY_NSMALLNEGINTS 5


// Only immutable objects should be considered runtime-global.
// All others must be per-interpreter.

#define _Py_GLOBAL_OBJECT(NAME) \
    _PyRuntime.static_objects.NAME
#define _Py_SINGLETON(NAME) \
    _Py_GLOBAL_OBJECT(singletons.NAME)

struct _Py_cached_objects {
    // XXX We could statically allocate the hashtable.
    _Py_hashtable_t *interned_strings;
};

struct _Py_static_objects {
    struct {
        /* Small integers are preallocated in this array so that they
         * can be shared.
         * The integers that are preallocated are those in the range
         * -_PY_NSMALLNEGINTS (inclusive) to _PY_NSMALLPOSINTS (exclusive).
         */
        PyLongObject small_ints[_PY_NSMALLNEGINTS + _PY_NSMALLPOSINTS];

        PyBytesObject bytes_empty;
        struct {
            PyBytesObject ob;
            char eos;
        } bytes_characters[256];

        struct _Py_global_strings strings;

        _PyGC_Head_UNUSED _tuple_empty_gc_not_used;
        PyTupleObject tuple_empty;

        _PyGC_Head_UNUSED _hamt_bitmap_node_empty_gc_not_used;
        PyHamtNode_Bitmap hamt_bitmap_node_empty;
        _PyContextTokenMissing context_token_missing;
    } singletons;
};
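
/* Editor's illustrative sketch (not part of the vendored header): how the
   _Py_SINGLETON() accessor above reaches the preallocated small ints, given
   the [-_PY_NSMALLNEGINTS, _PY_NSMALLPOSINTS) range documented above. The
   helper name is hypothetical; disabled so it does not affect compilation. */
#if 0
static PyObject *
small_int_singleton(int value)
{
    assert(-_PY_NSMALLNEGINTS <= value && value < _PY_NSMALLPOSINTS);
    /* index 0 holds -_PY_NSMALLNEGINTS, so shift the value up */
    return (PyObject *)&_Py_SINGLETON(small_ints)[value + _PY_NSMALLNEGINTS];
}
#endif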

#define _Py_INTERP_CACHED_OBJECT(interp, NAME) \
    (interp)->cached_objects.NAME

struct _Py_interp_cached_objects {
    PyObject *interned_strings;

    /* AST */
    PyObject *_unused_str_replace_inf;  // kept in 3.13 for ABI compatibility

    /* object.__reduce__ */
    PyObject *objreduce;
    PyObject *type_slots_pname;
    pytype_slotdef *type_slots_ptrs[MAX_EQUIV];

    /* TypeVar and related types */
    PyTypeObject *generic_type;
    PyTypeObject *typevar_type;
    PyTypeObject *typevartuple_type;
    PyTypeObject *paramspec_type;
    PyTypeObject *paramspecargs_type;
    PyTypeObject *paramspeckwargs_type;
};

#define _Py_INTERP_STATIC_OBJECT(interp, NAME) \
    (interp)->static_objects.NAME
#define _Py_INTERP_SINGLETON(interp, NAME) \
    _Py_INTERP_STATIC_OBJECT(interp, singletons.NAME)

struct _Py_interp_static_objects {
    struct {
        int _not_used;
        // hamt_empty is here instead of global because of its weakreflist.
        _PyGC_Head_UNUSED _hamt_empty_gc_not_used;
        PyHamtObject hamt_empty;
        PyBaseExceptionObject last_resort_memory_error;
    } singletons;
};


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GLOBAL_OBJECTS_H */
1554
Dependencies/Python/include/internal/pycore_global_objects_fini_generated.h
vendored
Normal file
File diff suppressed because it is too large
814
Dependencies/Python/include/internal/pycore_global_strings.h
vendored
Normal file
@@ -0,0 +1,814 @@
#ifndef Py_INTERNAL_GLOBAL_STRINGS_H
#define Py_INTERNAL_GLOBAL_STRINGS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// The data structure & init here are inspired by Tools/build/deepfreeze.py.

// All field names generated by ASCII_STR() have a common prefix,
// to help avoid collisions with keywords, macros, etc.

#define STRUCT_FOR_ASCII_STR(LITERAL) \
    struct { \
        PyASCIIObject _ascii; \
        uint8_t _data[sizeof(LITERAL)]; \
    }
#define STRUCT_FOR_STR(NAME, LITERAL) \
    STRUCT_FOR_ASCII_STR(LITERAL) _py_ ## NAME;
#define STRUCT_FOR_ID(NAME) \
    STRUCT_FOR_ASCII_STR(#NAME) _py_ ## NAME;

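/* Editor's illustrative note (not part of the vendored header): manually
   expanded, STRUCT_FOR_ID(__name__) declares an ASCII string object whose
   UTF-8 bytes live inline, right after the object header:

       struct {
           PyASCIIObject _ascii;
           uint8_t _data[sizeof("__name__")];
       } _py___name__;
*/
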
// XXX Order by frequency of use?

/* The following is auto-generated by Tools/build/generate_global_objects.py. */
struct _Py_global_strings {
    struct {
        STRUCT_FOR_STR(anon_dictcomp, "<dictcomp>")
        STRUCT_FOR_STR(anon_genexpr, "<genexpr>")
        STRUCT_FOR_STR(anon_lambda, "<lambda>")
        STRUCT_FOR_STR(anon_listcomp, "<listcomp>")
        STRUCT_FOR_STR(anon_module, "<module>")
        STRUCT_FOR_STR(anon_null, "<NULL>")
        STRUCT_FOR_STR(anon_setcomp, "<setcomp>")
        STRUCT_FOR_STR(anon_string, "<string>")
        STRUCT_FOR_STR(anon_unknown, "<unknown>")
        STRUCT_FOR_STR(dbl_close_br, "}}")
        STRUCT_FOR_STR(dbl_open_br, "{{")
        STRUCT_FOR_STR(dbl_percent, "%%")
        STRUCT_FOR_STR(defaults, ".defaults")
        STRUCT_FOR_STR(dot_locals, ".<locals>")
        STRUCT_FOR_STR(empty, "")
        STRUCT_FOR_STR(generic_base, ".generic_base")
        STRUCT_FOR_STR(json_decoder, "json.decoder")
        STRUCT_FOR_STR(kwdefaults, ".kwdefaults")
        STRUCT_FOR_STR(list_err, "list index out of range")
        STRUCT_FOR_STR(str_replace_inf, "1e309")
        STRUCT_FOR_STR(type_params, ".type_params")
        STRUCT_FOR_STR(utf_8, "utf-8")
    } literals;

    struct {
        STRUCT_FOR_ID(CANCELLED)
        STRUCT_FOR_ID(FINISHED)
        STRUCT_FOR_ID(False)
        STRUCT_FOR_ID(JSONDecodeError)
        STRUCT_FOR_ID(PENDING)
        STRUCT_FOR_ID(Py_Repr)
        STRUCT_FOR_ID(TextIOWrapper)
        STRUCT_FOR_ID(True)
        STRUCT_FOR_ID(WarningMessage)
        STRUCT_FOR_ID(_WindowsConsoleIO)
        STRUCT_FOR_ID(__IOBase_closed)
        STRUCT_FOR_ID(__abc_tpflags__)
        STRUCT_FOR_ID(__abs__)
        STRUCT_FOR_ID(__abstractmethods__)
        STRUCT_FOR_ID(__add__)
        STRUCT_FOR_ID(__aenter__)
        STRUCT_FOR_ID(__aexit__)
        STRUCT_FOR_ID(__aiter__)
        STRUCT_FOR_ID(__all__)
        STRUCT_FOR_ID(__and__)
        STRUCT_FOR_ID(__anext__)
        STRUCT_FOR_ID(__annotations__)
        STRUCT_FOR_ID(__args__)
        STRUCT_FOR_ID(__await__)
        STRUCT_FOR_ID(__bases__)
        STRUCT_FOR_ID(__bool__)
        STRUCT_FOR_ID(__buffer__)
        STRUCT_FOR_ID(__build_class__)
        STRUCT_FOR_ID(__builtins__)
        STRUCT_FOR_ID(__bytes__)
        STRUCT_FOR_ID(__call__)
        STRUCT_FOR_ID(__cantrace__)
        STRUCT_FOR_ID(__class__)
        STRUCT_FOR_ID(__class_getitem__)
        STRUCT_FOR_ID(__classcell__)
        STRUCT_FOR_ID(__classdict__)
        STRUCT_FOR_ID(__classdictcell__)
        STRUCT_FOR_ID(__complex__)
        STRUCT_FOR_ID(__contains__)
        STRUCT_FOR_ID(__copy__)
        STRUCT_FOR_ID(__ctypes_from_outparam__)
        STRUCT_FOR_ID(__del__)
        STRUCT_FOR_ID(__delattr__)
        STRUCT_FOR_ID(__delete__)
        STRUCT_FOR_ID(__delitem__)
        STRUCT_FOR_ID(__dict__)
        STRUCT_FOR_ID(__dictoffset__)
        STRUCT_FOR_ID(__dir__)
        STRUCT_FOR_ID(__divmod__)
        STRUCT_FOR_ID(__doc__)
        STRUCT_FOR_ID(__enter__)
        STRUCT_FOR_ID(__eq__)
        STRUCT_FOR_ID(__exit__)
        STRUCT_FOR_ID(__file__)
        STRUCT_FOR_ID(__firstlineno__)
        STRUCT_FOR_ID(__float__)
        STRUCT_FOR_ID(__floordiv__)
        STRUCT_FOR_ID(__format__)
        STRUCT_FOR_ID(__fspath__)
        STRUCT_FOR_ID(__ge__)
        STRUCT_FOR_ID(__get__)
        STRUCT_FOR_ID(__getattr__)
        STRUCT_FOR_ID(__getattribute__)
        STRUCT_FOR_ID(__getinitargs__)
        STRUCT_FOR_ID(__getitem__)
        STRUCT_FOR_ID(__getnewargs__)
        STRUCT_FOR_ID(__getnewargs_ex__)
        STRUCT_FOR_ID(__getstate__)
        STRUCT_FOR_ID(__gt__)
        STRUCT_FOR_ID(__hash__)
        STRUCT_FOR_ID(__iadd__)
        STRUCT_FOR_ID(__iand__)
        STRUCT_FOR_ID(__ifloordiv__)
        STRUCT_FOR_ID(__ilshift__)
        STRUCT_FOR_ID(__imatmul__)
        STRUCT_FOR_ID(__imod__)
        STRUCT_FOR_ID(__import__)
        STRUCT_FOR_ID(__imul__)
        STRUCT_FOR_ID(__index__)
        STRUCT_FOR_ID(__init__)
        STRUCT_FOR_ID(__init_subclass__)
        STRUCT_FOR_ID(__instancecheck__)
        STRUCT_FOR_ID(__int__)
        STRUCT_FOR_ID(__invert__)
        STRUCT_FOR_ID(__ior__)
        STRUCT_FOR_ID(__ipow__)
        STRUCT_FOR_ID(__irshift__)
        STRUCT_FOR_ID(__isabstractmethod__)
        STRUCT_FOR_ID(__isub__)
        STRUCT_FOR_ID(__iter__)
        STRUCT_FOR_ID(__itruediv__)
        STRUCT_FOR_ID(__ixor__)
        STRUCT_FOR_ID(__le__)
        STRUCT_FOR_ID(__len__)
        STRUCT_FOR_ID(__length_hint__)
        STRUCT_FOR_ID(__lltrace__)
        STRUCT_FOR_ID(__loader__)
        STRUCT_FOR_ID(__lshift__)
        STRUCT_FOR_ID(__lt__)
        STRUCT_FOR_ID(__main__)
        STRUCT_FOR_ID(__match_args__)
        STRUCT_FOR_ID(__matmul__)
        STRUCT_FOR_ID(__missing__)
        STRUCT_FOR_ID(__mod__)
        STRUCT_FOR_ID(__module__)
        STRUCT_FOR_ID(__mro_entries__)
        STRUCT_FOR_ID(__mul__)
        STRUCT_FOR_ID(__name__)
        STRUCT_FOR_ID(__ne__)
        STRUCT_FOR_ID(__neg__)
        STRUCT_FOR_ID(__new__)
        STRUCT_FOR_ID(__newobj__)
        STRUCT_FOR_ID(__newobj_ex__)
        STRUCT_FOR_ID(__next__)
        STRUCT_FOR_ID(__notes__)
        STRUCT_FOR_ID(__or__)
        STRUCT_FOR_ID(__orig_class__)
        STRUCT_FOR_ID(__origin__)
        STRUCT_FOR_ID(__package__)
        STRUCT_FOR_ID(__parameters__)
        STRUCT_FOR_ID(__path__)
        STRUCT_FOR_ID(__pos__)
        STRUCT_FOR_ID(__pow__)
        STRUCT_FOR_ID(__prepare__)
        STRUCT_FOR_ID(__qualname__)
        STRUCT_FOR_ID(__radd__)
        STRUCT_FOR_ID(__rand__)
        STRUCT_FOR_ID(__rdivmod__)
        STRUCT_FOR_ID(__reduce__)
        STRUCT_FOR_ID(__reduce_ex__)
        STRUCT_FOR_ID(__release_buffer__)
        STRUCT_FOR_ID(__repr__)
        STRUCT_FOR_ID(__reversed__)
        STRUCT_FOR_ID(__rfloordiv__)
        STRUCT_FOR_ID(__rlshift__)
        STRUCT_FOR_ID(__rmatmul__)
        STRUCT_FOR_ID(__rmod__)
        STRUCT_FOR_ID(__rmul__)
        STRUCT_FOR_ID(__ror__)
        STRUCT_FOR_ID(__round__)
        STRUCT_FOR_ID(__rpow__)
        STRUCT_FOR_ID(__rrshift__)
        STRUCT_FOR_ID(__rshift__)
        STRUCT_FOR_ID(__rsub__)
        STRUCT_FOR_ID(__rtruediv__)
        STRUCT_FOR_ID(__rxor__)
        STRUCT_FOR_ID(__set__)
        STRUCT_FOR_ID(__set_name__)
        STRUCT_FOR_ID(__setattr__)
        STRUCT_FOR_ID(__setitem__)
        STRUCT_FOR_ID(__setstate__)
        STRUCT_FOR_ID(__sizeof__)
        STRUCT_FOR_ID(__slotnames__)
        STRUCT_FOR_ID(__slots__)
        STRUCT_FOR_ID(__spec__)
        STRUCT_FOR_ID(__static_attributes__)
        STRUCT_FOR_ID(__str__)
        STRUCT_FOR_ID(__sub__)
        STRUCT_FOR_ID(__subclasscheck__)
        STRUCT_FOR_ID(__subclasshook__)
        STRUCT_FOR_ID(__truediv__)
        STRUCT_FOR_ID(__trunc__)
        STRUCT_FOR_ID(__type_params__)
        STRUCT_FOR_ID(__typing_is_unpacked_typevartuple__)
        STRUCT_FOR_ID(__typing_prepare_subst__)
        STRUCT_FOR_ID(__typing_subst__)
        STRUCT_FOR_ID(__typing_unpacked_tuple_args__)
        STRUCT_FOR_ID(__warningregistry__)
        STRUCT_FOR_ID(__weaklistoffset__)
        STRUCT_FOR_ID(__weakref__)
        STRUCT_FOR_ID(__xor__)
        STRUCT_FOR_ID(_abc_impl)
        STRUCT_FOR_ID(_abstract_)
        STRUCT_FOR_ID(_active)
        STRUCT_FOR_ID(_align_)
        STRUCT_FOR_ID(_annotation)
        STRUCT_FOR_ID(_anonymous_)
        STRUCT_FOR_ID(_argtypes_)
        STRUCT_FOR_ID(_as_parameter_)
        STRUCT_FOR_ID(_asyncio_future_blocking)
        STRUCT_FOR_ID(_blksize)
        STRUCT_FOR_ID(_bootstrap)
        STRUCT_FOR_ID(_check_retval_)
        STRUCT_FOR_ID(_dealloc_warn)
        STRUCT_FOR_ID(_feature_version)
        STRUCT_FOR_ID(_field_types)
        STRUCT_FOR_ID(_fields_)
        STRUCT_FOR_ID(_finalizing)
        STRUCT_FOR_ID(_find_and_load)
        STRUCT_FOR_ID(_fix_up_module)
        STRUCT_FOR_ID(_flags_)
        STRUCT_FOR_ID(_get_sourcefile)
        STRUCT_FOR_ID(_handle_fromlist)
        STRUCT_FOR_ID(_initializing)
        STRUCT_FOR_ID(_io)
        STRUCT_FOR_ID(_is_text_encoding)
        STRUCT_FOR_ID(_length_)
        STRUCT_FOR_ID(_limbo)
        STRUCT_FOR_ID(_lock_unlock_module)
        STRUCT_FOR_ID(_loop)
        STRUCT_FOR_ID(_needs_com_addref_)
        STRUCT_FOR_ID(_only_immortal)
        STRUCT_FOR_ID(_pack_)
        STRUCT_FOR_ID(_restype_)
        STRUCT_FOR_ID(_showwarnmsg)
        STRUCT_FOR_ID(_shutdown)
        STRUCT_FOR_ID(_slotnames)
        STRUCT_FOR_ID(_strptime)
        STRUCT_FOR_ID(_strptime_datetime)
        STRUCT_FOR_ID(_swappedbytes_)
        STRUCT_FOR_ID(_type_)
        STRUCT_FOR_ID(_uninitialized_submodules)
        STRUCT_FOR_ID(_warn_unawaited_coroutine)
        STRUCT_FOR_ID(_xoptions)
        STRUCT_FOR_ID(abs_tol)
        STRUCT_FOR_ID(access)
        STRUCT_FOR_ID(aclose)
        STRUCT_FOR_ID(add)
        STRUCT_FOR_ID(add_done_callback)
        STRUCT_FOR_ID(after_in_child)
        STRUCT_FOR_ID(after_in_parent)
        STRUCT_FOR_ID(aggregate_class)
        STRUCT_FOR_ID(alias)
        STRUCT_FOR_ID(allow_code)
        STRUCT_FOR_ID(append)
        STRUCT_FOR_ID(arg)
        STRUCT_FOR_ID(argdefs)
        STRUCT_FOR_ID(args)
        STRUCT_FOR_ID(arguments)
        STRUCT_FOR_ID(argv)
        STRUCT_FOR_ID(as_integer_ratio)
        STRUCT_FOR_ID(asend)
        STRUCT_FOR_ID(ast)
        STRUCT_FOR_ID(athrow)
        STRUCT_FOR_ID(attribute)
        STRUCT_FOR_ID(authorizer_callback)
        STRUCT_FOR_ID(autocommit)
        STRUCT_FOR_ID(backtick)
        STRUCT_FOR_ID(base)
        STRUCT_FOR_ID(before)
        STRUCT_FOR_ID(big)
        STRUCT_FOR_ID(binary_form)
        STRUCT_FOR_ID(block)
        STRUCT_FOR_ID(bound)
        STRUCT_FOR_ID(buffer)
        STRUCT_FOR_ID(buffer_callback)
        STRUCT_FOR_ID(buffer_size)
        STRUCT_FOR_ID(buffering)
        STRUCT_FOR_ID(buffers)
        STRUCT_FOR_ID(bufsize)
        STRUCT_FOR_ID(builtins)
        STRUCT_FOR_ID(byteorder)
        STRUCT_FOR_ID(bytes)
        STRUCT_FOR_ID(bytes_per_sep)
        STRUCT_FOR_ID(c_call)
        STRUCT_FOR_ID(c_exception)
        STRUCT_FOR_ID(c_return)
        STRUCT_FOR_ID(cached_datetime_module)
        STRUCT_FOR_ID(cached_statements)
        STRUCT_FOR_ID(cadata)
        STRUCT_FOR_ID(cafile)
        STRUCT_FOR_ID(call)
        STRUCT_FOR_ID(call_exception_handler)
        STRUCT_FOR_ID(call_soon)
        STRUCT_FOR_ID(callback)
        STRUCT_FOR_ID(cancel)
        STRUCT_FOR_ID(capath)
        STRUCT_FOR_ID(category)
        STRUCT_FOR_ID(cb_type)
        STRUCT_FOR_ID(certfile)
        STRUCT_FOR_ID(check_same_thread)
        STRUCT_FOR_ID(clear)
        STRUCT_FOR_ID(close)
        STRUCT_FOR_ID(closed)
        STRUCT_FOR_ID(closefd)
        STRUCT_FOR_ID(closure)
        STRUCT_FOR_ID(co_argcount)
        STRUCT_FOR_ID(co_cellvars)
        STRUCT_FOR_ID(co_code)
        STRUCT_FOR_ID(co_consts)
        STRUCT_FOR_ID(co_exceptiontable)
        STRUCT_FOR_ID(co_filename)
        STRUCT_FOR_ID(co_firstlineno)
        STRUCT_FOR_ID(co_flags)
        STRUCT_FOR_ID(co_freevars)
        STRUCT_FOR_ID(co_kwonlyargcount)
        STRUCT_FOR_ID(co_linetable)
        STRUCT_FOR_ID(co_name)
        STRUCT_FOR_ID(co_names)
        STRUCT_FOR_ID(co_nlocals)
        STRUCT_FOR_ID(co_posonlyargcount)
        STRUCT_FOR_ID(co_qualname)
        STRUCT_FOR_ID(co_stacksize)
        STRUCT_FOR_ID(co_varnames)
        STRUCT_FOR_ID(code)
        STRUCT_FOR_ID(col_offset)
        STRUCT_FOR_ID(command)
        STRUCT_FOR_ID(comment_factory)
        STRUCT_FOR_ID(compile_mode)
        STRUCT_FOR_ID(consts)
        STRUCT_FOR_ID(context)
        STRUCT_FOR_ID(contravariant)
        STRUCT_FOR_ID(cookie)
        STRUCT_FOR_ID(copy)
        STRUCT_FOR_ID(copyreg)
        STRUCT_FOR_ID(coro)
        STRUCT_FOR_ID(count)
        STRUCT_FOR_ID(covariant)
        STRUCT_FOR_ID(cwd)
        STRUCT_FOR_ID(data)
        STRUCT_FOR_ID(database)
        STRUCT_FOR_ID(day)
        STRUCT_FOR_ID(decode)
        STRUCT_FOR_ID(decoder)
        STRUCT_FOR_ID(default)
        STRUCT_FOR_ID(defaultaction)
        STRUCT_FOR_ID(delete)
        STRUCT_FOR_ID(depth)
        STRUCT_FOR_ID(desired_access)
        STRUCT_FOR_ID(detect_types)
        STRUCT_FOR_ID(deterministic)
        STRUCT_FOR_ID(device)
        STRUCT_FOR_ID(dict)
        STRUCT_FOR_ID(dictcomp)
        STRUCT_FOR_ID(difference_update)
        STRUCT_FOR_ID(digest)
        STRUCT_FOR_ID(digest_size)
        STRUCT_FOR_ID(digestmod)
        STRUCT_FOR_ID(dir_fd)
        STRUCT_FOR_ID(discard)
        STRUCT_FOR_ID(dispatch_table)
        STRUCT_FOR_ID(displayhook)
        STRUCT_FOR_ID(dklen)
        STRUCT_FOR_ID(doc)
        STRUCT_FOR_ID(dont_inherit)
        STRUCT_FOR_ID(dst)
        STRUCT_FOR_ID(dst_dir_fd)
        STRUCT_FOR_ID(eager_start)
        STRUCT_FOR_ID(effective_ids)
        STRUCT_FOR_ID(element_factory)
        STRUCT_FOR_ID(encode)
        STRUCT_FOR_ID(encoding)
        STRUCT_FOR_ID(end)
        STRUCT_FOR_ID(end_col_offset)
        STRUCT_FOR_ID(end_lineno)
        STRUCT_FOR_ID(end_offset)
        STRUCT_FOR_ID(endpos)
        STRUCT_FOR_ID(entrypoint)
        STRUCT_FOR_ID(env)
        STRUCT_FOR_ID(errors)
        STRUCT_FOR_ID(event)
        STRUCT_FOR_ID(eventmask)
        STRUCT_FOR_ID(exc_type)
        STRUCT_FOR_ID(exc_value)
        STRUCT_FOR_ID(excepthook)
        STRUCT_FOR_ID(exception)
        STRUCT_FOR_ID(existing_file_name)
        STRUCT_FOR_ID(exp)
        STRUCT_FOR_ID(extend)
        STRUCT_FOR_ID(extra_tokens)
        STRUCT_FOR_ID(facility)
        STRUCT_FOR_ID(factory)
        STRUCT_FOR_ID(false)
        STRUCT_FOR_ID(family)
        STRUCT_FOR_ID(fanout)
        STRUCT_FOR_ID(fd)
        STRUCT_FOR_ID(fd2)
        STRUCT_FOR_ID(fdel)
        STRUCT_FOR_ID(fget)
        STRUCT_FOR_ID(file)
        STRUCT_FOR_ID(file_actions)
        STRUCT_FOR_ID(filename)
        STRUCT_FOR_ID(fileno)
        STRUCT_FOR_ID(filepath)
        STRUCT_FOR_ID(fillvalue)
        STRUCT_FOR_ID(filter)
        STRUCT_FOR_ID(filters)
        STRUCT_FOR_ID(final)
        STRUCT_FOR_ID(find_class)
        STRUCT_FOR_ID(fix_imports)
        STRUCT_FOR_ID(flags)
        STRUCT_FOR_ID(flush)
        STRUCT_FOR_ID(fold)
        STRUCT_FOR_ID(follow_symlinks)
        STRUCT_FOR_ID(format)
        STRUCT_FOR_ID(from_param)
        STRUCT_FOR_ID(fromlist)
        STRUCT_FOR_ID(fromtimestamp)
        STRUCT_FOR_ID(fromutc)
        STRUCT_FOR_ID(fset)
        STRUCT_FOR_ID(func)
        STRUCT_FOR_ID(future)
        STRUCT_FOR_ID(generation)
        STRUCT_FOR_ID(genexpr)
        STRUCT_FOR_ID(get)
        STRUCT_FOR_ID(get_debug)
        STRUCT_FOR_ID(get_event_loop)
        STRUCT_FOR_ID(get_loop)
        STRUCT_FOR_ID(get_source)
        STRUCT_FOR_ID(getattr)
        STRUCT_FOR_ID(getstate)
        STRUCT_FOR_ID(gid)
        STRUCT_FOR_ID(globals)
        STRUCT_FOR_ID(groupindex)
        STRUCT_FOR_ID(groups)
        STRUCT_FOR_ID(handle)
        STRUCT_FOR_ID(handle_seq)
        STRUCT_FOR_ID(has_location)
        STRUCT_FOR_ID(hash_name)
        STRUCT_FOR_ID(header)
        STRUCT_FOR_ID(headers)
        STRUCT_FOR_ID(hi)
        STRUCT_FOR_ID(hook)
        STRUCT_FOR_ID(hour)
        STRUCT_FOR_ID(ident)
        STRUCT_FOR_ID(identity_hint)
        STRUCT_FOR_ID(ignore)
        STRUCT_FOR_ID(imag)
        STRUCT_FOR_ID(importlib)
        STRUCT_FOR_ID(in_fd)
        STRUCT_FOR_ID(incoming)
        STRUCT_FOR_ID(indexgroup)
        STRUCT_FOR_ID(inf)
        STRUCT_FOR_ID(infer_variance)
        STRUCT_FOR_ID(inherit_handle)
        STRUCT_FOR_ID(inheritable)
        STRUCT_FOR_ID(initial)
        STRUCT_FOR_ID(initial_bytes)
        STRUCT_FOR_ID(initial_owner)
        STRUCT_FOR_ID(initial_state)
        STRUCT_FOR_ID(initial_value)
        STRUCT_FOR_ID(initval)
        STRUCT_FOR_ID(inner_size)
        STRUCT_FOR_ID(input)
        STRUCT_FOR_ID(insert_comments)
        STRUCT_FOR_ID(insert_pis)
        STRUCT_FOR_ID(instructions)
        STRUCT_FOR_ID(intern)
        STRUCT_FOR_ID(intersection)
        STRUCT_FOR_ID(interval)
        STRUCT_FOR_ID(is_running)
        STRUCT_FOR_ID(isatty)
        STRUCT_FOR_ID(isinstance)
        STRUCT_FOR_ID(isoformat)
        STRUCT_FOR_ID(isolation_level)
        STRUCT_FOR_ID(istext)
        STRUCT_FOR_ID(item)
        STRUCT_FOR_ID(items)
        STRUCT_FOR_ID(iter)
        STRUCT_FOR_ID(iterable)
        STRUCT_FOR_ID(iterations)
        STRUCT_FOR_ID(join)
        STRUCT_FOR_ID(jump)
        STRUCT_FOR_ID(keepends)
        STRUCT_FOR_ID(key)
        STRUCT_FOR_ID(keyfile)
        STRUCT_FOR_ID(keys)
        STRUCT_FOR_ID(kind)
        STRUCT_FOR_ID(kw)
        STRUCT_FOR_ID(kw1)
        STRUCT_FOR_ID(kw2)
        STRUCT_FOR_ID(kwdefaults)
        STRUCT_FOR_ID(label)
        STRUCT_FOR_ID(lambda)
        STRUCT_FOR_ID(last)
        STRUCT_FOR_ID(last_exc)
        STRUCT_FOR_ID(last_node)
        STRUCT_FOR_ID(last_traceback)
        STRUCT_FOR_ID(last_type)
        STRUCT_FOR_ID(last_value)
        STRUCT_FOR_ID(latin1)
        STRUCT_FOR_ID(leaf_size)
        STRUCT_FOR_ID(len)
        STRUCT_FOR_ID(length)
        STRUCT_FOR_ID(level)
        STRUCT_FOR_ID(limit)
        STRUCT_FOR_ID(line)
        STRUCT_FOR_ID(line_buffering)
        STRUCT_FOR_ID(lineno)
        STRUCT_FOR_ID(listcomp)
        STRUCT_FOR_ID(little)
        STRUCT_FOR_ID(lo)
        STRUCT_FOR_ID(locale)
        STRUCT_FOR_ID(locals)
        STRUCT_FOR_ID(logoption)
        STRUCT_FOR_ID(loop)
        STRUCT_FOR_ID(manual_reset)
        STRUCT_FOR_ID(mapping)
        STRUCT_FOR_ID(match)
        STRUCT_FOR_ID(max_length)
        STRUCT_FOR_ID(maxdigits)
        STRUCT_FOR_ID(maxevents)
        STRUCT_FOR_ID(maxlen)
        STRUCT_FOR_ID(maxmem)
        STRUCT_FOR_ID(maxsplit)
        STRUCT_FOR_ID(maxvalue)
        STRUCT_FOR_ID(memLevel)
        STRUCT_FOR_ID(memlimit)
        STRUCT_FOR_ID(message)
        STRUCT_FOR_ID(metaclass)
        STRUCT_FOR_ID(metadata)
        STRUCT_FOR_ID(method)
        STRUCT_FOR_ID(microsecond)
        STRUCT_FOR_ID(milliseconds)
        STRUCT_FOR_ID(minute)
        STRUCT_FOR_ID(mod)
        STRUCT_FOR_ID(mode)
        STRUCT_FOR_ID(module)
        STRUCT_FOR_ID(module_globals)
        STRUCT_FOR_ID(modules)
        STRUCT_FOR_ID(month)
        STRUCT_FOR_ID(mro)
        STRUCT_FOR_ID(msg)
        STRUCT_FOR_ID(mutex)
        STRUCT_FOR_ID(mycmp)
        STRUCT_FOR_ID(n_arg)
        STRUCT_FOR_ID(n_fields)
        STRUCT_FOR_ID(n_sequence_fields)
        STRUCT_FOR_ID(n_unnamed_fields)
        STRUCT_FOR_ID(name)
        STRUCT_FOR_ID(name_from)
        STRUCT_FOR_ID(namespace_separator)
        STRUCT_FOR_ID(namespaces)
        STRUCT_FOR_ID(narg)
        STRUCT_FOR_ID(ndigits)
        STRUCT_FOR_ID(nested)
        STRUCT_FOR_ID(new_file_name)
        STRUCT_FOR_ID(new_limit)
        STRUCT_FOR_ID(newline)
        STRUCT_FOR_ID(newlines)
        STRUCT_FOR_ID(next)
        STRUCT_FOR_ID(nlocals)
        STRUCT_FOR_ID(node_depth)
        STRUCT_FOR_ID(node_offset)
        STRUCT_FOR_ID(ns)
        STRUCT_FOR_ID(nstype)
        STRUCT_FOR_ID(nt)
        STRUCT_FOR_ID(null)
        STRUCT_FOR_ID(number)
        STRUCT_FOR_ID(obj)
        STRUCT_FOR_ID(object)
        STRUCT_FOR_ID(offset)
        STRUCT_FOR_ID(offset_dst)
        STRUCT_FOR_ID(offset_src)
        STRUCT_FOR_ID(on_type_read)
        STRUCT_FOR_ID(onceregistry)
        STRUCT_FOR_ID(only_keys)
        STRUCT_FOR_ID(oparg)
        STRUCT_FOR_ID(opcode)
        STRUCT_FOR_ID(open)
        STRUCT_FOR_ID(opener)
        STRUCT_FOR_ID(operation)
        STRUCT_FOR_ID(optimize)
        STRUCT_FOR_ID(options)
        STRUCT_FOR_ID(order)
        STRUCT_FOR_ID(origin)
        STRUCT_FOR_ID(out_fd)
        STRUCT_FOR_ID(outgoing)
        STRUCT_FOR_ID(overlapped)
        STRUCT_FOR_ID(owner)
        STRUCT_FOR_ID(pages)
        STRUCT_FOR_ID(parent)
        STRUCT_FOR_ID(password)
        STRUCT_FOR_ID(path)
        STRUCT_FOR_ID(pattern)
        STRUCT_FOR_ID(peek)
        STRUCT_FOR_ID(persistent_id)
        STRUCT_FOR_ID(persistent_load)
        STRUCT_FOR_ID(person)
        STRUCT_FOR_ID(pi_factory)
        STRUCT_FOR_ID(pid)
        STRUCT_FOR_ID(policy)
        STRUCT_FOR_ID(pos)
        STRUCT_FOR_ID(pos1)
        STRUCT_FOR_ID(pos2)
        STRUCT_FOR_ID(posix)
        STRUCT_FOR_ID(print_file_and_line)
        STRUCT_FOR_ID(priority)
        STRUCT_FOR_ID(progress)
        STRUCT_FOR_ID(progress_handler)
        STRUCT_FOR_ID(progress_routine)
        STRUCT_FOR_ID(proto)
        STRUCT_FOR_ID(protocol)
        STRUCT_FOR_ID(ps1)
        STRUCT_FOR_ID(ps2)
        STRUCT_FOR_ID(query)
        STRUCT_FOR_ID(quotetabs)
        STRUCT_FOR_ID(raw)
        STRUCT_FOR_ID(read)
        STRUCT_FOR_ID(read1)
        STRUCT_FOR_ID(readable)
        STRUCT_FOR_ID(readall)
        STRUCT_FOR_ID(readinto)
        STRUCT_FOR_ID(readinto1)
        STRUCT_FOR_ID(readline)
        STRUCT_FOR_ID(readonly)
        STRUCT_FOR_ID(real)
        STRUCT_FOR_ID(reducer_override)
        STRUCT_FOR_ID(registry)
        STRUCT_FOR_ID(rel_tol)
        STRUCT_FOR_ID(release)
        STRUCT_FOR_ID(reload)
        STRUCT_FOR_ID(repl)
        STRUCT_FOR_ID(replace)
        STRUCT_FOR_ID(reserved)
        STRUCT_FOR_ID(reset)
        STRUCT_FOR_ID(resetids)
        STRUCT_FOR_ID(return)
        STRUCT_FOR_ID(reverse)
        STRUCT_FOR_ID(reversed)
        STRUCT_FOR_ID(salt)
        STRUCT_FOR_ID(sched_priority)
        STRUCT_FOR_ID(scheduler)
        STRUCT_FOR_ID(second)
        STRUCT_FOR_ID(security_attributes)
        STRUCT_FOR_ID(seek)
        STRUCT_FOR_ID(seekable)
        STRUCT_FOR_ID(selectors)
        STRUCT_FOR_ID(self)
        STRUCT_FOR_ID(send)
        STRUCT_FOR_ID(sep)
        STRUCT_FOR_ID(sequence)
        STRUCT_FOR_ID(server_hostname)
        STRUCT_FOR_ID(server_side)
        STRUCT_FOR_ID(session)
        STRUCT_FOR_ID(setcomp)
        STRUCT_FOR_ID(setpgroup)
        STRUCT_FOR_ID(setsid)
        STRUCT_FOR_ID(setsigdef)
        STRUCT_FOR_ID(setsigmask)
        STRUCT_FOR_ID(setstate)
        STRUCT_FOR_ID(shape)
        STRUCT_FOR_ID(show_cmd)
        STRUCT_FOR_ID(signed)
        STRUCT_FOR_ID(size)
        STRUCT_FOR_ID(sizehint)
        STRUCT_FOR_ID(skip_file_prefixes)
        STRUCT_FOR_ID(sleep)
        STRUCT_FOR_ID(sock)
        STRUCT_FOR_ID(sort)
        STRUCT_FOR_ID(source)
        STRUCT_FOR_ID(source_traceback)
        STRUCT_FOR_ID(spam)
        STRUCT_FOR_ID(src)
        STRUCT_FOR_ID(src_dir_fd)
        STRUCT_FOR_ID(stacklevel)
        STRUCT_FOR_ID(start)
        STRUCT_FOR_ID(statement)
        STRUCT_FOR_ID(status)
        STRUCT_FOR_ID(stderr)
        STRUCT_FOR_ID(stdin)
        STRUCT_FOR_ID(stdout)
        STRUCT_FOR_ID(step)
        STRUCT_FOR_ID(steps)
        STRUCT_FOR_ID(store_name)
        STRUCT_FOR_ID(strategy)
        STRUCT_FOR_ID(strftime)
        STRUCT_FOR_ID(strict)
        STRUCT_FOR_ID(strict_mode)
        STRUCT_FOR_ID(string)
        STRUCT_FOR_ID(sub_key)
        STRUCT_FOR_ID(symmetric_difference_update)
        STRUCT_FOR_ID(tabsize)
        STRUCT_FOR_ID(tag)
        STRUCT_FOR_ID(target)
        STRUCT_FOR_ID(target_is_directory)
        STRUCT_FOR_ID(task)
        STRUCT_FOR_ID(tb_frame)
        STRUCT_FOR_ID(tb_lasti)
        STRUCT_FOR_ID(tb_lineno)
        STRUCT_FOR_ID(tb_next)
        STRUCT_FOR_ID(tell)
        STRUCT_FOR_ID(template)
        STRUCT_FOR_ID(term)
        STRUCT_FOR_ID(text)
        STRUCT_FOR_ID(threading)
        STRUCT_FOR_ID(throw)
        STRUCT_FOR_ID(timeout)
        STRUCT_FOR_ID(times)
        STRUCT_FOR_ID(timetuple)
        STRUCT_FOR_ID(top)
        STRUCT_FOR_ID(trace_callback)
        STRUCT_FOR_ID(traceback)
        STRUCT_FOR_ID(trailers)
        STRUCT_FOR_ID(translate)
        STRUCT_FOR_ID(true)
        STRUCT_FOR_ID(truncate)
        STRUCT_FOR_ID(twice)
        STRUCT_FOR_ID(txt)
        STRUCT_FOR_ID(type)
        STRUCT_FOR_ID(type_params)
        STRUCT_FOR_ID(tz)
        STRUCT_FOR_ID(tzinfo)
        STRUCT_FOR_ID(tzname)
        STRUCT_FOR_ID(uid)
        STRUCT_FOR_ID(unlink)
        STRUCT_FOR_ID(unraisablehook)
        STRUCT_FOR_ID(uri)
        STRUCT_FOR_ID(usedforsecurity)
        STRUCT_FOR_ID(value)
        STRUCT_FOR_ID(values)
        STRUCT_FOR_ID(version)
        STRUCT_FOR_ID(volume)
        STRUCT_FOR_ID(wait_all)
        STRUCT_FOR_ID(warn_on_full_buffer)
        STRUCT_FOR_ID(warnings)
        STRUCT_FOR_ID(warnoptions)
        STRUCT_FOR_ID(wbits)
        STRUCT_FOR_ID(week)
        STRUCT_FOR_ID(weekday)
        STRUCT_FOR_ID(which)
        STRUCT_FOR_ID(who)
        STRUCT_FOR_ID(withdata)
        STRUCT_FOR_ID(writable)
        STRUCT_FOR_ID(write)
        STRUCT_FOR_ID(write_through)
        STRUCT_FOR_ID(year)
        STRUCT_FOR_ID(zdict)
    } identifiers;
    struct {
        PyASCIIObject _ascii;
        uint8_t _data[2];
    } ascii[128];
    struct {
        PyCompactUnicodeObject _latin1;
        uint8_t _data[2];
    } latin1[128];
};
/* End auto-generated code */

#undef ID
#undef STR


#define _Py_ID(NAME) \
     (_Py_SINGLETON(strings.identifiers._py_ ## NAME._ascii.ob_base))
#define _Py_STR(NAME) \
     (_Py_SINGLETON(strings.literals._py_ ## NAME._ascii.ob_base))
#define _Py_LATIN1_CHR(CH) \
    ((CH) < 128 \
     ? (PyObject*)&_Py_SINGLETON(strings).ascii[(CH)] \
     : (PyObject*)&_Py_SINGLETON(strings).latin1[(CH) - 128])

/* _Py_DECLARE_STR() should precede all uses of _Py_STR() in a function.

   This is true even if the same string has already been declared
   elsewhere, even in the same file. Mismatched duplicates are detected
   by Tools/scripts/generate-global-objects.py.

   Pairing _Py_DECLARE_STR() with every use of _Py_STR() makes sure the
   string keeps working even if the declaration is removed somewhere
   else. It also makes it clear what the actual string is at every
   place it is being used. */
#define _Py_DECLARE_STR(name, str)
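
/* Editor's illustrative sketch (not part of the vendored header): the
   pairing described in the comment above, using the `empty` entry from the
   literals table; disabled so it does not affect compilation. */
#if 0
static PyObject *
get_empty_str(void)
{
    _Py_DECLARE_STR(empty, "");
    return &_Py_STR(empty);
}
#endif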

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GLOBAL_STRINGS_H */
134
Dependencies/Python/include/internal/pycore_hamt.h
vendored
Normal file
@@ -0,0 +1,134 @@
#ifndef Py_INTERNAL_HAMT_H
#define Py_INTERNAL_HAMT_H

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


/*
HAMT tree is shaped by hashes of keys. Every group of 5 bits of a hash denotes
the exact position of the key in one level of the tree. Since we're using
32 bit hashes, we can have at most 7 such levels. Although if there are
two distinct keys with equal hashes, they will have to occupy the same
cell in the 7th level of the tree -- so we'd put them in a "collision" node.
Which brings the total possible tree depth to 8. Read more about the actual
layout of the HAMT tree in `hamt.c`.

This constant is used to define a data structure for storing iteration state.
*/
#define _Py_HAMT_MAX_TREE_DEPTH 8
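
/* Editor's illustrative sketch (not part of the vendored header): the depth
   bound above follows from 32-bit hashes consumed 5 bits per level
   (ceil(32/5) = 7 levels) plus one extra level for collision nodes. The
   5-bit slice indexing one level might be computed like this (hypothetical
   helper, disabled so it does not affect compilation): */
#if 0
static inline uint32_t
hamt_index_at_level(uint32_t hash, int level)
{
    return (hash >> (level * 5)) & 0x1f;   /* a position in 0..31 */
}
#endif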


extern PyTypeObject _PyHamt_Type;
extern PyTypeObject _PyHamt_ArrayNode_Type;
extern PyTypeObject _PyHamt_BitmapNode_Type;
extern PyTypeObject _PyHamt_CollisionNode_Type;
extern PyTypeObject _PyHamtKeys_Type;
extern PyTypeObject _PyHamtValues_Type;
extern PyTypeObject _PyHamtItems_Type;


/* other API */

#define PyHamt_Check(o) Py_IS_TYPE((o), &_PyHamt_Type)


/* Abstract tree node. */
typedef struct {
    PyObject_HEAD
} PyHamtNode;


/* An HAMT immutable mapping collection. */
typedef struct {
    PyObject_HEAD
    PyHamtNode *h_root;
    PyObject *h_weakreflist;
    Py_ssize_t h_count;
} PyHamtObject;


typedef struct {
    PyObject_VAR_HEAD
    uint32_t b_bitmap;
    PyObject *b_array[1];
} PyHamtNode_Bitmap;


/* A struct to hold the state of depth-first traverse of the tree.

   HAMT is an immutable collection. Iterators will hold a strong reference
   to it, and every node in the HAMT has strong references to its children.

   So for iterators, we can implement zero allocations and zero reference
   inc/dec depth-first iteration.

   - i_nodes: an array of seven pointers to tree nodes
   - i_level: the current node in i_nodes
   - i_pos: an array of positions within nodes in i_nodes.
*/
typedef struct {
    PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH];
    Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH];
    int8_t i_level;
} PyHamtIteratorState;


/* Base iterator object.

   Contains the iteration state, a pointer to the HAMT tree,
   and a pointer to the 'yield function'. The latter is a simple
   function that returns a key/value tuple for the 'Items' iterator,
   just a key for the 'Keys' iterator, and a value for the 'Values'
   iterator.
*/
typedef struct {
    PyObject_HEAD
    PyHamtObject *hi_obj;
    PyHamtIteratorState hi_iter;
    binaryfunc hi_yield;
} PyHamtIterator;


/* Create a new HAMT immutable mapping. */
PyHamtObject * _PyHamt_New(void);

/* Return a new collection based on "o", but with an additional
   key/val pair. */
PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val);

/* Return a new collection based on "o", but without "key". */
PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key);

/* Find "key" in the "o" collection.

   Return:
   - -1: An error occurred.
   - 0: "key" wasn't found in "o".
   - 1: "key" is in "o"; "*val" is set to its value (a borrowed ref).
*/
int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val);
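
/* Editor's illustrative sketch (not part of the vendored header): the
   intended handling of the three return values documented above; disabled
   so it does not affect compilation. */
#if 0
PyObject *val;
switch (_PyHamt_Find(map, key, &val)) {
case -1:
    /* an exception is set; propagate it */
    break;
case 0:
    /* key is absent */
    break;
case 1:
    /* val is a borrowed reference; INCREF it before storing it */
    break;
}
#endif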

/* Check if "v" is equal to "w".

   Return:
   - 0: v != w
   - 1: v == w
   - -1: An error occurred.
*/
int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w);

/* Return the size of "o"; equivalent of "len(o)". */
Py_ssize_t _PyHamt_Len(PyHamtObject *o);

/* Return a Keys iterator over "o". */
PyObject * _PyHamt_NewIterKeys(PyHamtObject *o);

/* Return a Values iterator over "o". */
PyObject * _PyHamt_NewIterValues(PyHamtObject *o);

/* Return an Items iterator over "o". */
PyObject * _PyHamt_NewIterItems(PyHamtObject *o);

#endif /* !Py_INTERNAL_HAMT_H */
150
Dependencies/Python/include/internal/pycore_hashtable.h
vendored
Normal file
@@ -0,0 +1,150 @@
#ifndef Py_INTERNAL_HASHTABLE_H
#define Py_INTERNAL_HASHTABLE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

/* Single linked list */

typedef struct _Py_slist_item_s {
    struct _Py_slist_item_s *next;
} _Py_slist_item_t;

typedef struct {
    _Py_slist_item_t *head;
} _Py_slist_t;

#define _Py_SLIST_ITEM_NEXT(ITEM) _Py_RVALUE(((_Py_slist_item_t *)(ITEM))->next)

#define _Py_SLIST_HEAD(SLIST) _Py_RVALUE(((_Py_slist_t *)(SLIST))->head)


/* _Py_hashtable: table entry */

typedef struct {
    /* used by _Py_hashtable_t.buckets to link entries */
    _Py_slist_item_t _Py_slist_item;

    Py_uhash_t key_hash;
    void *key;
    void *value;
} _Py_hashtable_entry_t;


/* _Py_hashtable: prototypes */

/* Forward declaration */
struct _Py_hashtable_t;
typedef struct _Py_hashtable_t _Py_hashtable_t;

typedef Py_uhash_t (*_Py_hashtable_hash_func) (const void *key);
typedef int (*_Py_hashtable_compare_func) (const void *key1, const void *key2);
typedef void (*_Py_hashtable_destroy_func) (void *key);
typedef _Py_hashtable_entry_t* (*_Py_hashtable_get_entry_func)(_Py_hashtable_t *ht,
                                                               const void *key);

typedef struct {
    // Allocate a memory block
    void* (*malloc) (size_t size);

    // Release a memory block
    void (*free) (void *ptr);
} _Py_hashtable_allocator_t;


/* _Py_hashtable: table */
struct _Py_hashtable_t {
    size_t nentries;            // Total number of entries in the table
    size_t nbuckets;
    _Py_slist_t *buckets;

    _Py_hashtable_get_entry_func get_entry_func;
    _Py_hashtable_hash_func hash_func;
    _Py_hashtable_compare_func compare_func;
    _Py_hashtable_destroy_func key_destroy_func;
    _Py_hashtable_destroy_func value_destroy_func;
    _Py_hashtable_allocator_t alloc;
};

// Export _Py_hashtable functions for '_testinternalcapi' shared extension
PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new(
    _Py_hashtable_hash_func hash_func,
    _Py_hashtable_compare_func compare_func);

/* Hash a pointer (void*) */
PyAPI_FUNC(Py_uhash_t) _Py_hashtable_hash_ptr(const void *key);

/* Comparison using memcmp() */
PyAPI_FUNC(int) _Py_hashtable_compare_direct(
    const void *key1,
    const void *key2);

PyAPI_FUNC(_Py_hashtable_t *) _Py_hashtable_new_full(
    _Py_hashtable_hash_func hash_func,
    _Py_hashtable_compare_func compare_func,
    _Py_hashtable_destroy_func key_destroy_func,
    _Py_hashtable_destroy_func value_destroy_func,
    _Py_hashtable_allocator_t *allocator);

PyAPI_FUNC(void) _Py_hashtable_destroy(_Py_hashtable_t *ht);

PyAPI_FUNC(void) _Py_hashtable_clear(_Py_hashtable_t *ht);

typedef int (*_Py_hashtable_foreach_func) (_Py_hashtable_t *ht,
                                           const void *key, const void *value,
                                           void *user_data);

/* Call func() on each entry of the hashtable.
   Iteration stops if func() result is non-zero, in this case it's the result
   of the call. Otherwise, the function returns 0. */
PyAPI_FUNC(int) _Py_hashtable_foreach(
    _Py_hashtable_t *ht,
    _Py_hashtable_foreach_func func,
    void *user_data);
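
/* Editor's illustrative sketch (not part of the vendored header): a typical
   create/insert/iterate/destroy cycle with the functions declared above,
   using pointer hashing and direct comparison; disabled so it does not
   affect compilation. */
#if 0
static int
count_entry(_Py_hashtable_t *ht, const void *key, const void *value,
            void *user_data)
{
    size_t *count = (size_t *)user_data;
    (*count)++;
    return 0;   /* non-zero would stop the iteration */
}

static void
example(void)
{
    _Py_hashtable_t *ht = _Py_hashtable_new(_Py_hashtable_hash_ptr,
                                            _Py_hashtable_compare_direct);
    if (ht == NULL) {
        return;   /* memory error */
    }
    int key = 0, value = 0;   /* any pointers can serve as key/value */
    (void)_Py_hashtable_set(ht, &key, &value);
    size_t count = 0;
    (void)_Py_hashtable_foreach(ht, count_entry, &count);
    _Py_hashtable_destroy(ht);
}
#endif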

PyAPI_FUNC(size_t) _Py_hashtable_size(const _Py_hashtable_t *ht);
PyAPI_FUNC(size_t) _Py_hashtable_len(const _Py_hashtable_t *ht);

/* Add a new entry to the hash. The key must not be present in the hash table.
   Return 0 on success, -1 on memory error. */
PyAPI_FUNC(int) _Py_hashtable_set(
    _Py_hashtable_t *ht,
    const void *key,
    void *value);


/* Get an entry.
   Return NULL if the key does not exist. */
static inline _Py_hashtable_entry_t *
_Py_hashtable_get_entry(_Py_hashtable_t *ht, const void *key)
{
    return ht->get_entry_func(ht, key);
}


/* Get value from an entry.
   Return NULL if the entry is not found.

   Use _Py_hashtable_get_entry() to distinguish entry value equal to NULL
   and entry not found. */
PyAPI_FUNC(void*) _Py_hashtable_get(_Py_hashtable_t *ht, const void *key);
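
/* Editor's illustrative sketch (not part of the vendored header): the
   distinction the comment above draws between a missing key and a stored
   NULL value; disabled so it does not affect compilation. */
#if 0
_Py_hashtable_entry_t *entry = _Py_hashtable_get_entry(ht, key);
if (entry == NULL) {
    /* the key is not in the table */
}
else {
    void *value = entry->value;   /* may legitimately be NULL */
}
#endif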


/* Remove a key and its associated value without calling key and value destroy
   functions.

   Return the removed value if the key was found.
   Return NULL if the key was not found. */
PyAPI_FUNC(void*) _Py_hashtable_steal(
    _Py_hashtable_t *ht,
    const void *key);


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_HASHTABLE_H */
20
Dependencies/Python/include/internal/pycore_identifier.h
vendored
Normal file
@@ -0,0 +1,20 @@
/* String Literals: _Py_Identifier API */

#ifndef Py_INTERNAL_IDENTIFIER_H
#define Py_INTERNAL_IDENTIFIER_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

extern PyObject* _PyType_LookupId(PyTypeObject *, _Py_Identifier *);
extern PyObject* _PyObject_LookupSpecialId(PyObject *, _Py_Identifier *);
extern int _PyObject_SetAttrId(PyObject *, _Py_Identifier *, PyObject *);

#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_IDENTIFIER_H
213
Dependencies/Python/include/internal/pycore_import.h
vendored
Normal file
@@ -0,0 +1,213 @@
#ifndef Py_LIMITED_API
#ifndef Py_INTERNAL_IMPORT_H
#define Py_INTERNAL_IMPORT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_lock.h"          // PyMutex
#include "pycore_hashtable.h"     // _Py_hashtable_t

extern int _PyImport_IsInitialized(PyInterpreterState *);

// Export for 'pyexpat' shared extension
PyAPI_FUNC(int) _PyImport_SetModule(PyObject *name, PyObject *module);

extern int _PyImport_SetModuleString(const char *name, PyObject* module);

extern void _PyImport_AcquireLock(PyInterpreterState *interp);
extern void _PyImport_ReleaseLock(PyInterpreterState *interp);
extern void _PyImport_ReInitLock(PyInterpreterState *interp);

// This is used exclusively for the sys and builtins modules:
extern int _PyImport_FixupBuiltin(
    PyThreadState *tstate,
    PyObject *mod,
    const char *name,            /* UTF-8 encoded string */
    PyObject *modules
    );

// Export for many shared extensions, like '_json'
PyAPI_FUNC(PyObject*) _PyImport_GetModuleAttr(PyObject *, PyObject *);

// Export for many shared extensions, like '_datetime'
PyAPI_FUNC(PyObject*) _PyImport_GetModuleAttrString(const char *, const char *);


struct _import_runtime_state {
    /* The builtin modules (defined in config.c). */
    struct _inittab *inittab;
    /* The most recent value assigned to a PyModuleDef.m_base.m_index.
       This is incremented each time PyModuleDef_Init() is called,
       which is just about every time an extension module is imported.
       See PyInterpreterState.modules_by_index for more info. */
    Py_ssize_t last_module_index;
    struct {
        /* A lock to guard the cache. */
        PyMutex mutex;
        /* The actual cache of (filename, name, PyModuleDef) for modules.
           Only legacy (single-phase init) extension modules are added
           and only if they support multiple initialization (m_size >= 0)
           or are imported in the main interpreter.
           This is initialized lazily in fix_up_extension() in import.c.
           Modules are added there and looked up in _imp.find_extension(). */
        _Py_hashtable_t *hashtable;
    } extensions;
    /* Package context -- the full module name for package imports */
    const char * pkgcontext;
};

struct _import_state {
    /* cached sys.modules dictionary */
    PyObject *modules;
    /* This is the list of module objects for all legacy (single-phase init)
       extension modules ever loaded in this process (i.e. imported
       in this interpreter or in any other). Py_None stands in for
       modules that haven't actually been imported in this interpreter.

       A module's index (PyModuleDef.m_base.m_index) is used to look up
       the corresponding module object for this interpreter, if any.
       (See PyState_FindModule().) When any extension module
       is initialized during import, its moduledef gets initialized by
       PyModuleDef_Init(), and the first time that happens for each
       PyModuleDef, its index gets set to the current value of
       a global counter (see _PyRuntimeState.imports.last_module_index).
       The entry for that index in this interpreter remains unset until
       the module is actually imported here. (Py_None is used as
       a placeholder.) Note that multi-phase init modules always get
       an index for which there will never be a module set.

       This is initialized lazily in PyState_AddModule(), which is also
       where modules get added. */
    PyObject *modules_by_index;
    /* importlib module._bootstrap */
    PyObject *importlib;
    /* override for config->use_frozen_modules (for tests)
       (-1: "off", 1: "on", 0: no override) */
    int override_frozen_modules;
    int override_multi_interp_extensions_check;
#ifdef HAVE_DLOPEN
    int dlopenflags;
#endif
    PyObject *import_func;
    /* The global import lock. */
    _PyRecursiveMutex lock;
    /* diagnostic info in PyImport_ImportModuleLevelObject() */
    struct {
        int import_level;
        PyTime_t accumulated;
        int header;
    } find_and_load;
};
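
/* Editor's illustrative sketch (not part of the vendored header): the lookup
   path that the modules_by_index comment above describes, as seen from a
   single-phase extension module; the module-def name is hypothetical and the
   block is disabled so it does not affect compilation. */
#if 0
static struct PyModuleDef examplemodule_def;   /* hypothetical */

PyObject *mod = PyState_FindModule(&examplemodule_def);
if (mod == NULL) {
    /* not yet imported in this interpreter (its slot holds Py_None) */
}
#endif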

#ifdef HAVE_DLOPEN
# include <dlfcn.h>               // RTLD_NOW, RTLD_LAZY
# if HAVE_DECL_RTLD_NOW
#  define _Py_DLOPEN_FLAGS RTLD_NOW
# else
#  define _Py_DLOPEN_FLAGS RTLD_LAZY
# endif
# define DLOPENFLAGS_INIT .dlopenflags = _Py_DLOPEN_FLAGS,
#else
# define _Py_DLOPEN_FLAGS 0
# define DLOPENFLAGS_INIT
#endif

#define IMPORTS_INIT \
    { \
        DLOPENFLAGS_INIT \
        .find_and_load = { \
            .header = 1, \
        }, \
    }

extern void _PyImport_ClearCore(PyInterpreterState *interp);

extern Py_ssize_t _PyImport_GetNextModuleIndex(void);
extern const char * _PyImport_ResolveNameWithPackageContext(const char *name);
extern const char * _PyImport_SwapPackageContext(const char *newcontext);

extern int _PyImport_GetDLOpenFlags(PyInterpreterState *interp);
extern void _PyImport_SetDLOpenFlags(PyInterpreterState *interp, int new_val);

extern PyObject * _PyImport_InitModules(PyInterpreterState *interp);
extern PyObject * _PyImport_GetModules(PyInterpreterState *interp);
extern void _PyImport_ClearModules(PyInterpreterState *interp);

extern void _PyImport_ClearModulesByIndex(PyInterpreterState *interp);

extern int _PyImport_InitDefaultImportFunc(PyInterpreterState *interp);
extern int _PyImport_IsDefaultImportFunc(
    PyInterpreterState *interp,
    PyObject *func);

extern PyObject * _PyImport_GetImportlibLoader(
    PyInterpreterState *interp,
    const char *loader_name);
extern PyObject * _PyImport_GetImportlibExternalLoader(
    PyInterpreterState *interp,
    const char *loader_name);
extern PyObject * _PyImport_BlessMyLoader(
    PyInterpreterState *interp,
    PyObject *module_globals);
extern PyObject * _PyImport_ImportlibModuleRepr(
    PyInterpreterState *interp,
    PyObject *module);


extern PyStatus _PyImport_Init(void);
extern void _PyImport_Fini(void);
extern void _PyImport_Fini2(void);

extern PyStatus _PyImport_InitCore(
    PyThreadState *tstate,
    PyObject *sysmod,
    int importlib);
extern PyStatus _PyImport_InitExternal(PyThreadState *tstate);
extern void _PyImport_FiniCore(PyInterpreterState *interp);
extern void _PyImport_FiniExternal(PyInterpreterState *interp);


extern PyObject* _PyImport_GetBuiltinModuleNames(void);

struct _module_alias {
    const char *name;                 /* ASCII encoded string */
    const char *orig;                 /* ASCII encoded string */
};

// Export these 3 symbols for test_ctypes
PyAPI_DATA(const struct _frozen*) _PyImport_FrozenBootstrap;
PyAPI_DATA(const struct _frozen*) _PyImport_FrozenStdlib;
PyAPI_DATA(const struct _frozen*) _PyImport_FrozenTest;

extern const struct _module_alias * _PyImport_FrozenAliases;

extern int _PyImport_CheckSubinterpIncompatibleExtensionAllowed(
    const char *name);


// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(int) _PyImport_ClearExtension(PyObject *name, PyObject *filename);

#ifdef Py_GIL_DISABLED
// Assuming that the GIL is enabled from a call to
// _PyEval_EnableGILTransient(), resolve the transient request depending on the
// state of the module argument:
// - If module is NULL or a PyModuleObject with md_gil == Py_MOD_GIL_NOT_USED,
//   call _PyEval_DisableGIL().
// - Otherwise, call _PyEval_EnableGILPermanent(). If the GIL was not already
//   enabled permanently, issue a warning referencing the module's name.
//
// This function may raise an exception.
extern int _PyImport_CheckGILForModule(PyObject *module, PyObject *module_name);
#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_IMPORT_H */
#endif /* !Py_LIMITED_API */
139
Dependencies/Python/include/internal/pycore_importdl.h
vendored
Normal file
139
Dependencies/Python/include/internal/pycore_importdl.h
vendored
Normal file
@@ -0,0 +1,139 @@
#ifndef Py_INTERNAL_IMPORTDL_H
#define Py_INTERNAL_IMPORTDL_H

#include "patchlevel.h"           // PY_MAJOR_VERSION

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif


extern const char *_PyImport_DynLoadFiletab[];


typedef enum ext_module_kind {
    _Py_ext_module_kind_UNKNOWN = 0,
    _Py_ext_module_kind_SINGLEPHASE = 1,
    _Py_ext_module_kind_MULTIPHASE = 2,
    _Py_ext_module_kind_INVALID = 3,
} _Py_ext_module_kind;

typedef enum ext_module_origin {
    _Py_ext_module_origin_CORE = 1,
    _Py_ext_module_origin_BUILTIN = 2,
    _Py_ext_module_origin_DYNAMIC = 3,
} _Py_ext_module_origin;

/* Input for loading an extension module. */
struct _Py_ext_module_loader_info {
    PyObject *filename;
#ifndef MS_WINDOWS
    PyObject *filename_encoded;
#endif
    PyObject *name;
    PyObject *name_encoded;
    /* path is always a borrowed ref of name or filename,
     * depending on whether it's builtin or not. */
    PyObject *path;
    _Py_ext_module_origin origin;
    const char *hook_prefix;
    const char *newcontext;
};
extern void _Py_ext_module_loader_info_clear(
    struct _Py_ext_module_loader_info *info);
extern int _Py_ext_module_loader_info_init(
    struct _Py_ext_module_loader_info *info,
    PyObject *name,
    PyObject *filename,
    _Py_ext_module_origin origin);
extern int _Py_ext_module_loader_info_init_for_core(
    struct _Py_ext_module_loader_info *p_info,
    PyObject *name);
extern int _Py_ext_module_loader_info_init_for_builtin(
    struct _Py_ext_module_loader_info *p_info,
    PyObject *name);
#ifdef HAVE_DYNAMIC_LOADING
extern int _Py_ext_module_loader_info_init_from_spec(
    struct _Py_ext_module_loader_info *info,
    PyObject *spec);
#endif

/* The result from running an extension module's init function. */
struct _Py_ext_module_loader_result {
    PyModuleDef *def;
    PyObject *module;
    _Py_ext_module_kind kind;
    struct _Py_ext_module_loader_result_error *err;
    struct _Py_ext_module_loader_result_error {
        enum _Py_ext_module_loader_result_error_kind {
            _Py_ext_module_loader_result_EXCEPTION = 0,
            _Py_ext_module_loader_result_ERR_MISSING = 1,
            _Py_ext_module_loader_result_ERR_UNREPORTED_EXC = 2,
            _Py_ext_module_loader_result_ERR_UNINITIALIZED = 3,
            _Py_ext_module_loader_result_ERR_NONASCII_NOT_MULTIPHASE = 4,
            _Py_ext_module_loader_result_ERR_NOT_MODULE = 5,
            _Py_ext_module_loader_result_ERR_MISSING_DEF = 6,
        } kind;
        PyObject *exc;
    } _err;
};
extern void _Py_ext_module_loader_result_clear(
    struct _Py_ext_module_loader_result *res);
extern void _Py_ext_module_loader_result_apply_error(
    struct _Py_ext_module_loader_result *res,
    const char *name);

/* The module init function. */
typedef PyObject *(*PyModInitFunction)(void);
#ifdef HAVE_DYNAMIC_LOADING
extern PyModInitFunction _PyImport_GetModInitFunc(
    struct _Py_ext_module_loader_info *info,
    FILE *fp);
#endif
extern int _PyImport_RunModInitFunc(
    PyModInitFunction p0,
    struct _Py_ext_module_loader_info *info,
    struct _Py_ext_module_loader_result *p_res);


/* Max length of module suffix searched for -- accommodates "module.slb" */
#define MAXSUFFIXSIZE 12

#ifdef MS_WINDOWS
#include <windows.h>
typedef FARPROC dl_funcptr;

#ifdef _DEBUG
#  define PYD_DEBUG_SUFFIX "_d"
#else
#  define PYD_DEBUG_SUFFIX ""
#endif

#ifdef Py_GIL_DISABLED
#  define PYD_THREADING_TAG "t"
#else
#  define PYD_THREADING_TAG ""
#endif

#ifdef PYD_PLATFORM_TAG
#  define PYD_SOABI "cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) PYD_THREADING_TAG "-" PYD_PLATFORM_TAG
#else
#  define PYD_SOABI "cp" Py_STRINGIFY(PY_MAJOR_VERSION) Py_STRINGIFY(PY_MINOR_VERSION) PYD_THREADING_TAG
#endif

#define PYD_TAGGED_SUFFIX PYD_DEBUG_SUFFIX "." PYD_SOABI ".pyd"
#define PYD_UNTAGGED_SUFFIX PYD_DEBUG_SUFFIX ".pyd"
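// Sketch of the resulting suffixes, assuming a release build of CPython 3.13
// with the GIL enabled and PYD_PLATFORM_TAG defined as "win_amd64" (the exact
// digits come from patchlevel.h):
//
//   PYD_TAGGED_SUFFIX   -> ".cp313-win_amd64.pyd"
//   PYD_UNTAGGED_SUFFIX -> ".pyd"
//
// A debug build (_DEBUG) prepends "_d", giving "_d.cp313-win_amd64.pyd".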

#else
typedef void (*dl_funcptr)(void);
#endif


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_IMPORTDL_H */

200
Dependencies/Python/include/internal/pycore_initconfig.h
vendored
Normal file
@@ -0,0 +1,200 @@
#ifndef Py_INTERNAL_CORECONFIG_H
#define Py_INTERNAL_CORECONFIG_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

/* Forward declaration */
struct pyruntimestate;

/* --- PyStatus ----------------------------------------------- */

/* Almost all errors causing Python initialization to fail */
#ifdef _MSC_VER
   /* Visual Studio 2015 doesn't implement C99 __func__ in C */
#  define _PyStatus_GET_FUNC() __FUNCTION__
#else
#  define _PyStatus_GET_FUNC() __func__
#endif

#define _PyStatus_OK() \
    (PyStatus){._type = _PyStatus_TYPE_OK}
    /* other fields are set to 0 */
#define _PyStatus_ERR(ERR_MSG) \
    (PyStatus){ \
        ._type = _PyStatus_TYPE_ERROR, \
        .func = _PyStatus_GET_FUNC(), \
        .err_msg = (ERR_MSG)}
        /* other fields are set to 0 */
#define _PyStatus_NO_MEMORY_ERRMSG "memory allocation failed"
#define _PyStatus_NO_MEMORY() _PyStatus_ERR(_PyStatus_NO_MEMORY_ERRMSG)
#define _PyStatus_EXIT(EXITCODE) \
    (PyStatus){ \
        ._type = _PyStatus_TYPE_EXIT, \
        .exitcode = (EXITCODE)}
#define _PyStatus_IS_ERROR(err) \
    ((err)._type == _PyStatus_TYPE_ERROR)
#define _PyStatus_IS_EXIT(err) \
    ((err)._type == _PyStatus_TYPE_EXIT)
#define _PyStatus_EXCEPTION(err) \
    ((err)._type != _PyStatus_TYPE_OK)
#define _PyStatus_UPDATE_FUNC(err) \
    do { (err).func = _PyStatus_GET_FUNC(); } while (0)
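// Minimal usage sketch of the PyStatus helpers (init_something() is a
// hypothetical init function, not part of this header):
//
//   static PyStatus
//   init_something(void)
//   {
//       void *buf = PyMem_RawMalloc(1024);
//       if (buf == NULL) {
//           return _PyStatus_NO_MEMORY();
//       }
//       /* ... use buf ... */
//       PyMem_RawFree(buf);
//       return _PyStatus_OK();
//   }
//
//   // Caller:
//   PyStatus status = init_something();
//   if (_PyStatus_EXCEPTION(status)) {
//       return status;   // propagate the error or exit request
//   }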

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(void) _PyErr_SetFromPyStatus(PyStatus status);


/* --- PyWideStringList ------------------------------------------------ */

#define _PyWideStringList_INIT (PyWideStringList){.length = 0, .items = NULL}

#ifndef NDEBUG
extern int _PyWideStringList_CheckConsistency(const PyWideStringList *list);
#endif
extern void _PyWideStringList_Clear(PyWideStringList *list);
extern int _PyWideStringList_Copy(PyWideStringList *list,
    const PyWideStringList *list2);
extern PyStatus _PyWideStringList_Extend(PyWideStringList *list,
    const PyWideStringList *list2);
extern PyObject* _PyWideStringList_AsList(const PyWideStringList *list);


/* --- _PyArgv ---------------------------------------------------- */

typedef struct _PyArgv {
    Py_ssize_t argc;
    int use_bytes_argv;
    char * const *bytes_argv;
    wchar_t * const *wchar_argv;
} _PyArgv;

extern PyStatus _PyArgv_AsWstrList(const _PyArgv *args,
    PyWideStringList *list);


/* --- Helper functions ------------------------------------------- */

extern int _Py_str_to_int(
    const char *str,
    int *result);
extern const wchar_t* _Py_get_xoption(
    const PyWideStringList *xoptions,
    const wchar_t *name);
extern const char* _Py_GetEnv(
    int use_environment,
    const char *name);
extern void _Py_get_env_flag(
    int use_environment,
    int *flag,
    const char *name);

/* Py_GetArgcArgv() helper */
extern void _Py_ClearArgcArgv(void);


/* --- _PyPreCmdline ------------------------------------------------- */

typedef struct {
    PyWideStringList argv;
    PyWideStringList xoptions;     /* "-X value" option */
    int isolated;                  /* -I option */
    int use_environment;           /* -E option */
    int dev_mode;                  /* -X dev and PYTHONDEVMODE */
    int warn_default_encoding;     /* -X warn_default_encoding and PYTHONWARNDEFAULTENCODING */
} _PyPreCmdline;

#define _PyPreCmdline_INIT \
    (_PyPreCmdline){ \
        .use_environment = -1, \
        .isolated = -1, \
        .dev_mode = -1}
/* Note: _PyPreCmdline_INIT sets other fields to 0/NULL */
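// Usage sketch (a reading of the tri-state convention, not stated explicitly
// in this header): -1 means "not yet decided", so later stages can tell an
// explicit 0/1 apart from "unset":
//
//   _PyPreCmdline cmdline = _PyPreCmdline_INIT;
//   // cmdline.isolated == -1 until _PyPreCmdline_Read() resolves it from
//   // the command line and PyPreConfig.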

extern void _PyPreCmdline_Clear(_PyPreCmdline *cmdline);
extern PyStatus _PyPreCmdline_SetArgv(_PyPreCmdline *cmdline,
    const _PyArgv *args);
extern PyStatus _PyPreCmdline_SetConfig(
    const _PyPreCmdline *cmdline,
    PyConfig *config);
extern PyStatus _PyPreCmdline_Read(_PyPreCmdline *cmdline,
    const PyPreConfig *preconfig);


/* --- PyPreConfig ----------------------------------------------- */

// Export for '_testembed' program
PyAPI_FUNC(void) _PyPreConfig_InitCompatConfig(PyPreConfig *preconfig);

extern void _PyPreConfig_InitFromConfig(
    PyPreConfig *preconfig,
    const PyConfig *config);
extern PyStatus _PyPreConfig_InitFromPreConfig(
    PyPreConfig *preconfig,
    const PyPreConfig *config2);
extern PyObject* _PyPreConfig_AsDict(const PyPreConfig *preconfig);
extern void _PyPreConfig_GetConfig(PyPreConfig *preconfig,
    const PyConfig *config);
extern PyStatus _PyPreConfig_Read(PyPreConfig *preconfig,
    const _PyArgv *args);
extern PyStatus _PyPreConfig_Write(const PyPreConfig *preconfig);


/* --- PyConfig ---------------------------------------------- */

typedef enum {
    /* Py_Initialize() API: backward compatibility with Python 3.6 and 3.7 */
    _PyConfig_INIT_COMPAT = 1,
    _PyConfig_INIT_PYTHON = 2,
    _PyConfig_INIT_ISOLATED = 3
} _PyConfigInitEnum;

typedef enum {
    /* For now, this means the GIL is enabled.

       gh-116329: This will eventually change to "the GIL is disabled but can
       be reenabled by loading an incompatible extension module." */
    _PyConfig_GIL_DEFAULT = -1,

    /* The GIL has been forced off or on, and will not be affected by module loading. */
    _PyConfig_GIL_DISABLE = 0,
    _PyConfig_GIL_ENABLE = 1,
} _PyConfigGILEnum;

// Export for '_testembed' program
PyAPI_FUNC(void) _PyConfig_InitCompatConfig(PyConfig *config);

extern PyStatus _PyConfig_Copy(
    PyConfig *config,
    const PyConfig *config2);
extern PyStatus _PyConfig_InitPathConfig(
    PyConfig *config,
    int compute_path_config);
extern PyStatus _PyConfig_InitImportConfig(PyConfig *config);
extern PyStatus _PyConfig_Read(PyConfig *config, int compute_path_config);
extern PyStatus _PyConfig_Write(const PyConfig *config,
    struct pyruntimestate *runtime);
extern PyStatus _PyConfig_SetPyArgv(
    PyConfig *config,
    const _PyArgv *args);


extern void _Py_DumpPathConfig(PyThreadState *tstate);


/* --- Functions used for testing ---------------------------------- */

// Export these functions for '_testinternalcapi' shared extension
PyAPI_FUNC(PyObject*) _PyConfig_AsDict(const PyConfig *config);
PyAPI_FUNC(int) _PyConfig_FromDict(PyConfig *config, PyObject *dict);
PyAPI_FUNC(PyObject*) _Py_Get_Getpath_CodeObject(void);
PyAPI_FUNC(PyObject*) _Py_GetConfigsAsDict(void);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CORECONFIG_H */

73
Dependencies/Python/include/internal/pycore_instruction_sequence.h
vendored
Normal file
@@ -0,0 +1,73 @@
#ifndef Py_INTERNAL_INSTRUCTION_SEQUENCE_H
#define Py_INTERNAL_INSTRUCTION_SEQUENCE_H

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_symtable.h"

#ifdef __cplusplus
extern "C" {
#endif


typedef struct {
    int h_label;
    int h_startdepth;
    int h_preserve_lasti;
} _PyExceptHandlerInfo;

typedef struct {
    int i_opcode;
    int i_oparg;
    _Py_SourceLocation i_loc;
    _PyExceptHandlerInfo i_except_handler_info;

    /* Temporary fields, used by the assembler and in instr_sequence_to_cfg */
    int i_target;
    int i_offset;
} _PyInstruction;

typedef struct instruction_sequence {
    PyObject_HEAD
    _PyInstruction *s_instrs;
    int s_allocated;
    int s_used;

    int s_next_free_label; /* next free label id */

    /* Map of a label id to instruction offset (index into s_instrs).
     * If s_labelmap is NULL, then each label id is the offset itself.
     */
    int *s_labelmap;
    int s_labelmap_size;

    /* PyList of instruction sequences of nested functions */
    PyObject *s_nested;
} _PyInstructionSequence;

typedef struct {
    int id;
} _PyJumpTargetLabel;

PyAPI_FUNC(PyObject*)_PyInstructionSequence_New(void);

int _PyInstructionSequence_UseLabel(_PyInstructionSequence *seq, int lbl);
int _PyInstructionSequence_Addop(_PyInstructionSequence *seq,
                                 int opcode, int oparg,
                                 _Py_SourceLocation loc);
_PyJumpTargetLabel _PyInstructionSequence_NewLabel(_PyInstructionSequence *seq);
int _PyInstructionSequence_ApplyLabelMap(_PyInstructionSequence *seq);
int _PyInstructionSequence_InsertInstruction(_PyInstructionSequence *seq, int pos,
                                             int opcode, int oparg, _Py_SourceLocation loc);
int _PyInstructionSequence_AddNested(_PyInstructionSequence *seq, _PyInstructionSequence *nested);
void PyInstructionSequence_Fini(_PyInstructionSequence *seq);

extern PyTypeObject _PyInstructionSequence_Type;
#define _PyInstructionSequence_Check(v) Py_IS_TYPE((v), &_PyInstructionSequence_Type)

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_INSTRUCTION_SEQUENCE_H */

75
Dependencies/Python/include/internal/pycore_instruments.h
vendored
Normal file
@@ -0,0 +1,75 @@
#ifndef Py_INTERNAL_INSTRUMENT_H
#define Py_INTERNAL_INSTRUMENT_H

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_frame.h"         // _PyInterpreterFrame

#ifdef __cplusplus
extern "C" {
#endif

#define PY_MONITORING_TOOL_IDS 8

typedef uint32_t _PyMonitoringEventSet;

/* Tool IDs */

/* These are defined in PEP 669 for convenience to avoid clashes */
#define PY_MONITORING_DEBUGGER_ID 0
#define PY_MONITORING_COVERAGE_ID 1
#define PY_MONITORING_PROFILER_ID 2
#define PY_MONITORING_OPTIMIZER_ID 5

/* Internal IDs used to support sys.setprofile() and sys.settrace() */
#define PY_MONITORING_SYS_PROFILE_ID 6
#define PY_MONITORING_SYS_TRACE_ID 7


PyObject *_PyMonitoring_RegisterCallback(int tool_id, int event_id, PyObject *obj);

int _PyMonitoring_SetEvents(int tool_id, _PyMonitoringEventSet events);
int _PyMonitoring_SetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEventSet events);
int _PyMonitoring_GetLocalEvents(PyCodeObject *code, int tool_id, _PyMonitoringEventSet *events);

extern int
_Py_call_instrumentation(PyThreadState *tstate, int event,
                         _PyInterpreterFrame *frame, _Py_CODEUNIT *instr);

extern int
_Py_call_instrumentation_line(PyThreadState *tstate, _PyInterpreterFrame* frame,
                              _Py_CODEUNIT *instr, _Py_CODEUNIT *prev);

extern int
_Py_call_instrumentation_instruction(
    PyThreadState *tstate, _PyInterpreterFrame* frame, _Py_CODEUNIT *instr);

_Py_CODEUNIT *
_Py_call_instrumentation_jump(
    PyThreadState *tstate, int event,
    _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, _Py_CODEUNIT *target);

extern int
_Py_call_instrumentation_arg(PyThreadState *tstate, int event,
                             _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg);

extern int
_Py_call_instrumentation_2args(PyThreadState *tstate, int event,
                               _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);

extern void
_Py_call_instrumentation_exc2(PyThreadState *tstate, int event,
                              _PyInterpreterFrame *frame, _Py_CODEUNIT *instr, PyObject *arg0, PyObject *arg1);

extern int
_Py_Instrumentation_GetLine(PyCodeObject *code, int index);

extern PyObject _PyInstrumentation_MISSING;
extern PyObject _PyInstrumentation_DISABLE;

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_INSTRUMENT_H */

423
Dependencies/Python/include/internal/pycore_interp.h
vendored
Normal file
@@ -0,0 +1,423 @@
#ifndef Py_INTERNAL_INTERP_H
#define Py_INTERNAL_INTERP_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include <stdbool.h>              // bool

#include "pycore_ast_state.h"     // struct ast_state
#include "pycore_atexit.h"        // struct atexit_state
#include "pycore_ceval_state.h"   // struct _ceval_state
#include "pycore_code.h"          // struct callable_cache
#include "pycore_codecs.h"        // struct codecs_state
#include "pycore_context.h"       // struct _Py_context_state
#include "pycore_crossinterp.h"   // struct _xidregistry
#include "pycore_dict_state.h"    // struct _Py_dict_state
#include "pycore_dtoa.h"          // struct _dtoa_state
#include "pycore_exceptions.h"    // struct _Py_exc_state
#include "pycore_floatobject.h"   // struct _Py_float_state
#include "pycore_function.h"      // FUNC_MAX_WATCHERS
#include "pycore_gc.h"            // struct _gc_runtime_state
#include "pycore_genobject.h"     // struct _Py_async_gen_state
#include "pycore_global_objects.h"// struct _Py_interp_cached_objects
#include "pycore_import.h"        // struct _import_state
#include "pycore_instruments.h"   // _PY_MONITORING_EVENTS
#include "pycore_list.h"          // struct _Py_list_state
#include "pycore_mimalloc.h"      // struct _mimalloc_interp_state
#include "pycore_object_state.h"  // struct _py_object_state
#include "pycore_optimizer.h"     // _PyOptimizerObject
#include "pycore_obmalloc.h"      // struct _obmalloc_state
#include "pycore_qsbr.h"          // struct _qsbr_state
#include "pycore_tstate.h"        // _PyThreadStateImpl
#include "pycore_tuple.h"         // struct _Py_tuple_state
#include "pycore_typeobject.h"    // struct types_state
#include "pycore_unicodeobject.h" // struct _Py_unicode_state
#include "pycore_warnings.h"      // struct _warnings_runtime_state


struct _Py_long_state {
    int max_str_digits;
};

// Support for stop-the-world events. This exists in both the PyRuntime struct
// for global pauses and in each PyInterpreterState for per-interpreter pauses.
struct _stoptheworld_state {
    PyMutex mutex;       // Serializes stop-the-world attempts.

    // NOTE: The below fields are protected by HEAD_LOCK(runtime), not by the
    // above mutex.
    bool requested;      // Set when a pause is requested.
    bool world_stopped;  // Set when the world is stopped.
    bool is_global;      // Set when contained in PyRuntime struct.

    PyEvent stop_event;  // Set when thread_countdown reaches zero.
    Py_ssize_t thread_countdown;  // Number of threads that must pause.

    PyThreadState *requester; // Thread that requested the pause (may be NULL).
};

#ifdef Py_GIL_DISABLED
// This should be prime but otherwise the choice is arbitrary. A larger value
// increases concurrency at the expense of memory.
#  define NUM_WEAKREF_LIST_LOCKS 127
#endif

/* cross-interpreter data registry */

/* Tracks some rare events per-interpreter, used by the optimizer to turn on/off
   specific optimizations. */
typedef struct _rare_events {
    /* Setting an object's class, obj.__class__ = ... */
    uint8_t set_class;
    /* Setting the bases of a class, cls.__bases__ = ... */
    uint8_t set_bases;
    /* Setting the PEP 523 frame eval function, _PyInterpreterState_SetFrameEvalFunc() */
    uint8_t set_eval_frame_func;
    /* Modifying the builtins, __builtins__.__dict__[var] = ... */
    uint8_t builtin_dict;
    /* Modifying a function, e.g. func.__defaults__ = ..., etc. */
    uint8_t func_modification;
} _rare_events;

/* interpreter state */

/* PyInterpreterState holds the global state for one of the runtime's
   interpreters.  Typically the initial (main) interpreter is the only one.

   The PyInterpreterState typedef is in Include/pytypedefs.h.
   */
struct _is {

    /* This struct contains the eval_breaker,
     * which is by far the hottest field in this struct
     * and should be placed at the beginning. */
    struct _ceval_state ceval;

    PyInterpreterState *next;

    int64_t id;
    int64_t id_refcount;
    int requires_idref;
    PyThread_type_lock id_mutex;

#define _PyInterpreterState_WHENCE_NOTSET -1
#define _PyInterpreterState_WHENCE_UNKNOWN 0
#define _PyInterpreterState_WHENCE_RUNTIME 1
#define _PyInterpreterState_WHENCE_LEGACY_CAPI 2
#define _PyInterpreterState_WHENCE_CAPI 3
#define _PyInterpreterState_WHENCE_XI 4
#define _PyInterpreterState_WHENCE_STDLIB 5
#define _PyInterpreterState_WHENCE_MAX 5
    long _whence;

    /* Has been initialized to a safe state.

       In order to be effective, this must be set to 0 during or right
       after allocation. */
    int _initialized;
    /* Has been fully initialized via pylifecycle.c. */
    int _ready;
    int finalizing;

    uintptr_t last_restart_version;
    struct pythreads {
        uint64_t next_unique_id;
        /* The linked list of threads, newest first. */
        PyThreadState *head;
        /* The thread currently executing in the __main__ module, if any. */
        PyThreadState *main;
        /* Used in Modules/_threadmodule.c. */
        Py_ssize_t count;
        /* Support for runtime thread stack size tuning.
           A value of 0 means using the platform's default stack size
           or the size specified by the THREAD_STACK_SIZE macro. */
        /* Used in Python/thread.c. */
        size_t stacksize;
    } threads;

    /* Reference to the _PyRuntime global variable. This field exists
       to not have to pass runtime in addition to tstate to a function.
       Get runtime from tstate: tstate->interp->runtime. */
    struct pyruntimestate *runtime;

    /* Set by Py_EndInterpreter().

       Use _PyInterpreterState_GetFinalizing()
       and _PyInterpreterState_SetFinalizing()
       to access it, don't access it directly. */
    PyThreadState* _finalizing;
    /* The ID of the OS thread in which we are finalizing. */
    unsigned long _finalizing_id;

    struct _gc_runtime_state gc;

    /* The following fields are here to avoid allocation during init.
       The data is exposed through PyInterpreterState pointer fields.
       These fields should not be accessed directly outside of init.

       All other PyInterpreterState pointer fields are populated when
       needed and default to NULL.

       For now there are some exceptions to that rule, which require
       allocation during init.  These will be addressed on a case-by-case
       basis.  Also see _PyRuntimeState regarding the various mutex fields.
       */

    // Dictionary of the sys module
    PyObject *sysdict;

    // Dictionary of the builtins module
    PyObject *builtins;

    struct _import_state imports;

    /* The per-interpreter GIL, which might not be used. */
    struct _gil_runtime_state _gil;

    /* ---------- IMPORTANT ---------------------------
       The fields above this line are declared as early as
       possible to facilitate out-of-process observability
       tools. */

    struct codecs_state codecs;

    PyConfig config;
    unsigned long feature_flags;

    PyObject *dict;  /* Stores per-interpreter state */

    PyObject *sysdict_copy;
    PyObject *builtins_copy;
    // Initialized to _PyEval_EvalFrameDefault().
    _PyFrameEvalFunction eval_frame;

    PyFunction_WatchCallback func_watchers[FUNC_MAX_WATCHERS];
    // One bit is set for each non-NULL entry in func_watchers
    uint8_t active_func_watchers;

    Py_ssize_t co_extra_user_count;
    freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];

    /* cross-interpreter data and utils */
    struct _xi_state xi;

#ifdef HAVE_FORK
    PyObject *before_forkers;
    PyObject *after_forkers_parent;
    PyObject *after_forkers_child;
#endif

    struct _warnings_runtime_state warnings;
    struct atexit_state atexit;
    struct _stoptheworld_state stoptheworld;
    struct _qsbr_shared qsbr;

#if defined(Py_GIL_DISABLED)
    struct _mimalloc_interp_state mimalloc;
    struct _brc_state brc;  // biased reference counting state
    PyMutex weakref_locks[NUM_WEAKREF_LIST_LOCKS];
#endif

    // Per-interpreter state for the obmalloc allocator.  For the main
    // interpreter and for all interpreters that don't have their
    // own obmalloc state, this points to the static structure in
    // obmalloc.c obmalloc_state_main.  For other interpreters, it is
    // heap allocated by _PyMem_init_obmalloc() and freed when the
    // interpreter structure is freed.  In the case of a heap allocated
    // obmalloc state, it is not safe to hold on to or use memory after
    // the interpreter is freed. The obmalloc state corresponding to
    // that allocated memory is gone.  See free_obmalloc_arenas() for
    // more comments.
    struct _obmalloc_state *obmalloc;

    PyObject *audit_hooks;
    PyType_WatchCallback type_watchers[TYPE_MAX_WATCHERS];
    PyCode_WatchCallback code_watchers[CODE_MAX_WATCHERS];
    // One bit is set for each non-NULL entry in code_watchers
    uint8_t active_code_watchers;

    struct _py_object_state object_state;
    struct _Py_unicode_state unicode;
    struct _Py_long_state long_state;
    struct _dtoa_state dtoa;
    struct _py_func_state func_state;
    struct _py_code_state code_state;

    struct _Py_dict_state dict_state;
    struct _Py_exc_state exc_state;
    struct _Py_mem_interp_free_queue mem_free_queue;

    struct ast_state ast;
    struct types_state types;
    struct callable_cache callable_cache;
    _PyOptimizerObject *optimizer;
    _PyExecutorObject *executor_list_head;

    _rare_events rare_events;
    PyDict_WatchCallback builtins_dict_watcher;

    _Py_GlobalMonitors monitors;
    bool sys_profile_initialized;
    bool sys_trace_initialized;
    Py_ssize_t sys_profiling_threads; /* Count of threads with c_profilefunc set */
    Py_ssize_t sys_tracing_threads;   /* Count of threads with c_tracefunc set */
    PyObject *monitoring_callables[PY_MONITORING_TOOL_IDS][_PY_MONITORING_EVENTS];
    PyObject *monitoring_tool_names[PY_MONITORING_TOOL_IDS];

    struct _Py_interp_cached_objects cached_objects;
    struct _Py_interp_static_objects static_objects;

    /* the initial PyInterpreterState.threads.head */
    _PyThreadStateImpl _initial_thread;
    Py_ssize_t _interactive_src_count;
    // In 3.14+ this is interp->threads.preallocated.
    _PyThreadStateImpl *threads_preallocated;
};


/* other API */

extern void _PyInterpreterState_Clear(PyThreadState *tstate);


static inline PyThreadState*
_PyInterpreterState_GetFinalizing(PyInterpreterState *interp) {
    return (PyThreadState*)_Py_atomic_load_ptr_relaxed(&interp->_finalizing);
}

static inline unsigned long
_PyInterpreterState_GetFinalizingID(PyInterpreterState *interp) {
    return _Py_atomic_load_ulong_relaxed(&interp->_finalizing_id);
}

static inline void
_PyInterpreterState_SetFinalizing(PyInterpreterState *interp, PyThreadState *tstate) {
    _Py_atomic_store_ptr_relaxed(&interp->_finalizing, tstate);
    if (tstate == NULL) {
        _Py_atomic_store_ulong_relaxed(&interp->_finalizing_id, 0);
    }
    else {
        // XXX Re-enable this assert once gh-109860 is fixed.
        //assert(tstate->thread_id == PyThread_get_thread_ident());
        _Py_atomic_store_ulong_relaxed(&interp->_finalizing_id,
                                       tstate->thread_id);
    }
}


// Exports for the _testinternalcapi module.
PyAPI_FUNC(int64_t) _PyInterpreterState_ObjectToID(PyObject *);
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpID(int64_t);
PyAPI_FUNC(PyInterpreterState *) _PyInterpreterState_LookUpIDObject(PyObject *);
PyAPI_FUNC(int) _PyInterpreterState_IDInitref(PyInterpreterState *);
PyAPI_FUNC(int) _PyInterpreterState_IDIncref(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_IDDecref(PyInterpreterState *);

PyAPI_FUNC(int) _PyInterpreterState_IsReady(PyInterpreterState *interp);

PyAPI_FUNC(long) _PyInterpreterState_GetWhence(PyInterpreterState *interp);
extern void _PyInterpreterState_SetWhence(
    PyInterpreterState *interp,
    long whence);

extern const PyConfig* _PyInterpreterState_GetConfig(PyInterpreterState *interp);

// Get a copy of the current interpreter configuration.
//
// Return 0 on success. Raise an exception and return -1 on error.
//
// The caller must initialize 'config', using PyConfig_InitPythonConfig()
// for example.
//
// Python must be preinitialized to call this method.
// The caller must hold the GIL.
//
// Once done with the configuration, PyConfig_Clear() must be called to clear
// it.
//
// Export for '_testinternalcapi' shared extension.
PyAPI_FUNC(int) _PyInterpreterState_GetConfigCopy(
    struct PyConfig *config);

// Set the configuration of the current interpreter.
//
// This function should be called during or just after the Python
// initialization.
//
// Update the sys module with the new configuration. If the sys module was
// modified directly after the Python initialization, these changes are lost.
//
// Some configuration like faulthandler or warnoptions can be updated in the
// configuration, but don't reconfigure Python (don't enable/disable
// faulthandler and don't reconfigure warnings filters).
//
// Return 0 on success. Raise an exception and return -1 on error.
//
// The configuration should come from _PyInterpreterState_GetConfigCopy().
//
// Export for '_testinternalcapi' shared extension.
PyAPI_FUNC(int) _PyInterpreterState_SetConfig(
    const struct PyConfig *config);
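// Usage sketch implied by the comments above (error handling abbreviated;
// faulthandler is one of the PyConfig fields the comments say can be updated):
//
//   PyConfig config;
//   PyConfig_InitPythonConfig(&config);
//   if (_PyInterpreterState_GetConfigCopy(&config) < 0) {
//       PyConfig_Clear(&config);
//       return -1;
//   }
//   config.faulthandler = 1;              // tweak a field
//   int res = _PyInterpreterState_SetConfig(&config);
//   PyConfig_Clear(&config);
//   return res;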


/*
Runtime Feature Flags

Each flag indicates whether or not a specific runtime feature
is available in a given context.  For example, forking the process
might not be allowed in the current interpreter (i.e. os.fork() would fail).
*/

/* Set if the interpreter shares obmalloc runtime state
   with the main interpreter. */
#define Py_RTFLAGS_USE_MAIN_OBMALLOC (1UL << 5)

/* Set if import should check a module for subinterpreter support. */
#define Py_RTFLAGS_MULTI_INTERP_EXTENSIONS (1UL << 8)

/* Set if threads are allowed. */
#define Py_RTFLAGS_THREADS (1UL << 10)

/* Set if daemon threads are allowed. */
#define Py_RTFLAGS_DAEMON_THREADS (1UL << 11)

/* Set if os.fork() is allowed. */
#define Py_RTFLAGS_FORK (1UL << 15)

/* Set if os.exec*() is allowed. */
#define Py_RTFLAGS_EXEC (1UL << 16)

extern int _PyInterpreterState_HasFeature(PyInterpreterState *interp,
                                          unsigned long feature);

PyAPI_FUNC(PyStatus) _PyInterpreterState_New(
    PyThreadState *tstate,
    PyInterpreterState **pinterp);


#define RARE_EVENT_INTERP_INC(interp, name) \
    do { \
        /* saturating add */ \
        int val = FT_ATOMIC_LOAD_UINT8_RELAXED(interp->rare_events.name); \
        if (val < UINT8_MAX) { \
            FT_ATOMIC_STORE_UINT8(interp->rare_events.name, val + 1); \
        } \
        RARE_EVENT_STAT_INC(name); \
    } while (0)

#define RARE_EVENT_INC(name) \
    do { \
        PyInterpreterState *interp = PyInterpreterState_Get(); \
        RARE_EVENT_INTERP_INC(interp, name); \
    } while (0)
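// Example call site (a sketch based on the _rare_events field comments
// above): when code assigns obj.__class__, the type machinery would bump the
// corresponding counter with
//
//   RARE_EVENT_INC(set_class);
//
// The saturating add keeps the uint8_t counter pinned at UINT8_MAX instead
// of wrapping around.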

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_INTERP_H */

51
Dependencies/Python/include/internal/pycore_intrinsics.h
vendored
Normal file
@@ -0,0 +1,51 @@
#ifndef Py_INTERNAL_INTRINSIC_H
#define Py_INTERNAL_INTRINSIC_H

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

/* Unary Functions: */
#define INTRINSIC_1_INVALID                      0
#define INTRINSIC_PRINT                          1
#define INTRINSIC_IMPORT_STAR                    2
#define INTRINSIC_STOPITERATION_ERROR            3
#define INTRINSIC_ASYNC_GEN_WRAP                 4
#define INTRINSIC_UNARY_POSITIVE                 5
#define INTRINSIC_LIST_TO_TUPLE                  6
#define INTRINSIC_TYPEVAR                        7
#define INTRINSIC_PARAMSPEC                      8
#define INTRINSIC_TYPEVARTUPLE                   9
#define INTRINSIC_SUBSCRIPT_GENERIC             10
#define INTRINSIC_TYPEALIAS                     11

#define MAX_INTRINSIC_1                         11


/* Binary Functions: */
#define INTRINSIC_2_INVALID                      0
#define INTRINSIC_PREP_RERAISE_STAR              1
#define INTRINSIC_TYPEVAR_WITH_BOUND             2
#define INTRINSIC_TYPEVAR_WITH_CONSTRAINTS       3
#define INTRINSIC_SET_FUNCTION_TYPE_PARAMS       4
#define INTRINSIC_SET_TYPEPARAM_DEFAULT          5

#define MAX_INTRINSIC_2                          5

typedef PyObject *(*intrinsic_func1)(PyThreadState* tstate, PyObject *value);
typedef PyObject *(*intrinsic_func2)(PyThreadState* tstate, PyObject *value1, PyObject *value2);

typedef struct {
    intrinsic_func1 func;
    const char *name;
} intrinsic_func1_info;

typedef struct {
    intrinsic_func2 func;
    const char *name;
} intrinsic_func2_info;

PyAPI_DATA(const intrinsic_func1_info) _PyIntrinsics_UnaryFunctions[];
PyAPI_DATA(const intrinsic_func2_info) _PyIntrinsics_BinaryFunctions[];

#endif  // !Py_INTERNAL_INTRINSIC_H

25
Dependencies/Python/include/internal/pycore_jit.h
vendored
Normal file
@@ -0,0 +1,25 @@
#ifndef Py_INTERNAL_JIT_H
#define Py_INTERNAL_JIT_H

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#ifdef _Py_JIT

typedef _Py_CODEUNIT *(*jit_func)(_PyInterpreterFrame *frame, PyObject **stack_pointer, PyThreadState *tstate);

int _PyJIT_Compile(_PyExecutorObject *executor, const _PyUOpInstruction *trace, size_t length);
void _PyJIT_Free(_PyExecutorObject *executor);

#endif  // _Py_JIT

#ifdef __cplusplus
}
#endif

#endif  // !Py_INTERNAL_JIT_H

66
Dependencies/Python/include/internal/pycore_list.h
vendored
Normal file
@@ -0,0 +1,66 @@
#ifndef Py_INTERNAL_LIST_H
#define Py_INTERNAL_LIST_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_freelist.h"   // _PyFreeListState

PyAPI_FUNC(PyObject*) _PyList_Extend(PyListObject *, PyObject *);
extern void _PyList_DebugMallocStats(FILE *out);

#define _PyList_ITEMS(op) _Py_RVALUE(_PyList_CAST(op)->ob_item)

PyAPI_FUNC(int)
_PyList_AppendTakeRefListResize(PyListObject *self, PyObject *newitem);

// In the free-threaded build, the caller must lock self if the append needs
// to be thread-safe.
static inline int
_PyList_AppendTakeRef(PyListObject *self, PyObject *newitem)
{
    assert(self != NULL && newitem != NULL);
    assert(PyList_Check(self));
    Py_ssize_t len = Py_SIZE(self);
    Py_ssize_t allocated = self->allocated;
    assert((size_t)len + 1 < PY_SSIZE_T_MAX);
    if (allocated > len) {
#ifdef Py_GIL_DISABLED
        _Py_atomic_store_ptr_release(&self->ob_item[len], newitem);
#else
        PyList_SET_ITEM(self, len, newitem);
#endif
        Py_SET_SIZE(self, len + 1);
        return 0;
    }
    return _PyList_AppendTakeRefListResize(self, newitem);
}
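// Usage sketch: "TakeRef" means the function consumes the caller's reference
// to newitem -- note the fast path above stores it without an incref; on the
// resize path, the *TakeRef* convention is that the helper releases the
// reference on failure:
//
//   PyObject *item = PyLong_FromLong(42);
//   if (item == NULL
//       || _PyList_AppendTakeRef((PyListObject *)list, item) < 0) {
//       return -1;   // no Py_DECREF(item) needed here
//   }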

// Repeat the bytes of a buffer in place
static inline void
_Py_memory_repeat(char* dest, Py_ssize_t len_dest, Py_ssize_t len_src)
{
    assert(len_src > 0);
    Py_ssize_t copied = len_src;
    while (copied < len_dest) {
        Py_ssize_t bytes_to_copy = Py_MIN(copied, len_dest - copied);
        memcpy(dest + copied, dest, bytes_to_copy);
        copied += bytes_to_copy;
    }
}
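// Example trace, assuming dest initially holds "abc" (len_src == 3) and
// len_dest == 12: each pass doubles the initialized prefix,
//
//   copied = 3  -> memcpy 3 bytes -> "abcabc"
//   copied = 6  -> memcpy 6 bytes -> "abcabcabcabc"
//
// so the loop runs O(log(len_dest / len_src)) times rather than once per
// repetition.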

typedef struct {
    PyObject_HEAD
    Py_ssize_t it_index;
    PyListObject *it_seq; /* Set to NULL when iterator is exhausted */
} _PyListIterObject;

PyAPI_FUNC(PyObject *)_PyList_FromArraySteal(PyObject *const *src, Py_ssize_t n);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LIST_H */

106
Dependencies/Python/include/internal/pycore_llist.h
vendored
Normal file
@@ -0,0 +1,106 @@
// A doubly-linked list that can be embedded in a struct.
//
// Usage:
//  struct llist_node head = LLIST_INIT(head);
//  typedef struct {
//      ...
//      struct llist_node node;
//      ...
//  } MyObj;
//
//  llist_insert_tail(&head, &obj->node);
//  llist_remove(&obj->node);
//
//  struct llist_node *node;
//  llist_for_each(node, &head) {
//      MyObj *obj = llist_data(node, MyObj, node);
//      ...
//  }
//

#ifndef Py_INTERNAL_LLIST_H
#define Py_INTERNAL_LLIST_H

#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "Py_BUILD_CORE must be defined to include this header"
#endif

struct llist_node {
    struct llist_node *next;
    struct llist_node *prev;
};

// Get the struct containing a node.
#define llist_data(node, type, member) (_Py_CONTAINER_OF(node, type, member))

// Iterate over a list.
#define llist_for_each(node, head) \
    for (node = (head)->next; node != (head); node = node->next)

// Iterate over a list, but allow removal of the current node.
#define llist_for_each_safe(node, head) \
    for (struct llist_node *_next = (node = (head)->next, node->next); \
         node != (head); node = _next, _next = node->next)

#define LLIST_INIT(head) { &head, &head }

static inline void
llist_init(struct llist_node *head)
{
    head->next = head;
    head->prev = head;
}

// Returns 1 if the list is empty, 0 otherwise.
static inline int
llist_empty(struct llist_node *head)
{
    return head->next == head;
}

// Appends to the tail of the list.
static inline void
llist_insert_tail(struct llist_node *head, struct llist_node *node)
{
    node->prev = head->prev;
    node->next = head;
    head->prev->next = node;
    head->prev = node;
}

// Remove a node from the list.
static inline void
llist_remove(struct llist_node *node)
{
    struct llist_node *prev = node->prev;
    struct llist_node *next = node->next;
    prev->next = next;
    next->prev = prev;
    node->prev = NULL;
    node->next = NULL;
}

// Append all nodes from head2 onto head1. head2 is left empty.
static inline void
llist_concat(struct llist_node *head1, struct llist_node *head2)
{
    if (!llist_empty(head2)) {
        head1->prev->next = head2->next;
        head2->next->prev = head1->prev;

        head1->prev = head2->prev;
        head2->prev->next = head1;
        llist_init(head2);
    }
}

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LLIST_H */

241
Dependencies/Python/include/internal/pycore_lock.h
vendored
Normal file
@@ -0,0 +1,241 @@
// Lightweight locks and other synchronization mechanisms.
//
// These implementations are based on WebKit's WTF::Lock. See
// https://webkit.org/blog/6161/locking-in-webkit/ for a description of the
// design.
#ifndef Py_INTERNAL_LOCK_H
#define Py_INTERNAL_LOCK_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

// _Py_UNLOCKED is defined as 0 and _Py_LOCKED as 1 in Include/cpython/lock.h
#define _Py_HAS_PARKED 2
#define _Py_ONCE_INITIALIZED 4

static inline int
PyMutex_LockFast(uint8_t *lock_bits)
{
    uint8_t expected = _Py_UNLOCKED;
    return _Py_atomic_compare_exchange_uint8(lock_bits, &expected, _Py_LOCKED);
}

// Checks if the mutex is currently locked.
static inline int
PyMutex_IsLocked(PyMutex *m)
{
    return (_Py_atomic_load_uint8(&m->_bits) & _Py_LOCKED) != 0;
}

// Re-initializes the mutex after a fork to the unlocked state.
static inline void
_PyMutex_at_fork_reinit(PyMutex *m)
{
    memset(m, 0, sizeof(*m));
}

typedef enum _PyLockFlags {
    // Do not detach/release the GIL when waiting on the lock.
    _Py_LOCK_DONT_DETACH = 0,

    // Detach/release the GIL while waiting on the lock.
    _PY_LOCK_DETACH = 1,

    // Handle signals if interrupted while waiting on the lock.
    _PY_LOCK_HANDLE_SIGNALS = 2,
} _PyLockFlags;

// Lock a mutex with an optional timeout and additional options. See
// _PyLockFlags for details.
extern PyLockStatus
_PyMutex_LockTimed(PyMutex *m, PyTime_t timeout_ns, _PyLockFlags flags);

// Lock a mutex with additional options. See _PyLockFlags for details.
static inline void
PyMutex_LockFlags(PyMutex *m, _PyLockFlags flags)
{
    uint8_t expected = _Py_UNLOCKED;
    if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &expected, _Py_LOCKED)) {
        _PyMutex_LockTimed(m, -1, flags);
    }
}

// Unlock a mutex, returns 0 if the mutex is not locked (used for improved
// error messages).
extern int _PyMutex_TryUnlock(PyMutex *m);


// PyEvent is a one-time event notification
typedef struct {
    uint8_t v;
} PyEvent;

// Check if the event is set without blocking. Returns 1 if the event is set or
// 0 otherwise.
PyAPI_FUNC(int) _PyEvent_IsSet(PyEvent *evt);

// Set the event and notify any waiting threads.
// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(void) _PyEvent_Notify(PyEvent *evt);

// Wait for the event to be set. If the event is already set, then this returns
// immediately.
PyAPI_FUNC(void) PyEvent_Wait(PyEvent *evt);

// Wait for the event to be set, or until the timeout expires. If the event is
// already set, then this returns immediately. Returns 1 if the event was set,
// and 0 if the timeout expired or the thread was interrupted. If `detach` is
// true, then the thread will detach/release the GIL while waiting.
PyAPI_FUNC(int)
PyEvent_WaitTimed(PyEvent *evt, PyTime_t timeout_ns, int detach);

// _PyRawMutex implements a word-sized mutex that does not depend on the
// parking lot API, and therefore can be used in the parking lot
// implementation.
//
// The mutex uses a packed representation: the least significant bit is used to
// indicate whether the mutex is locked or not. The remaining bits are either
// zero or a pointer to a `struct raw_mutex_entry` (see lock.c).
typedef struct {
    uintptr_t v;
} _PyRawMutex;

// Slow paths for lock/unlock
extern void _PyRawMutex_LockSlow(_PyRawMutex *m);
extern void _PyRawMutex_UnlockSlow(_PyRawMutex *m);

static inline void
_PyRawMutex_Lock(_PyRawMutex *m)
{
    uintptr_t unlocked = _Py_UNLOCKED;
    if (_Py_atomic_compare_exchange_uintptr(&m->v, &unlocked, _Py_LOCKED)) {
        return;
    }
    _PyRawMutex_LockSlow(m);
}

static inline void
_PyRawMutex_Unlock(_PyRawMutex *m)
{
    uintptr_t locked = _Py_LOCKED;
    if (_Py_atomic_compare_exchange_uintptr(&m->v, &locked, _Py_UNLOCKED)) {
        return;
    }
    _PyRawMutex_UnlockSlow(m);
}

// Type signature for one-time initialization functions. The function should
// return 0 on success and -1 on failure.
typedef int _Py_once_fn_t(void *arg);

// (private) slow path for one time initialization
PyAPI_FUNC(int)
_PyOnceFlag_CallOnceSlow(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg);

// Calls `fn` once using `flag`. The `arg` is passed to the call to `fn`.
//
// Returns 0 on success and -1 on failure.
//
// If `fn` returns 0 (success), then subsequent calls immediately return 0.
// If `fn` returns -1 (failure), then subsequent calls will retry the call.
static inline int
_PyOnceFlag_CallOnce(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg)
{
    if (_Py_atomic_load_uint8(&flag->v) == _Py_ONCE_INITIALIZED) {
        return 0;
    }
    return _PyOnceFlag_CallOnceSlow(flag, fn, arg);
}
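// Usage sketch for the one-time initialization helper (a static _PyOnceFlag
// is zero-initialized, which is the required starting state; init_tables()
// is a hypothetical init function):
//
//   static _PyOnceFlag once;
//
//   static int
//   init_tables(void *arg)
//   {
//       /* ... build lookup tables ... */
//       return 0;   // 0 = success; -1 lets a later call retry
//   }
//
//   if (_PyOnceFlag_CallOnce(&once, init_tables, NULL) < 0) {
//       return -1;
//   }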

// A recursive mutex. The mutex should be zero-initialized.
typedef struct {
    PyMutex mutex;
    unsigned long long thread;  // i.e., PyThread_get_thread_ident_ex()
    size_t level;
} _PyRecursiveMutex;

PyAPI_FUNC(int) _PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m);
PyAPI_FUNC(void) _PyRecursiveMutex_Lock(_PyRecursiveMutex *m);
PyAPI_FUNC(void) _PyRecursiveMutex_Unlock(_PyRecursiveMutex *m);


// A readers-writer (RW) lock. The lock supports multiple concurrent readers or
// a single writer. The lock is write-preferring: if a writer is waiting while
// the lock is read-locked, then new readers will be blocked. This avoids
// starvation of writers.
//
// In C++, the equivalent synchronization primitive is std::shared_mutex
// with shared ("read") and exclusive ("write") locking.
//
// The two least significant bits are used to indicate if the lock is
// write-locked and if there are parked threads (either readers or writers)
// waiting to acquire the lock. The remaining bits are used to indicate the
// number of readers holding the lock.
//
// 0b000..00000: unlocked
// 0bnnn..nnn00: nnn..nnn readers holding the lock
// 0bnnn..nnn10: nnn..nnn readers holding the lock and a writer is waiting
// 0b00000..010: unlocked with awoken writer about to acquire lock
// 0b00000..001: write-locked
// 0b00000..011: write-locked and readers or other writers are waiting
//
// Note that reader_count must be zero if the lock is held by a writer, and
// vice versa. The lock can only be held by readers or a writer, but not both.
//
// The design is optimized for simplicity of the implementation. The lock is
// not fair: if fairness is desired, use an additional PyMutex to serialize
// writers. The lock is also not reentrant.
typedef struct {
    uintptr_t bits;
} _PyRWMutex;

// Read lock (i.e., shared lock)
PyAPI_FUNC(void) _PyRWMutex_RLock(_PyRWMutex *rwmutex);
PyAPI_FUNC(void) _PyRWMutex_RUnlock(_PyRWMutex *rwmutex);

// Write lock (i.e., exclusive lock)
PyAPI_FUNC(void) _PyRWMutex_Lock(_PyRWMutex *rwmutex);
PyAPI_FUNC(void) _PyRWMutex_Unlock(_PyRWMutex *rwmutex);

// Similar to linux seqlock: https://en.wikipedia.org/wiki/Seqlock
// We use a sequence number to lock the writer: an even sequence means we're
// unlocked, an odd sequence means we're locked. Readers will read the
// sequence before attempting to read the underlying data and then read the
// sequence number again after reading the data. If the sequence has not
// changed, the data is valid.
//
// Differs a little bit in that we use CAS on sequence as the lock, instead of a separate spin lock.
// The writer can also detect that the underlying data has not changed and abandon the write
// and restore the previous sequence.
typedef struct {
    uint32_t sequence;
} _PySeqLock;

// Lock the sequence lock for the writer
PyAPI_FUNC(void) _PySeqLock_LockWrite(_PySeqLock *seqlock);

// Unlock the sequence lock and move to the next sequence number.
PyAPI_FUNC(void) _PySeqLock_UnlockWrite(_PySeqLock *seqlock);

// Abandon the current update indicating that no mutations have occurred
// and restore the previous sequence value.
PyAPI_FUNC(void) _PySeqLock_AbandonWrite(_PySeqLock *seqlock);

// Begin a read operation and return the current sequence number.
PyAPI_FUNC(uint32_t) _PySeqLock_BeginRead(_PySeqLock *seqlock);

// End the read operation and confirm that the sequence number has not changed.
// Returns 1 if the read was successful or 0 if the read should be retried.
PyAPI_FUNC(int) _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous);
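// Reader-side sketch implied by the API above (lock, shared, and field are
// placeholders): retry until the sequence is stable across the read.
//
//   uint32_t seq;
//   do {
//       seq = _PySeqLock_BeginRead(&lock);
//       value = shared->field;   // read the protected data
//   } while (!_PySeqLock_EndRead(&lock, seq));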

// Check if the lock was held during a fork and clear the lock. Returns 1
// if the lock was held and any associated data should be cleared.
PyAPI_FUNC(int) _PySeqLock_AfterFork(_PySeqLock *seqlock);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LOCK_H */

310
Dependencies/Python/include/internal/pycore_long.h
vendored
Normal file
@@ -0,0 +1,310 @@
#ifndef Py_INTERNAL_LONG_H
#define Py_INTERNAL_LONG_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_bytesobject.h"     // _PyBytesWriter
#include "pycore_global_objects.h"  // _PY_NSMALLNEGINTS
#include "pycore_runtime.h"         // _PyRuntime

/*
 * Default int base conversion size limitation: Denial of Service prevention.
 *
 * Chosen such that this isn't wildly slow on modern hardware and so that
 * everyone's existing deployed numpy test suite passes before
 * https://github.com/numpy/numpy/issues/22098 is widely available.
 *
 * $ python -m timeit -s 's = "1"*4300' 'int(s)'
 * 2000 loops, best of 5: 125 usec per loop
 * $ python -m timeit -s 's = "1"*4300; v = int(s)' 'str(v)'
 * 1000 loops, best of 5: 311 usec per loop
 * (zen2 cloud VM)
 *
 * 4300 decimal digits fits a ~14284 bit number.
 */
#define _PY_LONG_DEFAULT_MAX_STR_DIGITS 4300
/*
 * Threshold for the max digits check.  For performance reasons int() and
 * int.__str__() don't check values that are smaller than this
 * threshold.  Acts as a guaranteed minimum size limit for bignums that
 * applications can expect from CPython.
 *
 * % python -m timeit -s 's = "1"*640; v = int(s)' 'str(int(s))'
 * 20000 loops, best of 5: 12 usec per loop
 *
 * "640 digits should be enough for anyone." - gps
 * fits a ~2126 bit number.
 */
#define _PY_LONG_MAX_STR_DIGITS_THRESHOLD 640

#if ((_PY_LONG_DEFAULT_MAX_STR_DIGITS != 0) && \
   (_PY_LONG_DEFAULT_MAX_STR_DIGITS < _PY_LONG_MAX_STR_DIGITS_THRESHOLD))
# error "_PY_LONG_DEFAULT_MAX_STR_DIGITS smaller than threshold."
#endif
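As a quick check on the two comments above: one decimal digit carries log2(10) ≈ 3.3219 bits, so 4300 digits ≈ 4300 × 3.3219 ≈ 14284 bits and 640 digits ≈ 640 × 3.3219 ≈ 2126 bits, which is where the "~14284 bit" and "~2126 bit" figures come from.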
/* runtime lifecycle */

extern PyStatus _PyLong_InitTypes(PyInterpreterState *);
extern void _PyLong_FiniTypes(PyInterpreterState *interp);


/* other API */

#define _PyLong_SMALL_INTS _Py_SINGLETON(small_ints)

// _PyLong_GetZero() and _PyLong_GetOne() must always be available
// _PyLong_FromUnsignedChar must always be available
#if _PY_NSMALLPOSINTS < 257
#  error "_PY_NSMALLPOSINTS must be greater than or equal to 257"
#endif

// Return a reference to the immortal zero singleton.
// The function cannot return NULL.
static inline PyObject* _PyLong_GetZero(void)
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS]; }

// Return a reference to the immortal one singleton.
// The function cannot return NULL.
static inline PyObject* _PyLong_GetOne(void)
{ return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+1]; }

static inline PyObject* _PyLong_FromUnsignedChar(unsigned char i)
{
    return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS+i];
}
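The accessors above all index one shared table: the small-int singletons store the values in [-_PY_NSMALLNEGINTS, _PY_NSMALLPOSINTS) with value v at slot _PY_NSMALLNEGINTS + v, which is why zero lives at index _PY_NSMALLNEGINTS. A hypothetical general accessor, shown only to make the indexing explicit (not part of this header):

    static inline PyObject *
    get_small_int(Py_ssize_t v)   // hypothetical helper
    {
        assert(-_PY_NSMALLNEGINTS <= v && v < _PY_NSMALLPOSINTS);
        return (PyObject *)&_PyLong_SMALL_INTS[_PY_NSMALLNEGINTS + v];
    }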
// _PyLong_Frexp returns a double x and an exponent e such that the
// true value is approximately equal to x * 2**e.  e is >= 0.  x is
// 0.0 if and only if the input is 0 (in which case, e and x are both
// zeroes); otherwise, 0.5 <= abs(x) < 1.0.  On overflow, which is
// possible if the number of bits doesn't fit into a Py_ssize_t, sets
// OverflowError and returns -1.0 for x, 0 for e.
//
// Export for 'math' shared extension
PyAPI_DATA(double) _PyLong_Frexp(PyLongObject *a, Py_ssize_t *e);

extern PyObject* _PyLong_FromBytes(const char *, Py_ssize_t, int);

// _PyLong_DivmodNear.  Given integers a and b, compute the nearest
// integer q to the exact quotient a / b, rounding to the nearest even integer
// in the case of a tie.  Return (q, r), where r = a - q*b.  The remainder r
// will satisfy abs(r) <= abs(b)/2, with equality possible only if q is
// even.
//
// Export for '_datetime' shared extension.
PyAPI_DATA(PyObject*) _PyLong_DivmodNear(PyObject *, PyObject *);

// _PyLong_Format: Convert the long to a string object with given base,
// appending a base prefix of 0[box] if base is 2, 8 or 16.
// Export for '_tkinter' shared extension.
PyAPI_DATA(PyObject*) _PyLong_Format(PyObject *obj, int base);

// Export for 'math' shared extension
PyAPI_DATA(PyObject*) _PyLong_Rshift(PyObject *, size_t);

// Export for 'math' shared extension
PyAPI_DATA(PyObject*) _PyLong_Lshift(PyObject *, size_t);

PyAPI_FUNC(PyObject*) _PyLong_Add(PyLongObject *left, PyLongObject *right);
PyAPI_FUNC(PyObject*) _PyLong_Multiply(PyLongObject *left, PyLongObject *right);
PyAPI_FUNC(PyObject*) _PyLong_Subtract(PyLongObject *left, PyLongObject *right);

// Export for 'binascii' shared extension.
PyAPI_DATA(unsigned char) _PyLong_DigitValue[256];

/* Format the object based on the format_spec, as defined in PEP 3101
   (Advanced String Formatting). */
extern int _PyLong_FormatAdvancedWriter(
    _PyUnicodeWriter *writer,
    PyObject *obj,
    PyObject *format_spec,
    Py_ssize_t start,
    Py_ssize_t end);

extern int _PyLong_FormatWriter(
    _PyUnicodeWriter *writer,
    PyObject *obj,
    int base,
    int alternate);

extern char* _PyLong_FormatBytesWriter(
    _PyBytesWriter *writer,
    char *str,
    PyObject *obj,
    int base,
    int alternate);

// Argument converters used by Argument Clinic

// Export for 'select' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedShort_Converter(PyObject *, void *);

// Export for '_testclinic' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedInt_Converter(PyObject *, void *);

// Export for '_blake2' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedLong_Converter(PyObject *, void *);

// Export for '_blake2' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_UnsignedLongLong_Converter(PyObject *, void *);

// Export for '_testclinic' shared extension (Argument Clinic code)
PyAPI_FUNC(int) _PyLong_Size_t_Converter(PyObject *, void *);
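A worked example of the _PyLong_DivmodNear contract described above: for a = 7, b = 2 the exact quotient is 3.5, a tie, which rounds to the even integer q = 4 and leaves r = 7 - 4*2 = -1; here |r| = 1 = |b|/2 is allowed precisely because q is even. For a = 5, b = 2 the tie rounds the other way, to q = 2 (even) with r = 1.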
/* Long value tag bits:
 * 0-1: Sign bits value = (1-sign), i.e. negative=2, positive=0, zero=1.
 * 2: Reserved for immortality bit
 * 3+ Unsigned digit count
 */
#define SIGN_MASK 3
#define SIGN_ZERO 1
#define SIGN_NEGATIVE 2
#define NON_SIZE_BITS 3

/* The functions _PyLong_IsCompact and _PyLong_CompactValue are defined
 * in Include/cpython/longobject.h, since they need to be inline.
 *
 * "Compact" values have at least one bit to spare,
 * so that addition and subtraction can be performed on the values
 * without risk of overflow.
 *
 * The inline functions need tag bits.
 * For readability, rather than do `#define SIGN_MASK _PyLong_SIGN_MASK`
 * we define them to the numbers in both places and then assert that
 * they're the same.
 */
#if SIGN_MASK != _PyLong_SIGN_MASK
# error "SIGN_MASK does not match _PyLong_SIGN_MASK"
#endif
#if NON_SIZE_BITS != _PyLong_NON_SIZE_BITS
# error "NON_SIZE_BITS does not match _PyLong_NON_SIZE_BITS"
#endif

/* All "compact" values are guaranteed to fit into
 * a Py_ssize_t with at least one bit to spare.
 * In other words, for 64 bit machines, compact
 * values will be signed 63 (or fewer) bit values.
 */

/* Return 1 if the argument is a non-negative compact int */
static inline int
_PyLong_IsNonNegativeCompact(const PyLongObject* op) {
    assert(PyLong_Check(op));
    return op->long_value.lv_tag <= (1 << NON_SIZE_BITS);
}


static inline int
_PyLong_BothAreCompact(const PyLongObject* a, const PyLongObject* b) {
    assert(PyLong_Check(a));
    assert(PyLong_Check(b));
    return (a->long_value.lv_tag | b->long_value.lv_tag) < (2 << NON_SIZE_BITS);
}

static inline bool
_PyLong_IsZero(const PyLongObject *op)
{
    return (op->long_value.lv_tag & SIGN_MASK) == SIGN_ZERO;
}

static inline bool
_PyLong_IsNegative(const PyLongObject *op)
{
    return (op->long_value.lv_tag & SIGN_MASK) == SIGN_NEGATIVE;
}

static inline bool
_PyLong_IsPositive(const PyLongObject *op)
{
    return (op->long_value.lv_tag & SIGN_MASK) == 0;
}

static inline Py_ssize_t
_PyLong_DigitCount(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    return op->long_value.lv_tag >> NON_SIZE_BITS;
}

/* Equivalent to _PyLong_DigitCount(op) * _PyLong_NonCompactSign(op) */
static inline Py_ssize_t
_PyLong_SignedDigitCount(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    Py_ssize_t sign = 1 - (op->long_value.lv_tag & SIGN_MASK);
    return sign * (Py_ssize_t)(op->long_value.lv_tag >> NON_SIZE_BITS);
}

static inline int
_PyLong_CompactSign(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    assert(_PyLong_IsCompact(op));
    return 1 - (op->long_value.lv_tag & SIGN_MASK);
}

static inline int
_PyLong_NonCompactSign(const PyLongObject *op)
{
    assert(PyLong_Check(op));
    assert(!_PyLong_IsCompact(op));
    return 1 - (op->long_value.lv_tag & SIGN_MASK);
}

/* Do a and b have the same sign? */
static inline int
_PyLong_SameSign(const PyLongObject *a, const PyLongObject *b)
{
    return (a->long_value.lv_tag & SIGN_MASK) == (b->long_value.lv_tag & SIGN_MASK);
}

#define TAG_FROM_SIGN_AND_SIZE(sign, size) ((1 - (sign)) | ((size) << NON_SIZE_BITS))

static inline void
_PyLong_SetSignAndDigitCount(PyLongObject *op, int sign, Py_ssize_t size)
{
    assert(size >= 0);
    assert(-1 <= sign && sign <= 1);
    assert(sign != 0 || size == 0);
    op->long_value.lv_tag = TAG_FROM_SIGN_AND_SIZE(sign, (size_t)size);
}

static inline void
_PyLong_SetDigitCount(PyLongObject *op, Py_ssize_t size)
{
    assert(size >= 0);
    op->long_value.lv_tag = (((size_t)size) << NON_SIZE_BITS) | (op->long_value.lv_tag & SIGN_MASK);
}

#define NON_SIZE_MASK ~((1 << NON_SIZE_BITS) - 1)

static inline void
_PyLong_FlipSign(PyLongObject *op) {
    unsigned int flipped_sign = 2 - (op->long_value.lv_tag & SIGN_MASK);
    op->long_value.lv_tag &= NON_SIZE_MASK;
    op->long_value.lv_tag |= flipped_sign;
}

#define _PyLong_DIGIT_INIT(val) \
    { \
        .ob_base = _PyObject_HEAD_INIT(&PyLong_Type), \
        .long_value = { \
            .lv_tag = TAG_FROM_SIGN_AND_SIZE( \
                (val) == 0 ? 0 : ((val) < 0 ? -1 : 1), \
                (val) == 0 ? 0 : 1), \
            { ((val) >= 0 ? (val) : -(val)) }, \
        } \
    }

#define _PyLong_FALSE_TAG TAG_FROM_SIGN_AND_SIZE(0, 0)
#define _PyLong_TRUE_TAG TAG_FROM_SIGN_AND_SIZE(1, 1)

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LONG_H */
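To make the tag layout above concrete, worked by hand from TAG_FROM_SIGN_AND_SIZE: the value 0 has sign 0 and size 0, giving tag (1 - 0) | (0 << 3) = 1 = SIGN_ZERO (this is also _PyLong_FALSE_TAG); the value -1 has sign -1 and size 1, giving (1 - (-1)) | (1 << 3) = 2 | 8 = 10, so tag & SIGN_MASK = 2 = SIGN_NEGATIVE and tag >> NON_SIZE_BITS = 1 digit; _PyLong_TRUE_TAG is likewise (1 - 1) | (1 << 3) = 8.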
20
Dependencies/Python/include/internal/pycore_memoryobject.h
vendored
Normal file
@@ -0,0 +1,20 @@
#ifndef Py_INTERNAL_MEMORYOBJECT_H
#define Py_INTERNAL_MEMORYOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

extern PyTypeObject _PyManagedBuffer_Type;

PyObject *
_PyMemoryView_FromBufferProc(PyObject *v, int flags,
                             getbufferproc bufferproc);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_MEMORYOBJECT_H */
69
Dependencies/Python/include/internal/pycore_mimalloc.h
vendored
Normal file
@@ -0,0 +1,69 @@
#ifndef Py_INTERNAL_MIMALLOC_H
#define Py_INTERNAL_MIMALLOC_H

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#if defined(MIMALLOC_H) || defined(MIMALLOC_TYPES_H)
#  error "pycore_mimalloc.h must be included before mimalloc.h"
#endif

typedef enum {
    _Py_MIMALLOC_HEAP_MEM = 0,       // PyMem_Malloc() and friends
    _Py_MIMALLOC_HEAP_OBJECT = 1,    // non-GC objects
    _Py_MIMALLOC_HEAP_GC = 2,        // GC objects without pre-header
    _Py_MIMALLOC_HEAP_GC_PRE = 3,    // GC objects with pre-header
    _Py_MIMALLOC_HEAP_COUNT
} _Py_mimalloc_heap_id;

#include "pycore_pymem.h"

#ifdef WITH_MIMALLOC
#  ifdef Py_GIL_DISABLED
#    define MI_PRIM_THREAD_ID _Py_ThreadId
#  endif
#  define MI_DEBUG_UNINIT PYMEM_CLEANBYTE
#  define MI_DEBUG_FREED PYMEM_DEADBYTE
#  define MI_DEBUG_PADDING PYMEM_FORBIDDENBYTE
#ifdef Py_DEBUG
#  define MI_DEBUG 2
#else
#  define MI_DEBUG 0
#endif

#ifdef _Py_THREAD_SANITIZER
#  define MI_TSAN 1
#endif

#ifdef __cplusplus
extern "C++" {
#endif

#include "mimalloc/mimalloc.h"
#include "mimalloc/mimalloc/types.h"
#include "mimalloc/mimalloc/internal.h"

#ifdef __cplusplus
}
#endif

#endif

#ifdef Py_GIL_DISABLED
struct _mimalloc_interp_state {
    // When exiting, threads place any segments with live blocks in this
    // shared pool for other threads to claim and reuse.
    mi_abandoned_pool_t abandoned_pool;
};

struct _mimalloc_thread_state {
    mi_heap_t *current_object_heap;
    mi_heap_t heaps[_Py_MIMALLOC_HEAP_COUNT];
    mi_tld_t tld;
    int initialized;
    struct llist_node page_list;
};
#endif

#endif // Py_INTERNAL_MIMALLOC_H
107
Dependencies/Python/include/internal/pycore_modsupport.h
vendored
Normal file
@@ -0,0 +1,107 @@
#ifndef Py_INTERNAL_MODSUPPORT_H
#define Py_INTERNAL_MODSUPPORT_H

#include "pycore_lock.h"   // _PyOnceFlag

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif


extern int _PyArg_NoKwnames(const char *funcname, PyObject *kwnames);
#define _PyArg_NoKwnames(funcname, kwnames) \
    ((kwnames) == NULL || _PyArg_NoKwnames((funcname), (kwnames)))

// Export for '_bz2' shared extension
PyAPI_FUNC(int) _PyArg_NoPositional(const char *funcname, PyObject *args);
#define _PyArg_NoPositional(funcname, args) \
    ((args) == NULL || _PyArg_NoPositional((funcname), (args)))

// Export for '_asyncio' shared extension
PyAPI_FUNC(int) _PyArg_NoKeywords(const char *funcname, PyObject *kwargs);
#define _PyArg_NoKeywords(funcname, kwargs) \
    ((kwargs) == NULL || _PyArg_NoKeywords((funcname), (kwargs)))

// Export for 'zlib' shared extension
PyAPI_FUNC(int) _PyArg_CheckPositional(const char *, Py_ssize_t,
                                       Py_ssize_t, Py_ssize_t);
#define _Py_ANY_VARARGS(n) ((n) == PY_SSIZE_T_MAX)
#define _PyArg_CheckPositional(funcname, nargs, min, max) \
    ((!_Py_ANY_VARARGS(max) && (min) <= (nargs) && (nargs) <= (max)) \
     || _PyArg_CheckPositional((funcname), (nargs), (min), (max)))

extern PyObject ** _Py_VaBuildStack(
    PyObject **small_stack,
    Py_ssize_t small_stack_len,
    const char *format,
    va_list va,
    Py_ssize_t *p_nargs);

extern PyObject* _PyModule_CreateInitialized(PyModuleDef*, int apiver);

// Export for '_curses' shared extension
PyAPI_FUNC(int) _PyArg_ParseStack(
    PyObject *const *args,
    Py_ssize_t nargs,
    const char *format,
    ...);

extern int _PyArg_UnpackStack(
    PyObject *const *args,
    Py_ssize_t nargs,
    const char *name,
    Py_ssize_t min,
    Py_ssize_t max,
    ...);

// Export for '_heapq' shared extension
PyAPI_FUNC(void) _PyArg_BadArgument(
    const char *fname,
    const char *displayname,
    const char *expected,
    PyObject *arg);

// --- _PyArg_Parser API ---------------------------------------------------

// Export for '_dbm' shared extension
PyAPI_FUNC(int) _PyArg_ParseStackAndKeywords(
    PyObject *const *args,
    Py_ssize_t nargs,
    PyObject *kwnames,
    struct _PyArg_Parser *,
    ...);

// Export for 'math' shared extension
PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywords(
    PyObject *const *args,
    Py_ssize_t nargs,
    PyObject *kwargs,
    PyObject *kwnames,
    struct _PyArg_Parser *parser,
    int minpos,
    int maxpos,
    int minkw,
    PyObject **buf);
#define _PyArg_UnpackKeywords(args, nargs, kwargs, kwnames, parser, minpos, maxpos, minkw, buf) \
    (((minkw) == 0 && (kwargs) == NULL && (kwnames) == NULL && \
      (minpos) <= (nargs) && (nargs) <= (maxpos) && (args) != NULL) ? (args) : \
     _PyArg_UnpackKeywords((args), (nargs), (kwargs), (kwnames), (parser), \
                           (minpos), (maxpos), (minkw), (buf)))

// Export for '_testclinic' shared extension
PyAPI_FUNC(PyObject * const *) _PyArg_UnpackKeywordsWithVararg(
    PyObject *const *args, Py_ssize_t nargs,
    PyObject *kwargs, PyObject *kwnames,
    struct _PyArg_Parser *parser,
    int minpos, int maxpos, int minkw,
    int vararg, PyObject **buf);

#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_MODSUPPORT_H
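The function/macro pairs above follow one pattern: the macro inlines the cheap success test and only calls the exported function on the slow or error path. For _PyArg_CheckPositional, a METH_FASTCALL function might use it as in this minimal sketch (the function demo_add is hypothetical):

    static PyObject *
    demo_add(PyObject *module, PyObject *const *args, Py_ssize_t nargs)
    {
        // fast path: the macro inlines (2 <= nargs && nargs <= 2);
        // only a wrong arity reaches the function, which raises TypeError
        if (!_PyArg_CheckPositional("demo_add", nargs, 2, 2)) {
            return NULL;
        }
        return PyNumber_Add(args[0], args[1]);
    }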
56
Dependencies/Python/include/internal/pycore_moduleobject.h
vendored
Normal file
@@ -0,0 +1,56 @@
#ifndef Py_INTERNAL_MODULEOBJECT_H
#define Py_INTERNAL_MODULEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

extern void _PyModule_Clear(PyObject *);
extern void _PyModule_ClearDict(PyObject *);
extern int _PyModuleSpec_IsInitializing(PyObject *);
extern int _PyModuleSpec_GetFileOrigin(PyObject *, PyObject **);
extern int _PyModule_IsPossiblyShadowing(PyObject *);

extern int _PyModule_IsExtension(PyObject *obj);

typedef struct {
    PyObject_HEAD
    PyObject *md_dict;
    PyModuleDef *md_def;
    void *md_state;
    PyObject *md_weaklist;
    // for logging purposes after md_dict is cleared
    PyObject *md_name;
#ifdef Py_GIL_DISABLED
    void *md_gil;
#endif
} PyModuleObject;

static inline PyModuleDef* _PyModule_GetDef(PyObject *mod) {
    assert(PyModule_Check(mod));
    return ((PyModuleObject *)mod)->md_def;
}

static inline void* _PyModule_GetState(PyObject* mod) {
    assert(PyModule_Check(mod));
    return ((PyModuleObject *)mod)->md_state;
}

static inline PyObject* _PyModule_GetDict(PyObject *mod) {
    assert(PyModule_Check(mod));
    PyObject *dict = ((PyModuleObject *)mod)->md_dict;
    // _PyModule_GetDict(mod) must not be used after calling module_clear(mod)
    assert(dict != NULL);
    return dict;  // borrowed reference
}

PyObject* _Py_module_getattro_impl(PyModuleObject *m, PyObject *name, int suppress);
PyObject* _Py_module_getattro(PyModuleObject *m, PyObject *name);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_MODULEOBJECT_H */
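The accessors above are the internal, assert-checked counterparts of the public PyModule_GetDef()/PyModule_GetState()/PyModule_GetDict() family. For illustration, the usual per-module-state pattern in extension code, using the public analogue of _PyModule_GetState (the state struct and function are hypothetical):

    typedef struct { int counter; } demo_state;   // hypothetical module state

    static PyObject *
    demo_bump(PyObject *module, PyObject *Py_UNUSED(ignored))
    {
        demo_state *st = (demo_state *)PyModule_GetState(module);
        if (st == NULL) {
            return NULL;
        }
        st->counter++;
        return PyLong_FromLong(st->counter);
    }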
21
Dependencies/Python/include/internal/pycore_namespace.h
vendored
Normal file
@@ -0,0 +1,21 @@
// Simple namespace object interface

#ifndef Py_INTERNAL_NAMESPACE_H
#define Py_INTERNAL_NAMESPACE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

extern PyTypeObject _PyNamespace_Type;

// Export for '_testmultiphase' shared extension
PyAPI_FUNC(PyObject*) _PyNamespace_New(PyObject *kwds);

#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_NAMESPACE_H
850
Dependencies/Python/include/internal/pycore_object.h
vendored
Normal file
@@ -0,0 +1,850 @@
#ifndef Py_INTERNAL_OBJECT_H
#define Py_INTERNAL_OBJECT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include <stdbool.h>
#include "pycore_gc.h"                     // _PyObject_GC_IS_TRACKED()
#include "pycore_emscripten_trampoline.h"  // _PyCFunction_TrampolineCall()
#include "pycore_interp.h"                 // PyInterpreterState.gc
#include "pycore_pyatomic_ft_wrappers.h"   // FT_ATOMIC_STORE_PTR_RELAXED
#include "pycore_pystate.h"                // _PyInterpreterState_GET()


#define _Py_IMMORTAL_REFCNT_LOOSE ((_Py_IMMORTAL_REFCNT >> 1) + 1)

// gh-121528, gh-118997: Similar to _Py_IsImmortal(), but more loose when
// comparing the reference count, to stay compatible with C extensions built
// with the stable ABI 3.11 or older. Such extensions implement INCREF/DECREF
// as refcnt++ and refcnt-- without taking immortal objects into account. For
// example, the reference count of an immortal object can change from
// _Py_IMMORTAL_REFCNT to _Py_IMMORTAL_REFCNT+1 (INCREF) or
// _Py_IMMORTAL_REFCNT-1 (DECREF).
//
// This function should only be used in assertions. Otherwise, _Py_IsImmortal()
// must be used instead.
static inline int _Py_IsImmortalLoose(PyObject *op)
{
#if defined(Py_GIL_DISABLED)
    return _Py_IsImmortal(op);
#else
    return (op->ob_refcnt >= _Py_IMMORTAL_REFCNT_LOOSE);
#endif
}
#define _Py_IsImmortalLoose(op) _Py_IsImmortalLoose(_PyObject_CAST(op))

/* Check if an object is consistent. For example, ensure that the reference
   counter is greater than or equal to 1, and ensure that ob_type is not NULL.

   Call _PyObject_AssertFailed() if the object is inconsistent.

   If check_content is zero, only check header fields: reduce the overhead.

   The function always returns 1. The return value is just here to be able to
   write:

   assert(_PyObject_CheckConsistency(obj, 1)); */
extern int _PyObject_CheckConsistency(PyObject *op, int check_content);

extern void _PyDebugAllocatorStats(FILE *out, const char *block_name,
                                   int num_blocks, size_t sizeof_block);

extern void _PyObject_DebugTypeStats(FILE *out);

#ifdef Py_TRACE_REFS
// Forget a reference registered by _Py_NewReference(). Function called by
// _Py_Dealloc().
//
// On a free list, the function can be used before modifying an object to
// remove the object from traced objects. Then _Py_NewReference() or
// _Py_NewReferenceNoTotal() should be called again on the object to trace
// it again.
extern void _Py_ForgetReference(PyObject *);
#endif

// Export for shared _testinternalcapi extension
PyAPI_FUNC(int) _PyObject_IsFreed(PyObject *);

/* We need to maintain an internal copy of Py{Var}Object_HEAD_INIT to avoid
   designated initializer conflicts in C++20. If we use the definition in
   object.h, we will be mixing designated and non-designated initializers in
   pycore objects, which is forbidden in C++20. However, if we then use
   designated initializers in object.h, extensions without designated
   initializers break. Furthermore, we can't use designated initializers in
   extensions since these are not supported pre-C++20. Thus, keeping an
   internal copy here is the most backwards-compatible solution. */
#if defined(Py_GIL_DISABLED)
#define _PyObject_HEAD_INIT(type) \
    { \
        .ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL, \
        .ob_type = (type) \
    }
#else
#define _PyObject_HEAD_INIT(type) \
    { \
        .ob_refcnt = _Py_IMMORTAL_REFCNT, \
        .ob_type = (type) \
    }
#endif
#define _PyVarObject_HEAD_INIT(type, size) \
    { \
        .ob_base = _PyObject_HEAD_INIT(type), \
        .ob_size = size \
    }

PyAPI_FUNC(void) _Py_NO_RETURN _Py_FatalRefcountErrorFunc(
    const char *func,
    const char *message);

#define _Py_FatalRefcountError(message) \
    _Py_FatalRefcountErrorFunc(__func__, (message))

#define _PyReftracerTrack(obj, operation) \
    do { \
        struct _reftracer_runtime_state *tracer = &_PyRuntime.ref_tracer; \
        if (tracer->tracer_func != NULL) { \
            void *data = tracer->tracer_data; \
            tracer->tracer_func((obj), (operation), data); \
        } \
    } while(0)

#ifdef Py_REF_DEBUG
/* The symbol is only exposed in the API for the sake of extensions
   built against the pre-3.12 stable ABI. */
PyAPI_DATA(Py_ssize_t) _Py_RefTotal;

extern void _Py_AddRefTotal(PyThreadState *, Py_ssize_t);
extern void _Py_IncRefTotal(PyThreadState *);
extern void _Py_DecRefTotal(PyThreadState *);

#  define _Py_DEC_REFTOTAL(interp) \
    interp->object_state.reftotal--
#endif

// Increment reference count by n
static inline void _Py_RefcntAdd(PyObject* op, Py_ssize_t n)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
#ifdef Py_REF_DEBUG
    _Py_AddRefTotal(_PyThreadState_GET(), n);
#endif
#if !defined(Py_GIL_DISABLED)
    op->ob_refcnt += n;
#else
    if (_Py_IsOwnedByCurrentThread(op)) {
        uint32_t local = op->ob_ref_local;
        Py_ssize_t refcnt = (Py_ssize_t)local + n;
#  if PY_SSIZE_T_MAX > UINT32_MAX
        if (refcnt > (Py_ssize_t)UINT32_MAX) {
            // Make the object immortal if the 32-bit local reference count
            // would overflow.
            refcnt = _Py_IMMORTAL_REFCNT_LOCAL;
        }
#  endif
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, (uint32_t)refcnt);
    }
    else {
        _Py_atomic_add_ssize(&op->ob_ref_shared, (n << _Py_REF_SHARED_SHIFT));
    }
#endif
    // Although the ref count was increased by `n` (which may be greater than 1)
    // it is only a single increment (i.e. addition) operation, so only 1 refcnt
    // increment operation is counted.
    _Py_INCREF_STAT_INC();
}
#define _Py_RefcntAdd(op, n) _Py_RefcntAdd(_PyObject_CAST(op), n)

extern void _Py_SetImmortal(PyObject *op);
extern void _Py_SetImmortalUntracked(PyObject *op);

// Makes an immortal object mortal again with the specified refcnt. Should only
// be used during runtime finalization.
static inline void _Py_SetMortal(PyObject *op, Py_ssize_t refcnt)
{
    if (op) {
        assert(_Py_IsImmortalLoose(op));
#ifdef Py_GIL_DISABLED
        op->ob_tid = _Py_UNOWNED_TID;
        op->ob_ref_local = 0;
        op->ob_ref_shared = _Py_REF_SHARED(refcnt, _Py_REF_MERGED);
#else
        op->ob_refcnt = refcnt;
#endif
    }
}

/* _Py_ClearImmortal() should only be used during runtime finalization. */
static inline void _Py_ClearImmortal(PyObject *op)
{
    if (op) {
        _Py_SetMortal(op, 1);
        Py_DECREF(op);
    }
}
#define _Py_ClearImmortal(op) \
    do { \
        _Py_ClearImmortal(_PyObject_CAST(op)); \
        op = NULL; \
    } while (0)
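_Py_RefcntAdd above lets a call site take n references with one addition instead of n Py_INCREF calls. A minimal sketch of the kind of caller it serves, filling a new tuple with n references to one object (illustrative only; CPython's sequence-repeat code uses this same shape):

    static PyObject *
    repeat_item(PyObject *item, Py_ssize_t n)   // hypothetical helper
    {
        PyObject *tuple = PyTuple_New(n);
        if (tuple == NULL) {
            return NULL;
        }
        _Py_RefcntAdd(item, n);                // take all n references at once
        for (Py_ssize_t i = 0; i < n; i++) {
            PyTuple_SET_ITEM(tuple, i, item);  // each slot steals one reference
        }
        return tuple;
    }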

// Mark an object as supporting deferred reference counting. This is a no-op
// in the default (with GIL) build. Objects that use deferred reference
// counting should be tracked by the GC so that they are eventually collected.
extern void _PyObject_SetDeferredRefcount(PyObject *op);

static inline int
_PyObject_HasDeferredRefcount(PyObject *op)
{
#ifdef Py_GIL_DISABLED
    return _PyObject_HAS_GC_BITS(op, _PyGC_BITS_DEFERRED);
#else
    return 0;
#endif
}

#if !defined(Py_GIL_DISABLED)
static inline void
_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
    _Py_DECREF_STAT_INC();
#ifdef Py_REF_DEBUG
    _Py_DEC_REFTOTAL(PyInterpreterState_Get());
#endif
    if (--op->ob_refcnt != 0) {
        assert(op->ob_refcnt > 0);
    }
    else {
#ifdef Py_TRACE_REFS
        _Py_ForgetReference(op);
#endif
        _PyReftracerTrack(op, PyRefTracer_DESTROY);
        destruct(op);
    }
}

static inline void
_Py_DECREF_NO_DEALLOC(PyObject *op)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
    _Py_DECREF_STAT_INC();
#ifdef Py_REF_DEBUG
    _Py_DEC_REFTOTAL(PyInterpreterState_Get());
#endif
    op->ob_refcnt--;
#ifdef Py_DEBUG
    if (op->ob_refcnt <= 0) {
        _Py_FatalRefcountError("Expected a positive remaining refcount");
    }
#endif
}

#else
// TODO: implement Py_DECREF specializations for Py_GIL_DISABLED build
static inline void
_Py_DECREF_SPECIALIZED(PyObject *op, const destructor destruct)
{
    Py_DECREF(op);
}

static inline void
_Py_DECREF_NO_DEALLOC(PyObject *op)
{
    Py_DECREF(op);
}

static inline int
_Py_REF_IS_MERGED(Py_ssize_t ob_ref_shared)
{
    return (ob_ref_shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_MERGED;
}

static inline int
_Py_REF_IS_QUEUED(Py_ssize_t ob_ref_shared)
{
    return (ob_ref_shared & _Py_REF_SHARED_FLAG_MASK) == _Py_REF_QUEUED;
}

// Merge the local and shared reference count fields and add `extra` to the
// refcount when merging.
Py_ssize_t _Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra);
#endif // !defined(Py_GIL_DISABLED)

#ifdef Py_REF_DEBUG
#  undef _Py_DEC_REFTOTAL
#endif


extern int _PyType_CheckConsistency(PyTypeObject *type);
extern int _PyDict_CheckConsistency(PyObject *mp, int check_content);

/* Update the Python traceback of an object. This function must be called
   when a memory block is reused from a free list.

   Internal function called by _Py_NewReference(). */
extern int _PyTraceMalloc_TraceRef(PyObject *op, PyRefTracerEvent event, void*);

// Fast inlined version of PyType_HasFeature()
static inline int
_PyType_HasFeature(PyTypeObject *type, unsigned long feature) {
    return ((FT_ATOMIC_LOAD_ULONG_RELAXED(type->tp_flags) & feature) != 0);
}

extern void _PyType_InitCache(PyInterpreterState *interp);

extern PyStatus _PyObject_InitState(PyInterpreterState *interp);
extern void _PyObject_FiniState(PyInterpreterState *interp);
extern bool _PyRefchain_IsTraced(PyInterpreterState *interp, PyObject *obj);

/* Inline functions trading binary compatibility for speed:
   _PyObject_Init() is the fast version of PyObject_Init(), and
   _PyObject_InitVar() is the fast version of PyObject_InitVar().

   These inline functions must not be called with op=NULL. */
static inline void
_PyObject_Init(PyObject *op, PyTypeObject *typeobj)
{
    assert(op != NULL);
    Py_SET_TYPE(op, typeobj);
    assert(_PyType_HasFeature(typeobj, Py_TPFLAGS_HEAPTYPE) || _Py_IsImmortalLoose(typeobj));
    Py_INCREF(typeobj);
    _Py_NewReference(op);
}

static inline void
_PyObject_InitVar(PyVarObject *op, PyTypeObject *typeobj, Py_ssize_t size)
{
    assert(op != NULL);
    assert(typeobj != &PyLong_Type);
    _PyObject_Init((PyObject *)op, typeobj);
    Py_SET_SIZE(op, size);
}


/* Tell the GC to track this object.
 *
 * The object must not be tracked by the GC.
 *
 * NB: While the object is tracked by the collector, it must be safe to call the
 * ob_traverse method.
 *
 * Internal note: interp->gc.generation0->_gc_prev doesn't have any bit flags
 * because it's not an object header. So we don't use _PyGCHead_PREV() and
 * _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
 *
 * See also the public PyObject_GC_Track() function.
 */
static inline void _PyObject_GC_TRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
                          "object already tracked by the garbage collector",
                          filename, lineno, __func__);
#ifdef Py_GIL_DISABLED
    _PyObject_SET_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    _PyObject_ASSERT_FROM(op,
                          (gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
                          "object is in generation which is garbage collected",
                          filename, lineno, __func__);

    PyInterpreterState *interp = _PyInterpreterState_GET();
    PyGC_Head *generation0 = interp->gc.generation0;
    PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev);
    _PyGCHead_SET_NEXT(last, gc);
    _PyGCHead_SET_PREV(gc, last);
    _PyGCHead_SET_NEXT(gc, generation0);
    generation0->_gc_prev = (uintptr_t)gc;
#endif
}

/* Tell the GC to stop tracking this object.
 *
 * Internal note: This may be called while the GC is running, so the
 * _PyGC_PREV_MASK_COLLECTING bit must be cleared, but the
 * _PyGC_PREV_MASK_FINALIZED bit is kept.
 *
 * The object must be tracked by the GC.
 *
 * See also the public PyObject_GC_UnTrack(), which accepts an object that is
 * not tracked.
 */
static inline void _PyObject_GC_UNTRACK(
// The preprocessor removes _PyObject_ASSERT_FROM() calls if NDEBUG is defined
#ifndef NDEBUG
    const char *filename, int lineno,
#endif
    PyObject *op)
{
    _PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
                          "object not tracked by the garbage collector",
                          filename, lineno, __func__);

#ifdef Py_GIL_DISABLED
    _PyObject_CLEAR_GC_BITS(op, _PyGC_BITS_TRACKED);
#else
    PyGC_Head *gc = _Py_AS_GC(op);
    PyGC_Head *prev = _PyGCHead_PREV(gc);
    PyGC_Head *next = _PyGCHead_NEXT(gc);
    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);
    gc->_gc_next = 0;
    gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
#endif
}

// Macros to accept any type for the parameter, and to automatically pass
// the filename and the line number (if NDEBUG is not defined) where the macro
// is called.
#ifdef NDEBUG
#  define _PyObject_GC_TRACK(op) \
        _PyObject_GC_TRACK(_PyObject_CAST(op))
#  define _PyObject_GC_UNTRACK(op) \
        _PyObject_GC_UNTRACK(_PyObject_CAST(op))
#else
#  define _PyObject_GC_TRACK(op) \
        _PyObject_GC_TRACK(__FILE__, __LINE__, _PyObject_CAST(op))
#  define _PyObject_GC_UNTRACK(op) \
        _PyObject_GC_UNTRACK(__FILE__, __LINE__, _PyObject_CAST(op))
#endif
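A typical call-site shape for the macros above: track a container only once its fields are valid (so tp_traverse is safe), and untrack it first thing in the deallocator. An illustrative sketch with a hypothetical one-field container type and type object:

    typedef struct {
        PyObject_HEAD
        PyObject *item;
    } pair_half;                                   // hypothetical type

    static PyObject *
    pair_half_new(PyObject *item)
    {
        // PairHalf_Type is a hypothetical GC-enabled type object
        pair_half *self = PyObject_GC_New(pair_half, &PairHalf_Type);
        if (self == NULL) {
            return NULL;
        }
        self->item = Py_NewRef(item);
        _PyObject_GC_TRACK(self);      // fields valid: safe to traverse now
        return (PyObject *)self;
    }

    static void
    pair_half_dealloc(pair_half *self)
    {
        _PyObject_GC_UNTRACK(self);    // stop tracking before tearing down
        Py_CLEAR(self->item);
        PyObject_GC_Del(self);
    }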

#ifdef Py_GIL_DISABLED

/* Tries to increment an object's reference count
 *
 * This is a specialized version of _Py_TryIncref that only succeeds if the
 * object is immortal or local to this thread. It does not handle the case
 * where the reference count modification requires an atomic operation. This
 * allows call sites to specialize for the immortal/local case.
 */
static inline int
_Py_TryIncrefFast(PyObject *op) {
    uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local);
    local += 1;
    if (local == 0) {
        // immortal
        return 1;
    }
    if (_Py_IsOwnedByCurrentThread(op)) {
        _Py_INCREF_STAT_INC();
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, local);
#ifdef Py_REF_DEBUG
        _Py_IncRefTotal(_PyThreadState_GET());
#endif
        return 1;
    }
    return 0;
}

static inline int
_Py_TryIncRefShared(PyObject *op)
{
    Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
    for (;;) {
        // If the shared refcount is zero and the object is either merged
        // or may not have weak references, then we cannot incref it.
        if (shared == 0 || shared == _Py_REF_MERGED) {
            return 0;
        }

        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared,
                &shared,
                shared + (1 << _Py_REF_SHARED_SHIFT))) {
#ifdef Py_REF_DEBUG
            _Py_IncRefTotal(_PyThreadState_GET());
#endif
            _Py_INCREF_STAT_INC();
            return 1;
        }
    }
}

/* Tries to incref the object op and ensures that *src still points to it. */
static inline int
_Py_TryIncrefCompare(PyObject **src, PyObject *op)
{
    if (_Py_TryIncrefFast(op)) {
        return 1;
    }
    if (!_Py_TryIncRefShared(op)) {
        return 0;
    }
    if (op != _Py_atomic_load_ptr(src)) {
        Py_DECREF(op);
        return 0;
    }
    return 1;
}

/* Loads and increfs an object from ptr, which may contain a NULL value.
   Safe with concurrent (atomic) updates to ptr.
   NOTE: The writer must set maybe-weakref on the stored object! */
static inline PyObject *
_Py_XGetRef(PyObject **ptr)
{
    for (;;) {
        PyObject *value = _Py_atomic_load_ptr(ptr);
        if (value == NULL) {
            return value;
        }
        if (_Py_TryIncrefCompare(ptr, value)) {
            return value;
        }
    }
}

/* Attempts to load and incref an object from ptr. Returns NULL
   on failure, which may be due to a NULL value or a concurrent update. */
static inline PyObject *
_Py_TryXGetRef(PyObject **ptr)
{
    PyObject *value = _Py_atomic_load_ptr(ptr);
    if (value == NULL) {
        return value;
    }
    if (_Py_TryIncrefCompare(ptr, value)) {
        return value;
    }
    return NULL;
}

/* Like Py_NewRef but also optimistically sets _Py_REF_MAYBE_WEAKREF
   on objects owned by a different thread. */
static inline PyObject *
_Py_NewRefWithLock(PyObject *op)
{
    if (_Py_TryIncrefFast(op)) {
        return op;
    }
#ifdef Py_REF_DEBUG
    _Py_IncRefTotal(_PyThreadState_GET());
#endif
    _Py_INCREF_STAT_INC();
    for (;;) {
        Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
        Py_ssize_t new_shared = shared + (1 << _Py_REF_SHARED_SHIFT);
        if ((shared & _Py_REF_SHARED_FLAG_MASK) == 0) {
            new_shared |= _Py_REF_MAYBE_WEAKREF;
        }
        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared,
                &shared,
                new_shared)) {
            return op;
        }
    }
}

static inline PyObject *
_Py_XNewRefWithLock(PyObject *obj)
{
    if (obj == NULL) {
        return NULL;
    }
    return _Py_NewRefWithLock(obj);
}

static inline void
_PyObject_SetMaybeWeakref(PyObject *op)
{
    if (_Py_IsImmortal(op)) {
        return;
    }
    for (;;) {
        Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
        if ((shared & _Py_REF_SHARED_FLAG_MASK) != 0) {
            // Nothing to do if it's in WEAKREFS, QUEUED, or MERGED states.
            return;
        }
        if (_Py_atomic_compare_exchange_ssize(
                &op->ob_ref_shared, &shared, shared | _Py_REF_MAYBE_WEAKREF)) {
            return;
        }
    }
}

extern int _PyObject_ResurrectEndSlow(PyObject *op);
#endif
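Taken together, the helpers above support a lock-free published-slot pattern: the writer marks the value with _PyObject_SetMaybeWeakref (per the NOTE on _Py_XGetRef) before storing it, and readers obtain a strong reference with _Py_XGetRef. A minimal sketch around a hypothetical shared slot, assuming the writer side is serialized by some lock and that the release-store atomic from pycore_pyatomic is available:

    static PyObject *slot;                      // hypothetical shared location

    static void
    publish(PyObject *value)
    {
        _PyObject_SetMaybeWeakref(value);       // required before readers race
        _Py_atomic_store_ptr_release(&slot, Py_NewRef(value));
    }

    static PyObject *
    consume(void)
    {
        return _Py_XGetRef(&slot);  // strong reference or NULL; retries races
    }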

// Temporarily resurrects an object during deallocation. The refcount is set
// to one.
static inline void
_PyObject_ResurrectStart(PyObject *op)
{
    assert(Py_REFCNT(op) == 0);
#ifdef Py_REF_DEBUG
    _Py_IncRefTotal(_PyThreadState_GET());
#endif
#ifdef Py_GIL_DISABLED
    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, _Py_ThreadId());
    _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, 1);
    _Py_atomic_store_ssize_relaxed(&op->ob_ref_shared, 0);
#else
    Py_SET_REFCNT(op, 1);
#endif
}

// Undoes an object resurrection by decrementing the refcount without calling
// _Py_Dealloc(). Returns 0 if the object is dead (the normal case), and
// deallocation should continue. Returns 1 if the object is still alive.
static inline int
_PyObject_ResurrectEnd(PyObject *op)
{
#ifdef Py_REF_DEBUG
    _Py_DecRefTotal(_PyThreadState_GET());
#endif
#ifndef Py_GIL_DISABLED
    Py_SET_REFCNT(op, Py_REFCNT(op) - 1);
    return Py_REFCNT(op) != 0;
#else
    uint32_t local = _Py_atomic_load_uint32_relaxed(&op->ob_ref_local);
    Py_ssize_t shared = _Py_atomic_load_ssize_acquire(&op->ob_ref_shared);
    if (_Py_IsOwnedByCurrentThread(op) && local == 1 && shared == 0) {
        // Fast-path: object has a single refcount and is owned by this thread
        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, 0);
        return 0;
    }
    // Slow-path: object has a shared refcount or is not owned by this thread
    return _PyObject_ResurrectEndSlow(op);
#endif
}
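The pair above brackets deallocation code that may create new references to the dying object, most commonly a finalizer call. CPython deallocators normally get this behavior via PyObject_CallFinalizerFromDealloc(); the sketch below only shows the intended bracketing (illustrative, not a complete tp_dealloc):

    static void
    demo_dealloc(PyObject *self)                 // hypothetical deallocator
    {
        _PyObject_ResurrectStart(self);          // refcount 0 -> 1 while we call out
        PyObject_CallFinalizer(self);            // may store new refs to self
        if (_PyObject_ResurrectEnd(self)) {
            return;                              // resurrected: object stays alive
        }
        Py_TYPE(self)->tp_free(self);            // really dead: release storage
    }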

/* Tries to incref op and returns 1 if successful or 0 otherwise. */
static inline int
_Py_TryIncref(PyObject *op)
{
#ifdef Py_GIL_DISABLED
    return _Py_TryIncrefFast(op) || _Py_TryIncRefShared(op);
#else
    if (Py_REFCNT(op) > 0) {
        Py_INCREF(op);
        return 1;
    }
    return 0;
#endif
}

#ifdef Py_REF_DEBUG
extern void _PyInterpreterState_FinalizeRefTotal(PyInterpreterState *);
extern void _Py_FinalizeRefTotal(_PyRuntimeState *);
extern void _PyDebug_PrintTotalRefs(void);
#endif

#ifdef Py_TRACE_REFS
extern void _Py_AddToAllObjects(PyObject *op);
extern void _Py_PrintReferences(PyInterpreterState *, FILE *);
extern void _Py_PrintReferenceAddresses(PyInterpreterState *, FILE *);
#endif


/* Return the *address* of the object's weaklist.  The address may be
 * dereferenced to get the current head of the weaklist.  This is useful
 * for iterating over the linked list of weakrefs, especially when the
 * list is being modified externally (e.g. refs getting removed).
 *
 * The returned pointer should not be used to change the head of the list
 * nor should it be used to add, remove, or swap any refs in the list.
 * That is the sole responsibility of the code in weakrefobject.c.
 */
static inline PyObject **
_PyObject_GET_WEAKREFS_LISTPTR(PyObject *op)
{
    if (PyType_Check(op) &&
            ((PyTypeObject *)op)->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) {
        PyInterpreterState *interp = _PyInterpreterState_GET();
        managed_static_type_state *state = _PyStaticType_GetState(
                                                interp, (PyTypeObject *)op);
        return _PyStaticType_GET_WEAKREFS_LISTPTR(state);
    }
    // Essentially _PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET():
    Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
    return (PyObject **)((char *)op + offset);
}

/* This is a special case of _PyObject_GET_WEAKREFS_LISTPTR().
 * Only the most fundamental lookup path is used.
 * Consequently, static types should not be used.
 *
 * For static builtin types the returned pointer will always point
 * to a NULL tp_weaklist.  This is fine for any deallocation cases,
 * since static types are never deallocated and static builtin types
 * are only finalized at the end of runtime finalization.
 *
 * If the weaklist for static types is actually needed then use
 * _PyObject_GET_WEAKREFS_LISTPTR().
 */
static inline PyWeakReference **
_PyObject_GET_WEAKREFS_LISTPTR_FROM_OFFSET(PyObject *op)
{
    assert(!PyType_Check(op) ||
            ((PyTypeObject *)op)->tp_flags & Py_TPFLAGS_HEAPTYPE);
    Py_ssize_t offset = Py_TYPE(op)->tp_weaklistoffset;
    return (PyWeakReference **)((char *)op + offset);
}

// Fast inlined version of PyObject_IS_GC()
static inline int
_PyObject_IS_GC(PyObject *obj)
{
    PyTypeObject *type = Py_TYPE(obj);
    return (PyType_IS_GC(type)
            && (type->tp_is_gc == NULL || type->tp_is_gc(obj)));
}

// Fast inlined version of PyObject_Hash()
static inline Py_hash_t
_PyObject_HashFast(PyObject *op)
{
    if (PyUnicode_CheckExact(op)) {
        Py_hash_t hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(
                _PyASCIIObject_CAST(op)->hash);
        if (hash != -1) {
            return hash;
        }
    }
    return PyObject_Hash(op);
}

// Fast inlined version of PyType_IS_GC()
#define _PyType_IS_GC(t) _PyType_HasFeature((t), Py_TPFLAGS_HAVE_GC)

static inline size_t
_PyType_PreHeaderSize(PyTypeObject *tp)
{
    return (
#ifndef Py_GIL_DISABLED
        _PyType_IS_GC(tp) * sizeof(PyGC_Head) +
#endif
        _PyType_HasFeature(tp, Py_TPFLAGS_PREHEADER) * 2 * sizeof(PyObject *)
    );
}

void _PyObject_GC_Link(PyObject *op);

// Usage: assert(_Py_CheckSlotResult(obj, "__getitem__", result != NULL));
extern int _Py_CheckSlotResult(
    PyObject *obj,
    const char *slot_name,
    int success);

// Test if a type supports weak references
static inline int _PyType_SUPPORTS_WEAKREFS(PyTypeObject *type) {
    return (type->tp_weaklistoffset != 0);
}

extern PyObject* _PyType_AllocNoTrack(PyTypeObject *type, Py_ssize_t nitems);
extern PyObject *_PyType_NewManagedObject(PyTypeObject *type);

extern PyTypeObject* _PyType_CalculateMetaclass(PyTypeObject *, PyObject *);
extern PyObject* _PyType_GetDocFromInternalDoc(const char *, const char *);
extern PyObject* _PyType_GetTextSignatureFromInternalDoc(const char *, const char *, int);
extern int _PyObject_SetAttributeErrorContext(PyObject *v, PyObject* name);

void _PyObject_InitInlineValues(PyObject *obj, PyTypeObject *tp);
extern int _PyObject_StoreInstanceAttribute(PyObject *obj,
                                            PyObject *name, PyObject *value);
extern bool _PyObject_TryGetInstanceAttribute(PyObject *obj, PyObject *name,
                                              PyObject **attr);

#ifdef Py_GIL_DISABLED
#  define MANAGED_DICT_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-1)
#  define MANAGED_WEAKREF_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-2)
#else
#  define MANAGED_DICT_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-3)
#  define MANAGED_WEAKREF_OFFSET (((Py_ssize_t)sizeof(PyObject *))*-4)
#endif

typedef union {
    PyDictObject *dict;
} PyManagedDictPointer;

static inline PyManagedDictPointer *
_PyObject_ManagedDictPointer(PyObject *obj)
{
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
    return (PyManagedDictPointer *)((char *)obj + MANAGED_DICT_OFFSET);
}

static inline PyDictObject *
_PyObject_GetManagedDict(PyObject *obj)
{
    PyManagedDictPointer *dorv = _PyObject_ManagedDictPointer(obj);
    return (PyDictObject *)FT_ATOMIC_LOAD_PTR_ACQUIRE(dorv->dict);
}

static inline PyDictValues *
_PyObject_InlineValues(PyObject *obj)
{
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_INLINE_VALUES);
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
    assert(Py_TYPE(obj)->tp_basicsize == sizeof(PyObject));
    return (PyDictValues *)((char *)obj + sizeof(PyObject));
}

extern PyObject ** _PyObject_ComputedDictPointer(PyObject *);
extern int _PyObject_IsInstanceDictEmpty(PyObject *);

// Export for 'math' shared extension
PyAPI_FUNC(PyObject*) _PyObject_LookupSpecial(PyObject *, PyObject *);

extern int _PyObject_IsAbstract(PyObject *);

PyAPI_FUNC(int) _PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
extern PyObject* _PyObject_NextNotImplemented(PyObject *);

// Pickle support.
// Export for '_datetime' shared extension
PyAPI_FUNC(PyObject*) _PyObject_GetState(PyObject *);

/* C function call trampolines to mitigate bad function pointer casts.
 *
 * Typical native ABIs ignore additional arguments or fill in missing
 * values with 0/NULL in a function pointer cast. Compilers do not show
 * warnings when a function pointer is explicitly cast to an
 * incompatible type.
 *
 * Bad fpcasts are an issue in WebAssembly. WASM's indirect_call has strict
 * function signature checks. Argument count, types, and return type must
 * match.
 *
 * Third party code unintentionally relies on problematic fpcasts. The call
 * trampoline mitigates common occurrences of bad fpcasts on Emscripten.
 */
#if !(defined(__EMSCRIPTEN__) && defined(PY_CALL_TRAMPOLINE))
#define _PyCFunction_TrampolineCall(meth, self, args) \
    (meth)((self), (args))
#define _PyCFunctionWithKeywords_TrampolineCall(meth, self, args, kw) \
    (meth)((self), (args), (kw))
#endif // __EMSCRIPTEN__ && PY_CALL_TRAMPOLINE

// Export these 2 symbols for '_pickle' shared extension
PyAPI_DATA(PyTypeObject) _PyNone_Type;
PyAPI_DATA(PyTypeObject) _PyNotImplemented_Type;

// Maps Py_LT to Py_GT, ..., Py_GE to Py_LE.
// Export for the stable ABI.
PyAPI_DATA(int) _Py_SwappedOp[];

extern void _Py_GetConstant_Init(void);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OBJECT_H */
71
Dependencies/Python/include/internal/pycore_object_alloc.h
vendored
Normal file
@@ -0,0 +1,71 @@
#ifndef Py_INTERNAL_OBJECT_ALLOC_H
#define Py_INTERNAL_OBJECT_ALLOC_H

#include "pycore_object.h"   // _PyType_HasFeature()
#include "pycore_pystate.h"  // _PyThreadState_GET()
#include "pycore_tstate.h"   // _PyThreadStateImpl

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#ifdef Py_GIL_DISABLED
static inline mi_heap_t *
_PyObject_GetAllocationHeap(_PyThreadStateImpl *tstate, PyTypeObject *tp)
{
    struct _mimalloc_thread_state *m = &tstate->mimalloc;
    if (_PyType_HasFeature(tp, Py_TPFLAGS_PREHEADER)) {
        return &m->heaps[_Py_MIMALLOC_HEAP_GC_PRE];
    }
    else if (_PyType_IS_GC(tp)) {
        return &m->heaps[_Py_MIMALLOC_HEAP_GC];
    }
    else {
        return &m->heaps[_Py_MIMALLOC_HEAP_OBJECT];
    }
}
#endif

// Sets the heap used for PyObject_Malloc(), PyObject_Realloc(), etc. calls in
// Py_GIL_DISABLED builds. We use different heaps depending on whether the
// object supports GC and whether it has a pre-header. We smuggle the choice
// of heap through the _mimalloc_thread_state. In the default build, this
// simply calls PyObject_Malloc().
static inline void *
_PyObject_MallocWithType(PyTypeObject *tp, size_t size)
{
#ifdef Py_GIL_DISABLED
    _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
    struct _mimalloc_thread_state *m = &tstate->mimalloc;
    m->current_object_heap = _PyObject_GetAllocationHeap(tstate, tp);
#endif
    void *mem = PyObject_Malloc(size);
#ifdef Py_GIL_DISABLED
    m->current_object_heap = &m->heaps[_Py_MIMALLOC_HEAP_OBJECT];
#endif
    return mem;
}

static inline void *
_PyObject_ReallocWithType(PyTypeObject *tp, void *ptr, size_t size)
{
#ifdef Py_GIL_DISABLED
    _PyThreadStateImpl *tstate = (_PyThreadStateImpl *)_PyThreadState_GET();
    struct _mimalloc_thread_state *m = &tstate->mimalloc;
    m->current_object_heap = _PyObject_GetAllocationHeap(tstate, tp);
#endif
    void *mem = PyObject_Realloc(ptr, size);
#ifdef Py_GIL_DISABLED
    m->current_object_heap = &m->heaps[_Py_MIMALLOC_HEAP_OBJECT];
#endif
    return mem;
}

#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_OBJECT_ALLOC_H
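The smuggling described in the comment above exists because PyObject_Malloc()'s signature cannot carry the type, so the heap choice travels through the per-thread state instead. A minimal hypothetical call site (illustrative only):

    // Allocate instance storage for `tp`, landing in the GC, GC-pre-header,
    // or plain-object heap as appropriate in free-threaded builds.
    static void *
    alloc_instance_storage(PyTypeObject *tp, size_t basicsize)   // hypothetical
    {
        void *mem = _PyObject_MallocWithType(tp, basicsize);
        if (mem == NULL) {
            PyErr_NoMemory();
        }
        return mem;
    }

In the default build this compiles down to a plain PyObject_Malloc() call, so call sites can use it unconditionally.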
97
Dependencies/Python/include/internal/pycore_object_stack.h
vendored
Normal file
@@ -0,0 +1,97 @@
#ifndef Py_INTERNAL_OBJECT_STACK_H
#define Py_INTERNAL_OBJECT_STACK_H

#include "pycore_freelist.h"   // _PyFreeListState

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

// _PyObjectStack is a stack of Python objects implemented as a linked list of
// fixed size buffers.

// Chosen so that _PyObjectStackChunk is a power-of-two size.
#define _Py_OBJECT_STACK_CHUNK_SIZE 254

typedef struct _PyObjectStackChunk {
    struct _PyObjectStackChunk *prev;
    Py_ssize_t n;
    PyObject *objs[_Py_OBJECT_STACK_CHUNK_SIZE];
} _PyObjectStackChunk;

typedef struct _PyObjectStack {
    _PyObjectStackChunk *head;
} _PyObjectStack;


extern _PyObjectStackChunk *
_PyObjectStackChunk_New(void);

extern void
_PyObjectStackChunk_Free(_PyObjectStackChunk *);

// Push an item onto the stack. Return -1 on allocation failure, 0 on success.
static inline int
_PyObjectStack_Push(_PyObjectStack *stack, PyObject *obj)
{
    _PyObjectStackChunk *buf = stack->head;
    if (buf == NULL || buf->n == _Py_OBJECT_STACK_CHUNK_SIZE) {
        buf = _PyObjectStackChunk_New();
        if (buf == NULL) {
            return -1;
        }
        buf->prev = stack->head;
        buf->n = 0;
        stack->head = buf;
    }

    assert(buf->n >= 0 && buf->n < _Py_OBJECT_STACK_CHUNK_SIZE);
    buf->objs[buf->n] = obj;
    buf->n++;
    return 0;
}

// Pop the top item from the stack. Return NULL if the stack is empty.
static inline PyObject *
_PyObjectStack_Pop(_PyObjectStack *stack)
{
    _PyObjectStackChunk *buf = stack->head;
    if (buf == NULL) {
        return NULL;
    }
    assert(buf->n > 0 && buf->n <= _Py_OBJECT_STACK_CHUNK_SIZE);
    buf->n--;
    PyObject *obj = buf->objs[buf->n];
    if (buf->n == 0) {
        stack->head = buf->prev;
        _PyObjectStackChunk_Free(buf);
    }
    return obj;
}

static inline Py_ssize_t
_PyObjectStack_Size(_PyObjectStack *stack)
{
    Py_ssize_t size = 0;
    for (_PyObjectStackChunk *buf = stack->head; buf != NULL; buf = buf->prev) {
        size += buf->n;
    }
    return size;
}

// Merge src into dst, leaving src empty
extern void
_PyObjectStack_Merge(_PyObjectStack *dst, _PyObjectStack *src);

// Remove all items from the stack
extern void
_PyObjectStack_Clear(_PyObjectStack *stack);

#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_OBJECT_STACK_H
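A short illustrative driver for the stack above: push a few objects, then drain with pop, which frees each chunk as it empties (a common shape in the free-threaded GC; this exact loop is a sketch):

    static int
    process_pair(_PyObjectStack *stack, PyObject *a, PyObject *b)   // hypothetical
    {
        if (_PyObjectStack_Push(stack, a) < 0 ||
            _PyObjectStack_Push(stack, b) < 0) {
            _PyObjectStack_Clear(stack);
            return -1;                  // allocation failure
        }
        PyObject *obj;
        while ((obj = _PyObjectStack_Pop(stack)) != NULL) {
            // ... process obj here ...
        }
        return 0;
    }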
41
Dependencies/Python/include/internal/pycore_object_state.h
vendored
Normal file
@@ -0,0 +1,41 @@
#ifndef Py_INTERNAL_OBJECT_STATE_H
#define Py_INTERNAL_OBJECT_STATE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#  error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_freelist.h"   // _PyObject_freelists
#include "pycore_hashtable.h"  // _Py_hashtable_t

struct _py_object_runtime_state {
#ifdef Py_REF_DEBUG
    Py_ssize_t interpreter_leaks;
#endif
    int _not_used;
};

struct _py_object_state {
#if !defined(Py_GIL_DISABLED)
    struct _Py_object_freelists freelists;
#endif
#ifdef Py_REF_DEBUG
    Py_ssize_t reftotal;
#endif
#ifdef Py_TRACE_REFS
    // Hash table storing all objects. The key is the object pointer
    // (PyObject*) and the value is always the number 1 (as uintptr_t).
    // See _PyRefchain_IsTraced() and _PyRefchain_Trace() functions.
    _Py_hashtable_t *refchain;
#endif
    int _not_used;
};


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OBJECT_STATE_H */
702
Dependencies/Python/include/internal/pycore_obmalloc.h
vendored
Normal file
@@ -0,0 +1,702 @@
#ifndef Py_INTERNAL_OBMALLOC_H
#define Py_INTERNAL_OBMALLOC_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


typedef unsigned int pymem_uint;  /* assuming >= 16 bits */

#undef  uint
#define uint pymem_uint


/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
        _______________________________       |                           |
       [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
     __________________________________   __________________________________
    [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 *  thing at any level, the results will not be good. And if we don't make the
 *  levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerate in time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 *      allocator.
 */
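The table above is just the arithmetic consequence of 8-byte class spacing. A hedged sketch of the mapping (the demo_ names are illustrative; the real header only defines the INDEX2SIZE direction, further below):

/* Illustrative only: map a small request to its size class and back,
 * assuming ALIGNMENT == 8 and ALIGNMENT_SHIFT == 3 as in the table above. */
static inline unsigned int demo_size2idx(size_t nbytes) {
    return (unsigned int)((nbytes - 1) >> 3);   /* e.g. 1..8 -> 0, 9..16 -> 1 */
}
static inline size_t demo_idx2size(unsigned int idx) {
    return (size_t)(idx + 1) << 3;              /* e.g. 63 -> 512 */
}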
/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-bytes alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */

#if SIZEOF_VOID_P > 4
#define ALIGNMENT              16               /* must be 2^N */
#define ALIGNMENT_SHIFT         4
#else
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#endif

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((pymem_uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

#if !defined(WITH_PYMALLOC_RADIX_TREE)
/* Use radix-tree to track arena memory regions, for address_in_range().
 * Enable by default since it allows larger pool sizes. Can be disabled
 * using -DWITH_PYMALLOC_RADIX_TREE=0 */
#define WITH_PYMALLOC_RADIX_TREE 1
#endif

#if SIZEOF_VOID_P > 4
/* on 64-bit platforms use larger pools and arenas if we can */
#define USE_LARGE_ARENAS
#if WITH_PYMALLOC_RADIX_TREE
/* large pools only supported if radix-tree is enabled */
#define USE_LARGE_POOLS
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages within
 * this space are referenced subsequently. So malloc'ing big blocks and not
 * using them does not mean "wasting memory". It's an addressable range
 * wastage...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#ifdef USE_LARGE_ARENAS
#define ARENA_BITS              20                    /* 1 MiB */
#else
#define ARENA_BITS              18                    /* 256 KiB */
#endif
#define ARENA_SIZE              (1 << ARENA_BITS)
#define ARENA_SIZE_MASK         (ARENA_SIZE - 1)

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Must be a power of 2.
 */
#ifdef USE_LARGE_POOLS
#define POOL_BITS               14                  /* 16 KiB */
#else
#define POOL_BITS               12                  /* 4 KiB */
#endif
#define POOL_SIZE               (1 << POOL_BITS)
#define POOL_SIZE_MASK          (POOL_SIZE - 1)

#if !WITH_PYMALLOC_RADIX_TREE
#if POOL_SIZE != SYSTEM_PAGE_SIZE
#   error "pool size must be equal to system page size"
#endif
#endif

#define MAX_POOLS_IN_ARENA  (ARENA_SIZE / POOL_SIZE)
#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE
#   error "arena size not an exact multiple of pool size"
#endif

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t pymem_block;

/* Pool for small blocks. */
struct pool_header {
    union { pymem_block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    pymem_block *freeblock;             /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    pymem_block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((pymem_uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
/*==========================================================================*/

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member. The prevpool member has no meaning in this case.
    Empty pools have no inherent size class: the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with the
fact that pymalloc strives at all levels (arena, pool, and block) never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpool[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(pymem_block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { pymem_block *_padding;
            uint count; } ref;
    pymem_block *freeblock;

each of which consume sizeof(pymem_block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define OBMALLOC_USED_POOLS_SIZE (2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8)

struct _obmalloc_pools {
    poolp used[OBMALLOC_USED_POOLS_SIZE];
};


/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them). Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty. Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available. These pools are either waiting to be reused,
    or have not been used before. The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.

Changed in Python 3.8: keeping usable_arenas sorted by number of free pools
used to be done by one-at-a-time linear search when an arena's number of
free pools changed. That could, overall, consume time quadratic in the
number of arenas. That didn't really matter when there were only a few
hundred arenas (typical!), but could be a timing disaster when there were
hundreds of thousands. See bpo-37029.

Now we have a vector of "search fingers" to eliminate the need to search:
nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas
with nfp free pools. This is NULL if and only if there is no arena with
nfp free pools in usable_arenas.
*/
/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

struct _obmalloc_mgmt {
    /* Array of objects used to track chunks of memory (arenas). */
    struct arena_object* arenas;
    /* Number of slots currently allocated in the `arenas` vector. */
    uint maxarenas;

    /* The head of the singly-linked, NULL-terminated list of available
     * arena_objects.
     */
    struct arena_object* unused_arena_objects;

    /* The head of the doubly-linked, NULL-terminated at each end, list of
     * arena_objects associated with arenas that have pools available.
     */
    struct arena_object* usable_arenas;

    /* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */
    struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1];

    /* Number of arenas allocated that haven't been free()'d. */
    size_t narenas_currently_allocated;

    /* Total number of times malloc() called to allocate an arena. */
    size_t ntimes_arena_allocated;
    /* High water mark (max value ever seen) for narenas_currently_allocated. */
    size_t narenas_highwater;

    Py_ssize_t raw_allocated_blocks;
};
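The "search finger" table replaces an O(arenas) list walk with O(1) bookkeeping. A hedged sketch of the idea (illustrative only; the real maintenance logic lives in obmalloc.c and handles more cases):

/* Illustrative only: after an arena's nfreepools changes from old_nfp,
 * the finger table can be patched without searching usable_arenas. */
static void
demo_update_finger(struct _obmalloc_mgmt *m, struct arena_object *a, uint old_nfp)
{
    if (m->nfp2lasta[old_nfp] == a) {
        /* a was the rightmost arena with old_nfp free pools; the slot now
         * points at a's left neighbor if that neighbor shares the count,
         * else no arena has exactly old_nfp free pools any more. */
        m->nfp2lasta[old_nfp] = (a->prevarena != NULL
                                 && a->prevarena->nfreepools == old_nfp)
                                    ? a->prevarena : NULL;
    }
    /* a becomes the new rightmost arena for its new count. */
    m->nfp2lasta[a->nfreepools] = a;
}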

#if WITH_PYMALLOC_RADIX_TREE
/*==========================================================================*/
/* radix tree for tracking arena usage. If enabled, used to implement
   address_in_range().

   memory address bit allocation for keys

   64-bit pointers, IGNORE_BITS=0 and 2^20 arena size:
     15 -> MAP_TOP_BITS
     15 -> MAP_MID_BITS
     14 -> MAP_BOT_BITS
     20 -> ideal aligned arena
   ----
     64

   64-bit pointers, IGNORE_BITS=16, and 2^20 arena size:
     16 -> IGNORE_BITS
     10 -> MAP_TOP_BITS
     10 -> MAP_MID_BITS
      8 -> MAP_BOT_BITS
     20 -> ideal aligned arena
   ----
     64

   32-bit pointers and 2^18 arena size:
     14 -> MAP_BOT_BITS
     18 -> ideal aligned arena
   ----
     32

*/

#if SIZEOF_VOID_P == 8

/* number of bits in a pointer */
#define POINTER_BITS 64

/* High bits of memory addresses that will be ignored when indexing into the
 * radix tree. Setting this to zero is the safe default. For most 64-bit
 * machines, setting this to 16 would be safe. The kernel would not give
 * user-space virtual memory addresses that have significant information in
 * those high bits. The main advantage to setting IGNORE_BITS > 0 is that less
 * virtual memory will be used for the top and middle radix tree arrays. Those
 * arrays are allocated in the BSS segment and so will typically consume real
 * memory only if actually accessed.
 */
#define IGNORE_BITS 0

/* use the top and mid layers of the radix tree */
#define USE_INTERIOR_NODES

#elif SIZEOF_VOID_P == 4

#define POINTER_BITS 32
#define IGNORE_BITS 0

#else

/* Currently this code works for 64-bit or 32-bit pointers only. */
#error "obmalloc radix tree requires 64-bit or 32-bit pointers."

#endif /* SIZEOF_VOID_P */

/* arena_coverage_t members require this to be true */
#if ARENA_BITS >= 32
#   error "arena size must be < 2^32"
#endif

/* the lower bits of the address that are not ignored */
#define ADDRESS_BITS (POINTER_BITS - IGNORE_BITS)

#ifdef USE_INTERIOR_NODES
/* number of bits used for MAP_TOP and MAP_MID nodes */
#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
#else
#define INTERIOR_BITS 0
#endif

#define MAP_TOP_BITS INTERIOR_BITS
#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS)
#define MAP_TOP_MASK (MAP_TOP_LENGTH - 1)

#define MAP_MID_BITS INTERIOR_BITS
#define MAP_MID_LENGTH (1 << MAP_MID_BITS)
#define MAP_MID_MASK (MAP_MID_LENGTH - 1)

#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS)
#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)

#define MAP_BOT_SHIFT ARENA_BITS
#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT)
#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT)

#define AS_UINT(p) ((uintptr_t)(p))
#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK)
#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK)
#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK)

#if IGNORE_BITS > 0
/* Return the ignored part of the pointer address. Those bits should be same
 * for all valid pointers if IGNORE_BITS is set correctly.
 */
#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS)
#else
#define HIGH_BITS(p) 0
#endif

/* This is the leaf of the radix tree. See arena_map_mark_used() for the
 * meaning of these members. */
typedef struct {
    int32_t tail_hi;
    int32_t tail_lo;
} arena_coverage_t;

typedef struct arena_map_bot {
    /* The members tail_hi and tail_lo are accessed together. So, it is
     * better to have them as an array of structs, rather than two
     * arrays.
     */
    arena_coverage_t arenas[MAP_BOT_LENGTH];
} arena_map_bot_t;

#ifdef USE_INTERIOR_NODES
typedef struct arena_map_mid {
    struct arena_map_bot *ptrs[MAP_MID_LENGTH];
} arena_map_mid_t;

typedef struct arena_map_top {
    struct arena_map_mid *ptrs[MAP_TOP_LENGTH];
} arena_map_top_t;
#endif

struct _obmalloc_usage {
    /* The root of radix tree. Note that by initializing like this, the memory
     * should be in the BSS. The OS will only memory map pages as the MAP_MID
     * nodes get used (OS pages are demand loaded as needed).
     */
#ifdef USE_INTERIOR_NODES
    arena_map_top_t arena_map_root;
    /* accounting for number of used interior nodes */
    int arena_map_mid_count;
    int arena_map_bot_count;
#else
    arena_map_bot_t arena_map_root;
#endif
};

#endif /* WITH_PYMALLOC_RADIX_TREE */
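Concretely, the MAP_*_INDEX macros slice one pointer into successive bit fields that select a node at each tree level. A hedged sketch of the lookup walk (the demo_ name is illustrative; the real walk lives in obmalloc.c):

/* Illustrative only: descend the radix tree levels for an address. The
 * returned bottom node is then indexed with MAP_BOT_INDEX(p). */
static struct arena_map_bot *
demo_lookup(struct _obmalloc_usage *u, void *p)
{
#ifdef USE_INTERIOR_NODES
    arena_map_mid_t *mid = u->arena_map_root.ptrs[MAP_TOP_INDEX(p)];
    if (mid == NULL) {
        return NULL;            /* no arena has ever covered this range */
    }
    return mid->ptrs[MAP_MID_INDEX(p)];
#else
    return &u->arena_map_root;  /* 32-bit builds use a single bottom node */
#endif
}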

struct _obmalloc_global_state {
    int dump_debug_stats;
    Py_ssize_t interpreter_leaks;
};

struct _obmalloc_state {
    struct _obmalloc_pools pools;
    struct _obmalloc_mgmt mgmt;
#if WITH_PYMALLOC_RADIX_TREE
    struct _obmalloc_usage usage;
#endif
};


#undef uint


/* Allocate memory directly from the O/S virtual memory system,
 * where supported. Otherwise fall back on malloc */
void *_PyObject_VirtualAlloc(size_t size);
void _PyObject_VirtualFree(void *, size_t size);


/* This function returns the number of allocated memory blocks, regardless of size */
extern Py_ssize_t _Py_GetGlobalAllocatedBlocks(void);
#define _Py_GetAllocatedBlocks() \
    _Py_GetGlobalAllocatedBlocks()
extern Py_ssize_t _PyInterpreterState_GetAllocatedBlocks(PyInterpreterState *);
extern void _PyInterpreterState_FinalizeAllocatedBlocks(PyInterpreterState *);
extern int _PyMem_init_obmalloc(PyInterpreterState *interp);
extern bool _PyMem_obmalloc_state_on_heap(PyInterpreterState *interp);


#ifdef WITH_PYMALLOC
// Export the symbol for the 3rd party 'guppy3' project
PyAPI_FUNC(int) _PyObject_DebugMallocStats(FILE *out);
#endif


#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_OBMALLOC_H
66
Dependencies/Python/include/internal/pycore_obmalloc_init.h
vendored
Normal file
@@ -0,0 +1,66 @@
#ifndef Py_INTERNAL_OBMALLOC_INIT_H
#define Py_INTERNAL_OBMALLOC_INIT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


/****************************************************/
/* the default object allocator's state initializer */

#define PTA(pools, x) \
    ((poolp )((uint8_t *)&(pools.used[2*(x)]) - 2*sizeof(pymem_block *)))
#define PT(p, x)  PTA(p, x), PTA(p, x)

#define PT_8(p, start) \
    PT(p, start), \
    PT(p, start+1), \
    PT(p, start+2), \
    PT(p, start+3), \
    PT(p, start+4), \
    PT(p, start+5), \
    PT(p, start+6), \
    PT(p, start+7)

#if NB_SMALL_SIZE_CLASSES <= 8
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0) }
#elif NB_SMALL_SIZE_CLASSES <= 16
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8) }
#elif NB_SMALL_SIZE_CLASSES <= 24
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16) }
#elif NB_SMALL_SIZE_CLASSES <= 32
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24) }
#elif NB_SMALL_SIZE_CLASSES <= 40
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32) }
#elif NB_SMALL_SIZE_CLASSES <= 48
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40) }
#elif NB_SMALL_SIZE_CLASSES <= 56
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48) }
#elif NB_SMALL_SIZE_CLASSES <= 64
#  define _obmalloc_pools_INIT(p) \
    { PT_8(p, 0), PT_8(p, 8), PT_8(p, 16), PT_8(p, 24), PT_8(p, 32), PT_8(p, 40), PT_8(p, 48), PT_8(p, 56) }
#else
#  error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif

#define _obmalloc_global_state_INIT \
    { \
        .dump_debug_stats = -1, \
    }


#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_OBMALLOC_INIT_H
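The PTA/PT trickery realizes the "fudged-up pointer" described in pycore_obmalloc.h: after static initialization, each conceptual list header points back at itself, so every usedpools[] list starts out empty. A hedged sketch of the invariant this establishes (the demo_ name is illustrative):

/* Illustrative only: the value stored in used[2*szidx] is the fudged poolp
 * whose nextpool and prevpool members alias used[2*szidx] and used[2*szidx+1]. */
static void demo_check_empty(struct _obmalloc_pools *pools, int szidx) {
    poolp header = pools->used[2 * szidx];   /* "double the index" */
    assert(header->nextpool == header);      /* empty circular list: next is self */
    assert(header->prevpool == header);      /* empty circular list: prev is self */
}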
1922
Dependencies/Python/include/internal/pycore_opcode_metadata.h
vendored
Normal file
File diff suppressed because it is too large
73
Dependencies/Python/include/internal/pycore_opcode_utils.h
vendored
Normal file
@@ -0,0 +1,73 @@
#ifndef Py_INTERNAL_OPCODE_UTILS_H
#define Py_INTERNAL_OPCODE_UTILS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "opcode_ids.h"

#define MAX_REAL_OPCODE 254

#define IS_WITHIN_OPCODE_RANGE(opcode) \
        (((opcode) >= 0 && (opcode) <= MAX_REAL_OPCODE) || \
         IS_PSEUDO_INSTR(opcode))

#define IS_BLOCK_PUSH_OPCODE(opcode) \
        ((opcode) == SETUP_FINALLY || \
         (opcode) == SETUP_WITH || \
         (opcode) == SETUP_CLEANUP)

#define HAS_TARGET(opcode) \
        (OPCODE_HAS_JUMP(opcode) || IS_BLOCK_PUSH_OPCODE(opcode))

/* opcodes that must be last in the basicblock */
#define IS_TERMINATOR_OPCODE(opcode) \
        (OPCODE_HAS_JUMP(opcode) || IS_SCOPE_EXIT_OPCODE(opcode))

/* opcodes which are not emitted in codegen stage, only by the assembler */
#define IS_ASSEMBLER_OPCODE(opcode) \
        ((opcode) == JUMP_FORWARD || \
         (opcode) == JUMP_BACKWARD || \
         (opcode) == JUMP_BACKWARD_NO_INTERRUPT)

#define IS_BACKWARDS_JUMP_OPCODE(opcode) \
        ((opcode) == JUMP_BACKWARD || \
         (opcode) == JUMP_BACKWARD_NO_INTERRUPT)

#define IS_UNCONDITIONAL_JUMP_OPCODE(opcode) \
        ((opcode) == JUMP || \
         (opcode) == JUMP_NO_INTERRUPT || \
         (opcode) == JUMP_FORWARD || \
         (opcode) == JUMP_BACKWARD || \
         (opcode) == JUMP_BACKWARD_NO_INTERRUPT)

#define IS_SCOPE_EXIT_OPCODE(opcode) \
        ((opcode) == RETURN_VALUE || \
         (opcode) == RETURN_CONST || \
         (opcode) == RAISE_VARARGS || \
         (opcode) == RERAISE)


/* Flags used in the oparg for MAKE_FUNCTION */
#define MAKE_FUNCTION_DEFAULTS    0x01
#define MAKE_FUNCTION_KWDEFAULTS  0x02
#define MAKE_FUNCTION_ANNOTATIONS 0x04
#define MAKE_FUNCTION_CLOSURE     0x08

/* Values used in the oparg for RESUME */
#define RESUME_AT_FUNC_START 0
#define RESUME_AFTER_YIELD 1
#define RESUME_AFTER_YIELD_FROM 2
#define RESUME_AFTER_AWAIT 3

#define RESUME_OPARG_LOCATION_MASK 0x3
#define RESUME_OPARG_DEPTH1_MASK 0x4

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OPCODE_UTILS_H */
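The oparg constants at the end of this header are plain bit fields. A hedged sketch of how a consumer would decode them (the demo_ name is illustrative):

/* Illustrative only: unpack a RESUME oparg into its two fields. */
static void demo_decode_resume(int oparg) {
    int location = oparg & RESUME_OPARG_LOCATION_MASK;   /* 0..3: resume site */
    int depth1   = (oparg & RESUME_OPARG_DEPTH1_MASK) != 0;
    assert(location >= RESUME_AT_FUNC_START && location <= RESUME_AFTER_AWAIT);
    (void)depth1;
}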
272
Dependencies/Python/include/internal/pycore_optimizer.h
vendored
Normal file
@@ -0,0 +1,272 @@
#ifndef Py_INTERNAL_OPTIMIZER_H
#define Py_INTERNAL_OPTIMIZER_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_uop_ids.h"
#include <stdbool.h>


typedef struct _PyExecutorLinkListNode {
    struct _PyExecutorObject *next;
    struct _PyExecutorObject *previous;
} _PyExecutorLinkListNode;


/* Bloom filter with m = 256
 * https://en.wikipedia.org/wiki/Bloom_filter */
#define BLOOM_FILTER_WORDS 8

typedef struct _bloom_filter {
    uint32_t bits[BLOOM_FILTER_WORDS];
} _PyBloomFilter;
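A Bloom filter answers "possibly seen" / "definitely not seen" by setting a few hash-derived bits per element. A hedged, generic sketch of the idea over the m = 256 bits above (the hash handling here is illustrative, not CPython's actual scheme):

/* Illustrative only: set and test one hash-derived bit out of 256. */
static void demo_bloom_add(_PyBloomFilter *bf, uintptr_t hash) {
    bf->bits[(hash >> 5) & 7] |= (uint32_t)1 << (hash & 31);  /* word, then bit */
}
static int demo_bloom_maybe_contains(const _PyBloomFilter *bf, uintptr_t hash) {
    return (bf->bits[(hash >> 5) & 7] >> (hash & 31)) & 1;    /* 0 => definitely absent */
}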
typedef struct {
    uint8_t opcode;
    uint8_t oparg;
    uint8_t valid;
    uint8_t linked;
    int index;           // Index of ENTER_EXECUTOR (if code isn't NULL, below).
    _PyBloomFilter bloom;
    _PyExecutorLinkListNode links;
    PyCodeObject *code;  // Weak (NULL if no corresponding ENTER_EXECUTOR).
} _PyVMData;

#define UOP_FORMAT_TARGET 0
#define UOP_FORMAT_EXIT 1
#define UOP_FORMAT_JUMP 2
#define UOP_FORMAT_UNUSED 3

/* Depending on the format,
 * the 32 bits between the oparg and operand are:
 * UOP_FORMAT_TARGET:
 *    uint32_t target;
 * UOP_FORMAT_EXIT
 *    uint16_t exit_index;
 *    uint16_t error_target;
 * UOP_FORMAT_JUMP
 *    uint16_t jump_target;
 *    uint16_t error_target;
 */
typedef struct {
    uint16_t opcode:14;
    uint16_t format:2;
    uint16_t oparg;
    union {
        uint32_t target;
        struct {
            union {
                uint16_t exit_index;
                uint16_t jump_target;
            };
            uint16_t error_target;
        };
    };
    uint64_t operand;  // A cache entry
} _PyUOpInstruction;

static inline uint32_t uop_get_target(const _PyUOpInstruction *inst)
{
    assert(inst->format == UOP_FORMAT_TARGET);
    return inst->target;
}

static inline uint16_t uop_get_exit_index(const _PyUOpInstruction *inst)
{
    assert(inst->format == UOP_FORMAT_EXIT);
    return inst->exit_index;
}

static inline uint16_t uop_get_jump_target(const _PyUOpInstruction *inst)
{
    assert(inst->format == UOP_FORMAT_JUMP);
    return inst->jump_target;
}

static inline uint16_t uop_get_error_target(const _PyUOpInstruction *inst)
{
    assert(inst->format != UOP_FORMAT_TARGET);
    return inst->error_target;
}

typedef struct _exit_data {
    uint32_t target;
    _Py_BackoffCounter temperature;
    const struct _PyExecutorObject *executor;
} _PyExitData;

typedef struct _PyExecutorObject {
    PyObject_VAR_HEAD
    const _PyUOpInstruction *trace;
    _PyVMData vm_data;           /* Used by the VM, but opaque to the optimizer */
    uint32_t exit_count;
    uint32_t code_size;
    size_t jit_size;
    void *jit_code;
    void *jit_side_entry;
    _PyExitData exits[1];
} _PyExecutorObject;

typedef struct _PyOptimizerObject _PyOptimizerObject;

/* Should return > 0 if a new executor is created. 0 if no executor is produced and < 0 if an error occurred. */
typedef int (*optimize_func)(
    _PyOptimizerObject* self, struct _PyInterpreterFrame *frame,
    _Py_CODEUNIT *instr, _PyExecutorObject **exec_ptr,
    int curr_stackentries);

struct _PyOptimizerObject {
    PyObject_HEAD
    optimize_func optimize;
    /* Data needed by the optimizer goes here, but is opaque to the VM */
};

/** Test support **/
typedef struct {
    _PyOptimizerObject base;
    int64_t count;
} _PyCounterOptimizerObject;

_PyOptimizerObject *_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject* optimizer);

PyAPI_FUNC(int) _Py_SetTier2Optimizer(_PyOptimizerObject* optimizer);

PyAPI_FUNC(_PyOptimizerObject *) _Py_GetOptimizer(void);

PyAPI_FUNC(_PyExecutorObject *) _Py_GetExecutor(PyCodeObject *code, int offset);

void _Py_ExecutorInit(_PyExecutorObject *, const _PyBloomFilter *);
void _Py_ExecutorDetach(_PyExecutorObject *);
void _Py_BloomFilter_Init(_PyBloomFilter *);
void _Py_BloomFilter_Add(_PyBloomFilter *bloom, void *obj);
PyAPI_FUNC(void) _Py_Executor_DependsOn(_PyExecutorObject *executor, void *obj);
/* For testing */
PyAPI_FUNC(PyObject *) _PyOptimizer_NewCounter(void);
PyAPI_FUNC(PyObject *) _PyOptimizer_NewUOpOptimizer(void);

#define _Py_MAX_ALLOWED_BUILTINS_MODIFICATIONS 3
#define _Py_MAX_ALLOWED_GLOBALS_MODIFICATIONS 6

#ifdef _Py_TIER2
PyAPI_FUNC(void) _Py_Executors_InvalidateDependency(PyInterpreterState *interp, void *obj, int is_invalidation);
PyAPI_FUNC(void) _Py_Executors_InvalidateAll(PyInterpreterState *interp, int is_invalidation);
#else
#  define _Py_Executors_InvalidateDependency(A, B, C) ((void)0)
#  define _Py_Executors_InvalidateAll(A, B) ((void)0)
#endif


// This is the length of the trace we project initially.
#define UOP_MAX_TRACE_LENGTH 800

#define TRACE_STACK_SIZE 5

int _Py_uop_analyze_and_optimize(struct _PyInterpreterFrame *frame,
    _PyUOpInstruction *trace, int trace_len, int curr_stackentries,
    _PyBloomFilter *dependencies);

extern PyTypeObject _PyCounterExecutor_Type;
extern PyTypeObject _PyCounterOptimizer_Type;
extern PyTypeObject _PyDefaultOptimizer_Type;
extern PyTypeObject _PyUOpExecutor_Type;
extern PyTypeObject _PyUOpOptimizer_Type;

/* Symbols */
/* See explanation in optimizer_symbols.c */

struct _Py_UopsSymbol {
    int flags;            // 0 bits: Top; 2 or more bits: Bottom
    PyTypeObject *typ;    // Borrowed reference
    PyObject *const_val;  // Owned reference (!)
};

// Holds locals, stack, locals, stack ... co_consts (in that order)
#define MAX_ABSTRACT_INTERP_SIZE 4096

#define TY_ARENA_SIZE (UOP_MAX_TRACE_LENGTH * 5)

// Need extras for root frame and for overflow frame (see TRACE_STACK_PUSH())
#define MAX_ABSTRACT_FRAME_DEPTH (TRACE_STACK_SIZE + 2)

typedef struct _Py_UopsSymbol _Py_UopsSymbol;

struct _Py_UOpsAbstractFrame {
    // Max stacklen
    int stack_len;
    int locals_len;

    _Py_UopsSymbol **stack_pointer;
    _Py_UopsSymbol **stack;
    _Py_UopsSymbol **locals;
};

typedef struct _Py_UOpsAbstractFrame _Py_UOpsAbstractFrame;

typedef struct ty_arena {
    int ty_curr_number;
    int ty_max_number;
    _Py_UopsSymbol arena[TY_ARENA_SIZE];
} ty_arena;

struct _Py_UOpsContext {
    PyObject_HEAD
    // The current "executing" frame.
    _Py_UOpsAbstractFrame *frame;
    _Py_UOpsAbstractFrame frames[MAX_ABSTRACT_FRAME_DEPTH];
    int curr_frame_depth;

    // Arena for the symbolic types.
    ty_arena t_arena;

    _Py_UopsSymbol **n_consumed;
    _Py_UopsSymbol **limit;
    _Py_UopsSymbol *locals_and_stack[MAX_ABSTRACT_INTERP_SIZE];
};

typedef struct _Py_UOpsContext _Py_UOpsContext;

extern bool _Py_uop_sym_is_null(_Py_UopsSymbol *sym);
extern bool _Py_uop_sym_is_not_null(_Py_UopsSymbol *sym);
extern bool _Py_uop_sym_is_const(_Py_UopsSymbol *sym);
extern PyObject *_Py_uop_sym_get_const(_Py_UopsSymbol *sym);
extern _Py_UopsSymbol *_Py_uop_sym_new_unknown(_Py_UOpsContext *ctx);
extern _Py_UopsSymbol *_Py_uop_sym_new_not_null(_Py_UOpsContext *ctx);
extern _Py_UopsSymbol *_Py_uop_sym_new_type(
    _Py_UOpsContext *ctx, PyTypeObject *typ);
extern _Py_UopsSymbol *_Py_uop_sym_new_const(_Py_UOpsContext *ctx, PyObject *const_val);
extern _Py_UopsSymbol *_Py_uop_sym_new_null(_Py_UOpsContext *ctx);
extern bool _Py_uop_sym_has_type(_Py_UopsSymbol *sym);
extern bool _Py_uop_sym_matches_type(_Py_UopsSymbol *sym, PyTypeObject *typ);
extern bool _Py_uop_sym_set_null(_Py_UopsSymbol *sym);
extern bool _Py_uop_sym_set_non_null(_Py_UopsSymbol *sym);
extern bool _Py_uop_sym_set_type(_Py_UopsSymbol *sym, PyTypeObject *typ);
extern bool _Py_uop_sym_set_const(_Py_UopsSymbol *sym, PyObject *const_val);
extern bool _Py_uop_sym_is_bottom(_Py_UopsSymbol *sym);
extern int _Py_uop_sym_truthiness(_Py_UopsSymbol *sym);
extern PyTypeObject *_Py_uop_sym_get_type(_Py_UopsSymbol *sym);


extern int _Py_uop_abstractcontext_init(_Py_UOpsContext *ctx);
extern void _Py_uop_abstractcontext_fini(_Py_UOpsContext *ctx);

extern _Py_UOpsAbstractFrame *_Py_uop_frame_new(
    _Py_UOpsContext *ctx,
    PyCodeObject *co,
    int curr_stackentries,
    _Py_UopsSymbol **args,
    int arg_len);
extern int _Py_uop_frame_pop(_Py_UOpsContext *ctx);

PyAPI_FUNC(PyObject *) _Py_uop_symbols_test(PyObject *self, PyObject *ignored);

PyAPI_FUNC(int) _PyOptimizer_Optimize(struct _PyInterpreterFrame *frame, _Py_CODEUNIT *start, PyObject **stack_pointer, _PyExecutorObject **exec_ptr);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OPTIMIZER_H */
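The optimize_func contract above (> 0 new executor, 0 nothing, < 0 error) is easy to get wrong. A hedged sketch of a do-nothing callback (illustrative only, loosely modeled on the counter optimizer the header declares for testing):

/* Illustrative only: an optimize_func that never produces an executor. */
static int
demo_optimize(_PyOptimizerObject *self, struct _PyInterpreterFrame *frame,
              _Py_CODEUNIT *instr, _PyExecutorObject **exec_ptr,
              int curr_stackentries)
{
    (void)self; (void)frame; (void)instr; (void)curr_stackentries;
    *exec_ptr = NULL;
    return 0;   /* 0 == no executor produced; the VM keeps interpreting */
}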
97
Dependencies/Python/include/internal/pycore_parking_lot.h
vendored
Normal file
@@ -0,0 +1,97 @@
// ParkingLot is an internal API for building efficient synchronization
// primitives like mutexes and events.
//
// The API and name is inspired by WebKit's WTF::ParkingLot, which in turn
// is inspired by Linux's futex API.
// See https://webkit.org/blog/6161/locking-in-webkit/.
//
// The core functionality is an atomic "compare-and-sleep" operation along with
// an atomic "wake-up" operation.

#ifndef Py_INTERNAL_PARKING_LOT_H
#define Py_INTERNAL_PARKING_LOT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


enum {
    // The thread was unparked by another thread.
    Py_PARK_OK = 0,

    // The value of `address` did not match `expected`.
    Py_PARK_AGAIN = -1,

    // The thread was unparked due to a timeout.
    Py_PARK_TIMEOUT = -2,

    // The thread was interrupted by a signal.
    Py_PARK_INTR = -3,
};

// Checks that `*address == *expected` and puts the thread to sleep until an
// unpark operation is called on the same `address`. Otherwise, the function
// returns `Py_PARK_AGAIN`. The comparison behaves like memcmp, but is
// performed atomically with respect to unpark operations.
//
// The `address_size` argument is the size of the data pointed to by the
// `address` and `expected` pointers (i.e., sizeof(*address)). It must be
// 1, 2, 4, or 8.
//
// The `timeout_ns` argument specifies the maximum amount of time to wait, with
// -1 indicating an infinite wait.
//
// `park_arg`, which can be NULL, is passed to the unpark operation.
//
// If `detach` is true, then the thread will detach/release the GIL while
// waiting.
//
// Example usage:
//
//  if (_Py_atomic_compare_exchange_uint8(address, &expected, new_value)) {
//      int res = _PyParkingLot_Park(address, &new_value, sizeof(*address),
//                                   timeout_ns, NULL, 1);
//      ...
//  }
PyAPI_FUNC(int)
_PyParkingLot_Park(const void *address, const void *expected,
                   size_t address_size, PyTime_t timeout_ns,
                   void *park_arg, int detach);

// Callback for _PyParkingLot_Unpark:
//
// `arg` is the data of the same name provided to the _PyParkingLot_Unpark()
// call.
// `park_arg` is the data provided to the _PyParkingLot_Park() call, or NULL if
// no waiting thread was found.
// `has_more_waiters` is true if there are more threads waiting on the same
// address. May be true in cases where threads are waiting on a different
// address that maps to the same internal bucket.
typedef void _Py_unpark_fn_t(void *arg, void *park_arg, int has_more_waiters);

// Unparks a single thread waiting on `address`.
//
// Note that fn() is called regardless of whether a thread was unparked. If
// no threads are waiting on `address` then the `park_arg` argument to fn()
// will be NULL.
//
// Example usage:
//  void callback(void *arg, void *park_arg, int has_more_waiters);
//  _PyParkingLot_Unpark(address, &callback, arg);
PyAPI_FUNC(void)
_PyParkingLot_Unpark(const void *address, _Py_unpark_fn_t *fn, void *arg);

// Unparks all threads waiting on `address`.
PyAPI_FUNC(void) _PyParkingLot_UnparkAll(const void *address);

// Resets the parking lot state after a fork. Forgets all parked threads.
PyAPI_FUNC(void) _PyParkingLot_AfterFork(void);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PARKING_LOT_H */
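Building on the compare-and-sleep contract above, here is a hedged sketch of a one-byte, one-shot event (the demo_ names are illustrative; real CPython events are built in lock.c):

/* Illustrative only: a one-shot event flag built on park/unpark. */
static void demo_event_wait(uint8_t *flag) {
    uint8_t unset = 0;
    while (_Py_atomic_load_uint8(flag) == 0) {
        // Sleeps only if *flag is still 0 at park time; Py_PARK_AGAIN means
        // the flag changed between the load and the park, so re-check.
        (void)_PyParkingLot_Park(flag, &unset, sizeof(*flag), -1, NULL, 1);
    }
}
static void demo_event_set(uint8_t *flag) {
    _Py_atomic_store_uint8(flag, 1);
    _PyParkingLot_UnparkAll(flag);  // wake every waiter parked on this address
}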
95
Dependencies/Python/include/internal/pycore_parser.h
vendored
Normal file
@@ -0,0 +1,95 @@
#ifndef Py_INTERNAL_PARSER_H
#define Py_INTERNAL_PARSER_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


#include "pycore_ast.h"             // struct _expr
#include "pycore_global_strings.h"  // _Py_DECLARE_STR()
#include "pycore_pyarena.h"         // PyArena


#ifdef Py_DEBUG
#define _PYPEGEN_NSTATISTICS 2000
#endif

struct _parser_runtime_state {
#ifdef Py_DEBUG
    long memo_statistics[_PYPEGEN_NSTATISTICS];
#ifdef Py_GIL_DISABLED
    PyMutex mutex;
#endif
#else
    int _not_used;
#endif
    struct _expr dummy_name;
};

_Py_DECLARE_STR(empty, "")
#if defined(Py_DEBUG) && defined(Py_GIL_DISABLED)
#define _parser_runtime_state_INIT \
    { \
        .mutex = {0}, \
        .dummy_name = { \
            .kind = Name_kind, \
            .v.Name.id = &_Py_STR(empty), \
            .v.Name.ctx = Load, \
            .lineno = 1, \
            .col_offset = 0, \
            .end_lineno = 1, \
            .end_col_offset = 0, \
        }, \
    }
#else
#define _parser_runtime_state_INIT \
    { \
        .dummy_name = { \
            .kind = Name_kind, \
            .v.Name.id = &_Py_STR(empty), \
            .v.Name.ctx = Load, \
            .lineno = 1, \
            .col_offset = 0, \
            .end_lineno = 1, \
            .end_col_offset = 0, \
        }, \
    }
#endif

extern struct _mod* _PyParser_ASTFromString(
    const char *str,
    PyObject* filename,
    int mode,
    PyCompilerFlags *flags,
    PyArena *arena);

extern struct _mod* _PyParser_ASTFromFile(
    FILE *fp,
    PyObject *filename_ob,
    const char *enc,
    int mode,
    const char *ps1,
    const char *ps2,
    PyCompilerFlags *flags,
    int *errcode,
    PyArena *arena);
extern struct _mod* _PyParser_InteractiveASTFromFile(
    FILE *fp,
    PyObject *filename_ob,
    const char *enc,
    int mode,
    const char *ps1,
    const char *ps2,
    PyCompilerFlags *flags,
    int *errcode,
    PyObject **interactive_src,
    PyArena *arena);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PARSER_H */
26
Dependencies/Python/include/internal/pycore_pathconfig.h
vendored
Normal file
@@ -0,0 +1,26 @@
#ifndef Py_INTERNAL_PATHCONFIG_H
#define Py_INTERNAL_PATHCONFIG_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(void) _PyPathConfig_ClearGlobal(void);

extern PyStatus _PyPathConfig_ReadGlobal(PyConfig *config);
extern PyStatus _PyPathConfig_UpdateGlobal(const PyConfig *config);
extern const wchar_t * _PyPathConfig_GetGlobalModuleSearchPath(void);

extern int _PyPathConfig_ComputeSysPath0(
    const PyWideStringList *argv,
    PyObject **path0);


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PATHCONFIG_H */
68
Dependencies/Python/include/internal/pycore_pyarena.h
vendored
Normal file
@@ -0,0 +1,68 @@
// An arena-like memory interface for the compiler.

#ifndef Py_INTERNAL_PYARENA_H
#define Py_INTERNAL_PYARENA_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

typedef struct _arena PyArena;

// _PyArena_New() and _PyArena_Free() create a new arena and free it,
// respectively.  Once an arena has been created, it can be used
// to allocate memory via _PyArena_Malloc().  Pointers to PyObject can
// also be registered with the arena via _PyArena_AddPyObject(), and the
// arena will ensure that the PyObjects stay alive at least until
// _PyArena_Free() is called.  When an arena is freed, all the memory it
// allocated is freed, the arena releases internal references to registered
// PyObject*, and none of its pointers are valid.
// XXX (tim) What does "none of its pointers are valid" mean?  Does it
// XXX mean that pointers previously obtained via _PyArena_Malloc() are
// XXX no longer valid?  (That's clearly true, but not sure that's what
// XXX the text is trying to say.)
//
// _PyArena_New() returns an arena pointer.  On error, it
// returns a negative number and sets an exception.
// XXX (tim): Not true.  On error, _PyArena_New() actually returns NULL,
// XXX and looks like it may or may not set an exception (e.g., if the
// XXX internal PyList_New(0) returns NULL, _PyArena_New() passes that on
// XXX and an exception is set; OTOH, if the internal
// XXX block_new(DEFAULT_BLOCK_SIZE) returns NULL, that's passed on but
// XXX an exception is not set in that case).
//
// Export for test_peg_generator
PyAPI_FUNC(PyArena*) _PyArena_New(void);

// Export for test_peg_generator
PyAPI_FUNC(void) _PyArena_Free(PyArena *);

// Mostly like malloc(), return the address of a block of memory spanning
// `size` bytes, or return NULL (without setting an exception) if enough
// new memory can't be obtained.  Unlike malloc(0), _PyArena_Malloc() with
// size=0 does not guarantee to return a unique pointer (the pointer
// returned may equal one or more other pointers obtained from
// _PyArena_Malloc()).
// Note that pointers obtained via _PyArena_Malloc() must never be passed to
// the system free() or realloc(), or to any of Python's similar memory-
// management functions.  _PyArena_Malloc()-obtained pointers remain valid
// until _PyArena_Free(ar) is called, at which point all pointers obtained
// from the arena `ar` become invalid simultaneously.
//
// Export for test_peg_generator
PyAPI_FUNC(void*) _PyArena_Malloc(PyArena *, size_t size);

// This routine isn't a proper arena allocation routine.  It takes
// a PyObject* and records it so that it can be DECREFed when the
// arena is freed.
//
// Export for test_peg_generator
PyAPI_FUNC(int) _PyArena_AddPyObject(PyArena *, PyObject *);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYARENA_H */
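The comment block above describes the full arena lifecycle; a minimal sketch of that pattern follows (a hypothetical core-internal helper, not part of the vendored file; error handling abbreviated):

static PyObject *
demo_arena_use(void)
{
    PyArena *arena = _PyArena_New();
    if (arena == NULL) {
        return NULL;                  // exception may or may not be set
    }
    char *buf = (char *)_PyArena_Malloc(arena, 256);
    if (buf == NULL) {
        _PyArena_Free(arena);         // frees every arena allocation
        return NULL;
    }
    PyObject *name = PyUnicode_FromString("example");  // hypothetical payload
    if (name == NULL || _PyArena_AddPyObject(arena, name) < 0) {
        Py_XDECREF(name);             // on failure the reference stays ours
        _PyArena_Free(arena);
        return NULL;
    }
    // On success the arena owns the reference and DECREFs it, and frees
    // buf, when _PyArena_Free(arena) is eventually called.
    _PyArena_Free(arena);
    Py_RETURN_NONE;
}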
165
Dependencies/Python/include/internal/pycore_pyatomic_ft_wrappers.h
vendored
Normal file
@@ -0,0 +1,165 @@
// This header file provides wrappers around the atomic operations found in
// `pyatomic.h` that are only atomic in free-threaded builds.
//
// These are intended to be used in places where atomics are required in
// free-threaded builds, but not in the default build, and we don't want to
// introduce the potential performance overhead of an atomic operation in the
// default build.
//
// All usages of these macros should be replaced with unconditionally atomic or
// non-atomic versions, and this file should be removed, once the dust settles
// on free threading.
#ifndef Py_ATOMIC_FT_WRAPPERS_H
#define Py_ATOMIC_FT_WRAPPERS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
#error "this header requires Py_BUILD_CORE define"
#endif

#ifdef Py_GIL_DISABLED
#define FT_ATOMIC_LOAD_PTR(value) _Py_atomic_load_ptr(&value)
#define FT_ATOMIC_STORE_PTR(value, new_value) _Py_atomic_store_ptr(&value, new_value)
#define FT_ATOMIC_LOAD_SSIZE(value) _Py_atomic_load_ssize(&value)
#define FT_ATOMIC_LOAD_SSIZE_ACQUIRE(value) \
    _Py_atomic_load_ssize_acquire(&value)
#define FT_ATOMIC_LOAD_SSIZE_RELAXED(value) \
    _Py_atomic_load_ssize_relaxed(&value)
#define FT_ATOMIC_STORE_PTR(value, new_value) \
    _Py_atomic_store_ptr(&value, new_value)
#define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) \
    _Py_atomic_load_ptr_acquire(&value)
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) \
    _Py_atomic_load_uintptr_acquire(&value)
#define FT_ATOMIC_LOAD_PTR_RELAXED(value) \
    _Py_atomic_load_ptr_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT8(value) \
    _Py_atomic_load_uint8(&value)
#define FT_ATOMIC_STORE_UINT8(value, new_value) \
    _Py_atomic_store_uint8(&value, new_value)
#define FT_ATOMIC_LOAD_UINT8_RELAXED(value) \
    _Py_atomic_load_uint8_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT16_RELAXED(value) \
    _Py_atomic_load_uint16_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT32_RELAXED(value) \
    _Py_atomic_load_uint32_relaxed(&value)
#define FT_ATOMIC_LOAD_ULONG_RELAXED(value) \
    _Py_atomic_load_ulong_relaxed(&value)
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) \
    _Py_atomic_store_ptr_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) \
    _Py_atomic_store_ptr_release(&value, new_value)
#define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) \
    _Py_atomic_store_uintptr_release(&value, new_value)
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) \
    _Py_atomic_store_ssize_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) \
    _Py_atomic_store_uint8_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) \
    _Py_atomic_store_uint16_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) \
    _Py_atomic_store_uint32_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_CHAR_RELAXED(value, new_value) \
    _Py_atomic_store_char_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_CHAR_RELAXED(value) \
    _Py_atomic_load_char_relaxed(&value)
#define FT_ATOMIC_STORE_UCHAR_RELAXED(value, new_value) \
    _Py_atomic_store_uchar_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_UCHAR_RELAXED(value) \
    _Py_atomic_load_uchar_relaxed(&value)
#define FT_ATOMIC_STORE_SHORT_RELAXED(value, new_value) \
    _Py_atomic_store_short_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_SHORT_RELAXED(value) \
    _Py_atomic_load_short_relaxed(&value)
#define FT_ATOMIC_STORE_USHORT_RELAXED(value, new_value) \
    _Py_atomic_store_ushort_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_USHORT_RELAXED(value) \
    _Py_atomic_load_ushort_relaxed(&value)
#define FT_ATOMIC_STORE_INT_RELAXED(value, new_value) \
    _Py_atomic_store_int_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_INT_RELAXED(value) \
    _Py_atomic_load_int_relaxed(&value)
#define FT_ATOMIC_STORE_UINT_RELAXED(value, new_value) \
    _Py_atomic_store_uint_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_UINT_RELAXED(value) \
    _Py_atomic_load_uint_relaxed(&value)
#define FT_ATOMIC_STORE_LONG_RELAXED(value, new_value) \
    _Py_atomic_store_long_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_LONG_RELAXED(value) \
    _Py_atomic_load_long_relaxed(&value)
#define FT_ATOMIC_STORE_ULONG_RELAXED(value, new_value) \
    _Py_atomic_store_ulong_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) \
    _Py_atomic_store_ssize_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_FLOAT_RELAXED(value, new_value) \
    _Py_atomic_store_float_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_FLOAT_RELAXED(value) \
    _Py_atomic_load_float_relaxed(&value)
#define FT_ATOMIC_STORE_DOUBLE_RELAXED(value, new_value) \
    _Py_atomic_store_double_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_DOUBLE_RELAXED(value) \
    _Py_atomic_load_double_relaxed(&value)
#define FT_ATOMIC_STORE_LLONG_RELAXED(value, new_value) \
    _Py_atomic_store_llong_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_LLONG_RELAXED(value) \
    _Py_atomic_load_llong_relaxed(&value)
#define FT_ATOMIC_STORE_ULLONG_RELAXED(value, new_value) \
    _Py_atomic_store_ullong_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_ULLONG_RELAXED(value) \
    _Py_atomic_load_ullong_relaxed(&value)

#else
#define FT_ATOMIC_LOAD_PTR(value) value
#define FT_ATOMIC_STORE_PTR(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_SSIZE(value) value
#define FT_ATOMIC_LOAD_SSIZE_ACQUIRE(value) value
#define FT_ATOMIC_LOAD_SSIZE_RELAXED(value) value
#define FT_ATOMIC_LOAD_PTR_ACQUIRE(value) value
#define FT_ATOMIC_LOAD_UINTPTR_ACQUIRE(value) value
#define FT_ATOMIC_LOAD_PTR_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT8(value) value
#define FT_ATOMIC_STORE_UINT8(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UINT8_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT16_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT32_RELAXED(value) value
#define FT_ATOMIC_LOAD_ULONG_RELAXED(value) value
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) value = new_value
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_CHAR_RELAXED(value) value
#define FT_ATOMIC_STORE_CHAR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UCHAR_RELAXED(value) value
#define FT_ATOMIC_STORE_UCHAR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_SHORT_RELAXED(value) value
#define FT_ATOMIC_STORE_SHORT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_USHORT_RELAXED(value) value
#define FT_ATOMIC_STORE_USHORT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_INT_RELAXED(value) value
#define FT_ATOMIC_STORE_INT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UINT_RELAXED(value) value
#define FT_ATOMIC_STORE_UINT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_LONG_RELAXED(value) value
#define FT_ATOMIC_STORE_LONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_ULONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_FLOAT_RELAXED(value) value
#define FT_ATOMIC_STORE_FLOAT_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_DOUBLE_RELAXED(value) value
#define FT_ATOMIC_STORE_DOUBLE_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_LLONG_RELAXED(value) value
#define FT_ATOMIC_STORE_LLONG_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_ULLONG_RELAXED(value) value
#define FT_ATOMIC_STORE_ULLONG_RELAXED(value, new_value) value = new_value

#endif

#ifdef __cplusplus
}
#endif
#endif /* !Py_ATOMIC_FT_WRAPPERS_H */
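To illustrate the intent stated in the header comment, here is a hedged sketch of how such a wrapper is typically used: the field is a plain load/store when the GIL serializes access, and an atomic access in free-threaded builds. The struct and function are hypothetical, not part of the vendored file.

typedef struct {
    Py_ssize_t hits;   // statistics counter read from multiple threads
} demo_cache_t;

static void
demo_cache_record_hit(demo_cache_t *cache)
{
    // Note: a relaxed load followed by a relaxed store is not an atomic
    // increment; this pattern is only adequate where a lost update is
    // tolerable (e.g. rough statistics).
    Py_ssize_t n = FT_ATOMIC_LOAD_SSIZE_RELAXED(cache->hits);
    FT_ATOMIC_STORE_SSIZE_RELAXED(cache->hits, n + 1);
}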
21
Dependencies/Python/include/internal/pycore_pybuffer.h
vendored
Normal file
@@ -0,0 +1,21 @@
#ifndef Py_INTERNAL_PYBUFFER_H
#define Py_INTERNAL_PYBUFFER_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


// Exported for the _interpchannels module.
PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreter(
    PyInterpreterState *interp, Py_buffer *view);
PyAPI_FUNC(int) _PyBuffer_ReleaseInInterpreterAndRawFree(
    PyInterpreterState *interp, Py_buffer *view);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYBUFFER_H */
190
Dependencies/Python/include/internal/pycore_pyerrors.h
vendored
Normal file
@@ -0,0 +1,190 @@
#ifndef Py_INTERNAL_PYERRORS_H
#define Py_INTERNAL_PYERRORS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


/* Error handling definitions */

extern _PyErr_StackItem* _PyErr_GetTopmostException(PyThreadState *tstate);
extern PyObject* _PyErr_GetHandledException(PyThreadState *);
extern void _PyErr_SetHandledException(PyThreadState *, PyObject *);
extern void _PyErr_GetExcInfo(PyThreadState *, PyObject **, PyObject **, PyObject **);

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(void) _PyErr_SetKeyError(PyObject *);


// Like PyErr_Format(), but saves current exception as __context__ and
// __cause__.
// Export for '_sqlite3' shared extension.
PyAPI_FUNC(PyObject*) _PyErr_FormatFromCause(
    PyObject *exception,
    const char *format,   /* ASCII-encoded string */
    ...
    );

extern int _PyException_AddNote(
    PyObject *exc,
    PyObject *note);

extern int _PyErr_CheckSignals(void);

/* Support for adding program text to SyntaxErrors */

// Export for test_peg_generator
PyAPI_FUNC(PyObject*) _PyErr_ProgramDecodedTextObject(
    PyObject *filename,
    int lineno,
    const char* encoding);

extern PyObject* _PyUnicodeTranslateError_Create(
    PyObject *object,
    Py_ssize_t start,
    Py_ssize_t end,
    const char *reason          /* UTF-8 encoded string */
    );

extern void _Py_NO_RETURN _Py_FatalErrorFormat(
    const char *func,
    const char *format,
    ...);

extern PyObject* _PyErr_SetImportErrorWithNameFrom(
    PyObject *,
    PyObject *,
    PyObject *,
    PyObject *);


/* runtime lifecycle */

extern PyStatus _PyErr_InitTypes(PyInterpreterState *);
extern void _PyErr_FiniTypes(PyInterpreterState *);


/* other API */

static inline PyObject* _PyErr_Occurred(PyThreadState *tstate)
{
    assert(tstate != NULL);
    if (tstate->current_exception == NULL) {
        return NULL;
    }
    return (PyObject *)Py_TYPE(tstate->current_exception);
}

static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state)
{
    Py_CLEAR(exc_state->exc_value);
}

extern PyObject* _PyErr_StackItemToExcInfoTuple(
    _PyErr_StackItem *err_info);

extern void _PyErr_Fetch(
    PyThreadState *tstate,
    PyObject **type,
    PyObject **value,
    PyObject **traceback);

extern PyObject* _PyErr_GetRaisedException(PyThreadState *tstate);

PyAPI_FUNC(int) _PyErr_ExceptionMatches(
    PyThreadState *tstate,
    PyObject *exc);

extern void _PyErr_SetRaisedException(PyThreadState *tstate, PyObject *exc);

extern void _PyErr_Restore(
    PyThreadState *tstate,
    PyObject *type,
    PyObject *value,
    PyObject *traceback);

extern void _PyErr_SetObject(
    PyThreadState *tstate,
    PyObject *type,
    PyObject *value);

extern void _PyErr_ChainStackItem(void);

PyAPI_FUNC(void) _PyErr_Clear(PyThreadState *tstate);

extern void _PyErr_SetNone(PyThreadState *tstate, PyObject *exception);

extern PyObject* _PyErr_NoMemory(PyThreadState *tstate);

PyAPI_FUNC(void) _PyErr_SetString(
    PyThreadState *tstate,
    PyObject *exception,
    const char *string);

/*
 * Set an exception with the error message decoded from the current locale
 * encoding (LC_CTYPE).
 *
 * Exceptions occurring in decoding take priority over the desired exception.
 *
 * Exported for '_ctypes' shared extensions.
 */
PyAPI_FUNC(void) _PyErr_SetLocaleString(
    PyObject *exception,
    const char *string);

PyAPI_FUNC(PyObject*) _PyErr_Format(
    PyThreadState *tstate,
    PyObject *exception,
    const char *format,
    ...);

extern void _PyErr_NormalizeException(
    PyThreadState *tstate,
    PyObject **exc,
    PyObject **val,
    PyObject **tb);

extern PyObject* _PyErr_FormatFromCauseTstate(
    PyThreadState *tstate,
    PyObject *exception,
    const char *format,
    ...);

extern PyObject* _PyExc_CreateExceptionGroup(
    const char *msg,
    PyObject *excs);

extern PyObject* _PyExc_PrepReraiseStar(
    PyObject *orig,
    PyObject *excs);

extern int _PyErr_CheckSignalsTstate(PyThreadState *tstate);

extern void _Py_DumpExtensionModules(int fd, PyInterpreterState *interp);
extern PyObject* _Py_CalculateSuggestions(PyObject *dir, PyObject *name);
extern PyObject* _Py_Offer_Suggestions(PyObject* exception);

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(Py_ssize_t) _Py_UTF8_Edit_Cost(PyObject *str_a, PyObject *str_b,
                                          Py_ssize_t max_cost);

void _PyErr_FormatNote(const char *format, ...);

/* Context manipulation (PEP 3134) */

Py_DEPRECATED(3.12) extern void _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *);

// implementation detail for the codeop module.
// Exported for test.test_peg_generator.test_c_parser
PyAPI_DATA(PyTypeObject) _PyExc_IncompleteInputError;
#define PyExc_IncompleteInputError ((PyObject *)(&_PyExc_IncompleteInputError))

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYERRORS_H */
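The tstate-taking variants above exist so callers that already hold the thread state avoid re-reading it on every call. A hedged sketch of the pattern (hypothetical core-internal helper, not part of the vendored file):

static PyObject *
demo_lookup(PyObject *dict, PyObject *key)
{
    PyThreadState *tstate = _PyThreadState_GET();
    PyObject *value = PyDict_GetItemWithError(dict, key);  // borrowed ref
    if (value == NULL && !_PyErr_Occurred(tstate)) {
        _PyErr_SetKeyError(key);   // raises KeyError(key)
        return NULL;
    }
    return Py_XNewRef(value);      // NULL propagates a pending exception
}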
107
Dependencies/Python/include/internal/pycore_pyhash.h
vendored
Normal file
@@ -0,0 +1,107 @@
#ifndef Py_INTERNAL_PYHASH_H
#define Py_INTERNAL_PYHASH_H

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// Similar to Py_HashPointer(), but don't replace -1 with -2.
static inline Py_hash_t
_Py_HashPointerRaw(const void *ptr)
{
    uintptr_t x = (uintptr_t)ptr;
    Py_BUILD_ASSERT(sizeof(x) == sizeof(ptr));

    // Bottom 3 or 4 bits are likely to be 0; rotate x by 4 to the right
    // to avoid excessive hash collisions for dicts and sets.
    x = (x >> 4) | (x << (8 * sizeof(uintptr_t) - 4));

    Py_BUILD_ASSERT(sizeof(x) == sizeof(Py_hash_t));
    return (Py_hash_t)x;
}

// Export for '_datetime' shared extension
PyAPI_FUNC(Py_hash_t) _Py_HashBytes(const void*, Py_ssize_t);

/* Hash secret
 *
 * memory layout on 64 bit systems
 *   cccccccc cccccccc cccccccc  uc -- unsigned char[24]
 *   pppppppp ssssssss ........  fnv -- two Py_hash_t
 *   k0k0k0k0 k1k1k1k1 ........  siphash -- two uint64_t
 *   ........ ........ ssssssss  djbx33a -- 16 bytes padding + one Py_hash_t
 *   ........ ........ eeeeeeee  pyexpat XML hash salt
 *
 * memory layout on 32 bit systems
 *   cccccccc cccccccc cccccccc  uc
 *   ppppssss ........ ........  fnv -- two Py_hash_t
 *   k0k0k0k0 k1k1k1k1 ........  siphash -- two uint64_t (*)
 *   ........ ........ ssss....  djbx33a -- 16 bytes padding + one Py_hash_t
 *   ........ ........ eeee....  pyexpat XML hash salt
 *
 * (*) The siphash member may not be available on 32 bit platforms without
 *     an unsigned int64 data type.
 */
typedef union {
    /* ensure 24 bytes */
    unsigned char uc[24];
    /* two Py_hash_t for FNV */
    struct {
        Py_hash_t prefix;
        Py_hash_t suffix;
    } fnv;
    /* two uint64 for SipHash24 */
    struct {
        uint64_t k0;
        uint64_t k1;
    } siphash;
    /* a different (!) Py_hash_t for small string optimization */
    struct {
        unsigned char padding[16];
        Py_hash_t suffix;
    } djbx33a;
    struct {
        unsigned char padding[16];
        Py_hash_t hashsalt;
    } expat;
} _Py_HashSecret_t;

// Export for '_elementtree' shared extension
PyAPI_DATA(_Py_HashSecret_t) _Py_HashSecret;

#ifdef Py_DEBUG
extern int _Py_HashSecret_Initialized;
#endif


struct pyhash_runtime_state {
    struct {
#ifndef MS_WINDOWS
        int fd;
        dev_t st_dev;
        ino_t st_ino;
#else
        // This is a placeholder so the struct isn't empty on Windows.
        int _not_used;
#endif
    } urandom_cache;
};

#ifndef MS_WINDOWS
# define _py_urandom_cache_INIT \
    { \
        .fd = -1, \
    }
#else
# define _py_urandom_cache_INIT {0}
#endif

#define pyhash_state_INIT \
    { \
        .urandom_cache = _py_urandom_cache_INIT, \
    }


extern uint64_t _Py_KeyedHash(uint64_t key, const void *src, Py_ssize_t src_sz);

#endif // !Py_INTERNAL_PYHASH_H
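The union above overlays a single 24-byte secret so each consumer reads only the view it needs. A hedged sketch of one such consumer (hypothetical core-internal code, not part of the vendored file):

static uint64_t
demo_keyed_hash(const void *buf, Py_ssize_t len)
{
#ifdef Py_DEBUG
    assert(_Py_HashSecret_Initialized);   // secret is set during startup
#endif
    // _Py_KeyedHash() hashes with an explicit 64-bit key; k0 is one half
    // of the randomized SipHash key stored in the 24-byte union.
    return _Py_KeyedHash(_Py_HashSecret.siphash.k0, buf, len);
}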
136
Dependencies/Python/include/internal/pycore_pylifecycle.h
vendored
Normal file
@@ -0,0 +1,136 @@
#ifndef Py_INTERNAL_LIFECYCLE_H
#define Py_INTERNAL_LIFECYCLE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_runtime.h"       // _PyRuntimeState

/* Forward declarations */
struct _PyArgv;
struct pyruntimestate;

extern int _Py_SetFileSystemEncoding(
    const char *encoding,
    const char *errors);
extern void _Py_ClearFileSystemEncoding(void);
extern PyStatus _PyUnicode_InitEncodings(PyThreadState *tstate);
#ifdef MS_WINDOWS
extern int _PyUnicode_EnableLegacyWindowsFSEncoding(void);
#endif

extern int _Py_IsLocaleCoercionTarget(const char *ctype_loc);

/* Various one-time initializers */

extern void _Py_InitVersion(void);
extern PyStatus _PyFaulthandler_Init(int enable);
extern PyObject * _PyBuiltin_Init(PyInterpreterState *interp);
extern PyStatus _PySys_Create(
    PyThreadState *tstate,
    PyObject **sysmod_p);
extern PyStatus _PySys_ReadPreinitWarnOptions(PyWideStringList *options);
extern PyStatus _PySys_ReadPreinitXOptions(PyConfig *config);
extern int _PySys_UpdateConfig(PyThreadState *tstate);
extern void _PySys_FiniTypes(PyInterpreterState *interp);
extern int _PyBuiltins_AddExceptions(PyObject * bltinmod);
extern PyStatus _Py_HashRandomization_Init(const PyConfig *);

extern PyStatus _PyGC_Init(PyInterpreterState *interp);
extern PyStatus _PyAtExit_Init(PyInterpreterState *interp);

/* Various internal finalizers */

extern int _PySignal_Init(int install_signal_handlers);
extern void _PySignal_Fini(void);

extern void _PyGC_Fini(PyInterpreterState *interp);
extern void _Py_HashRandomization_Fini(void);
extern void _PyFaulthandler_Fini(void);
extern void _PyHash_Fini(void);
extern void _PyTraceMalloc_Fini(void);
extern void _PyWarnings_Fini(PyInterpreterState *interp);
extern void _PyAST_Fini(PyInterpreterState *interp);
extern void _PyAtExit_Fini(PyInterpreterState *interp);
extern void _PyThread_FiniType(PyInterpreterState *interp);
extern void _PyArg_Fini(void);
extern void _Py_FinalizeAllocatedBlocks(_PyRuntimeState *);

extern PyStatus _PyGILState_Init(PyInterpreterState *interp);
extern void _PyGILState_SetTstate(PyThreadState *tstate);
extern void _PyGILState_Fini(PyInterpreterState *interp);

extern void _PyGC_DumpShutdownStats(PyInterpreterState *interp);

extern PyStatus _Py_PreInitializeFromPyArgv(
    const PyPreConfig *src_config,
    const struct _PyArgv *args);
extern PyStatus _Py_PreInitializeFromConfig(
    const PyConfig *config,
    const struct _PyArgv *args);

extern wchar_t * _Py_GetStdlibDir(void);

extern int _Py_HandleSystemExit(int *exitcode_p);

extern PyObject* _PyErr_WriteUnraisableDefaultHook(PyObject *unraisable);

extern void _PyErr_Print(PyThreadState *tstate);
extern void _PyErr_Display(PyObject *file, PyObject *exception,
                           PyObject *value, PyObject *tb);
extern void _PyErr_DisplayException(PyObject *file, PyObject *exc);

extern void _PyThreadState_DeleteCurrent(PyThreadState *tstate);

extern void _PyAtExit_Call(PyInterpreterState *interp);

extern int _Py_IsCoreInitialized(void);

extern int _Py_FdIsInteractive(FILE *fp, PyObject *filename);

extern const char* _Py_gitidentifier(void);
extern const char* _Py_gitversion(void);

// Export for '_asyncio' shared extension
PyAPI_FUNC(int) _Py_IsInterpreterFinalizing(PyInterpreterState *interp);

/* Random */
extern int _PyOS_URandom(void *buffer, Py_ssize_t size);

// Export for '_random' shared extension
PyAPI_FUNC(int) _PyOS_URandomNonblock(void *buffer, Py_ssize_t size);

/* Legacy locale support */
extern int _Py_CoerceLegacyLocale(int warn);
extern int _Py_LegacyLocaleDetected(int warn);

// Export for 'readline' shared extension
PyAPI_FUNC(char*) _Py_SetLocaleFromEnv(int category);

// Export for special main.c string compiling with source tracebacks
int _PyRun_SimpleStringFlagsWithName(const char *command, const char* name, PyCompilerFlags *flags);


/* interpreter config */

// Export for _testinternalcapi shared extension
PyAPI_FUNC(int) _PyInterpreterConfig_InitFromState(
    PyInterpreterConfig *,
    PyInterpreterState *);
PyAPI_FUNC(PyObject *) _PyInterpreterConfig_AsDict(PyInterpreterConfig *);
PyAPI_FUNC(int) _PyInterpreterConfig_InitFromDict(
    PyInterpreterConfig *,
    PyObject *);
PyAPI_FUNC(int) _PyInterpreterConfig_UpdateFromDict(
    PyInterpreterConfig *,
    PyObject *);


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LIFECYCLE_H */
205
Dependencies/Python/include/internal/pycore_pymath.h
vendored
Normal file
@@ -0,0 +1,205 @@
#ifndef Py_INTERNAL_PYMATH_H
#define Py_INTERNAL_PYMATH_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


/* _Py_ADJUST_ERANGE1(x)
 * _Py_ADJUST_ERANGE2(x, y)
 * Set errno to 0 before calling a libm function, and invoke one of these
 * macros after, passing the function result(s) (_Py_ADJUST_ERANGE2 is useful
 * for functions returning complex results).  This makes two kinds of
 * adjustments to errno:  (A) If it looks like the platform libm set
 * errno=ERANGE due to underflow, clear errno. (B) If it looks like the
 * platform libm overflowed but didn't set errno, force errno to ERANGE.  In
 * effect, we're trying to force a useful implementation of C89 errno
 * behavior.
 * Caution:
 *    This isn't reliable.  C99 no longer requires libm to set errno under
 *    any exceptional condition, but does require +- HUGE_VAL return
 *    values on overflow.  A 754 box *probably* maps HUGE_VAL to a
 *    double infinity, and we're cool if that's so, unless the input
 *    was an infinity and an infinity is the expected result.  A C89
 *    system sets errno to ERANGE, so we check for that too.  We're
 *    out of luck if a C99 754 box doesn't map HUGE_VAL to +Inf, or
 *    if the returned result is a NaN, or if a C89 box returns HUGE_VAL
 *    in non-overflow cases.
 */
static inline void _Py_ADJUST_ERANGE1(double x)
{
    if (errno == 0) {
        if (x == Py_HUGE_VAL || x == -Py_HUGE_VAL) {
            errno = ERANGE;
        }
    }
    else if (errno == ERANGE && x == 0.0) {
        errno = 0;
    }
}

static inline void _Py_ADJUST_ERANGE2(double x, double y)
{
    if (x == Py_HUGE_VAL || x == -Py_HUGE_VAL ||
        y == Py_HUGE_VAL || y == -Py_HUGE_VAL)
    {
        if (errno == 0) {
            errno = ERANGE;
        }
    }
    else if (errno == ERANGE) {
        errno = 0;
    }
}


//--- HAVE_PY_SET_53BIT_PRECISION macro ------------------------------------
//
// The functions _Py_dg_strtod() and _Py_dg_dtoa() in Python/dtoa.c (which are
// required to support the short float repr introduced in Python 3.1) require
// that the floating-point unit that's being used for arithmetic operations on
// C doubles is set to use 53-bit precision.  It also requires that the FPU
// rounding mode is round-half-to-even, but that's less often an issue.
//
// If your FPU isn't already set to 53-bit precision/round-half-to-even, and
// you want to make use of _Py_dg_strtod() and _Py_dg_dtoa(), then you should:
//
//     #define HAVE_PY_SET_53BIT_PRECISION 1
//
// and also give appropriate definitions for the following three macros:
//
// * _Py_SET_53BIT_PRECISION_HEADER: any variable declarations needed to
//   use the two macros below.
// * _Py_SET_53BIT_PRECISION_START: store original FPU settings, and
//   set FPU to 53-bit precision/round-half-to-even
// * _Py_SET_53BIT_PRECISION_END: restore original FPU settings
//
// The macros are designed to be used within a single C function: see
// Python/pystrtod.c for an example of their use.


// Get and set x87 control word for gcc/x86
#ifdef HAVE_GCC_ASM_FOR_X87
#define HAVE_PY_SET_53BIT_PRECISION 1

// Functions defined in Python/pymath.c
extern unsigned short _Py_get_387controlword(void);
extern void _Py_set_387controlword(unsigned short);

#define _Py_SET_53BIT_PRECISION_HEADER \
    unsigned short old_387controlword, new_387controlword
#define _Py_SET_53BIT_PRECISION_START \
    do { \
        old_387controlword = _Py_get_387controlword(); \
        new_387controlword = (old_387controlword & ~0x0f00) | 0x0200; \
        if (new_387controlword != old_387controlword) { \
            _Py_set_387controlword(new_387controlword); \
        } \
    } while (0)
#define _Py_SET_53BIT_PRECISION_END \
    do { \
        if (new_387controlword != old_387controlword) { \
            _Py_set_387controlword(old_387controlword); \
        } \
    } while (0)
#endif

// Get and set x87 control word for VisualStudio/x86.
// x87 is not supported in 64-bit or ARM.
#if defined(_MSC_VER) && !defined(_WIN64) && !defined(_M_ARM)
#define HAVE_PY_SET_53BIT_PRECISION 1

#include <float.h>                  // __control87_2()

#define _Py_SET_53BIT_PRECISION_HEADER \
    unsigned int old_387controlword, new_387controlword, out_387controlword
// We use the __control87_2 function to set only the x87 control word.
// The SSE control word is unaffected.
#define _Py_SET_53BIT_PRECISION_START \
    do { \
        __control87_2(0, 0, &old_387controlword, NULL); \
        new_387controlword = \
            (old_387controlword & ~(_MCW_PC | _MCW_RC)) | (_PC_53 | _RC_NEAR); \
        if (new_387controlword != old_387controlword) { \
            __control87_2(new_387controlword, _MCW_PC | _MCW_RC, \
                          &out_387controlword, NULL); \
        } \
    } while (0)
#define _Py_SET_53BIT_PRECISION_END \
    do { \
        if (new_387controlword != old_387controlword) { \
            __control87_2(old_387controlword, _MCW_PC | _MCW_RC, \
                          &out_387controlword, NULL); \
        } \
    } while (0)
#endif


// MC68881
#ifdef HAVE_GCC_ASM_FOR_MC68881
#define HAVE_PY_SET_53BIT_PRECISION 1
#define _Py_SET_53BIT_PRECISION_HEADER \
    unsigned int old_fpcr, new_fpcr
#define _Py_SET_53BIT_PRECISION_START \
    do { \
        __asm__ ("fmove.l %%fpcr,%0" : "=g" (old_fpcr)); \
        /* Set double precision / round to nearest. */ \
        new_fpcr = (old_fpcr & ~0xf0) | 0x80; \
        if (new_fpcr != old_fpcr) { \
            __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (new_fpcr)); \
        } \
    } while (0)
#define _Py_SET_53BIT_PRECISION_END \
    do { \
        if (new_fpcr != old_fpcr) { \
            __asm__ volatile ("fmove.l %0,%%fpcr" : : "g" (old_fpcr)); \
        } \
    } while (0)
#endif

// Default definitions are empty
#ifndef _Py_SET_53BIT_PRECISION_HEADER
# define _Py_SET_53BIT_PRECISION_HEADER
# define _Py_SET_53BIT_PRECISION_START
# define _Py_SET_53BIT_PRECISION_END
#endif


//--- _PY_SHORT_FLOAT_REPR macro -------------------------------------------

// If we can't guarantee 53-bit precision, don't use the code
// in Python/dtoa.c, but fall back to standard code.  This
// means that repr of a float will be long (17 significant digits).
//
// Realistically, there are two things that could go wrong:
//
// (1) doubles aren't IEEE 754 doubles, or
// (2) we're on x86 with the rounding precision set to 64-bits
//     (extended precision), and we don't know how to change
//     the rounding precision.
#if !defined(DOUBLE_IS_LITTLE_ENDIAN_IEEE754) && \
    !defined(DOUBLE_IS_BIG_ENDIAN_IEEE754) && \
    !defined(DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754)
# define _PY_SHORT_FLOAT_REPR 0
#endif

// Double rounding is symptomatic of use of extended precision on x86.
// If we're seeing double rounding, and we don't have any mechanism available
// for changing the FPU rounding precision, then don't use Python/dtoa.c.
#if defined(X87_DOUBLE_ROUNDING) && !defined(HAVE_PY_SET_53BIT_PRECISION)
# define _PY_SHORT_FLOAT_REPR 0
#endif

#ifndef _PY_SHORT_FLOAT_REPR
# define _PY_SHORT_FLOAT_REPR 1
#endif


#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYMATH_H */
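The comment above says the three _Py_SET_53BIT_PRECISION_* macros are meant to be used within a single C function (Python/pystrtod.c is the real example). A hedged sketch of that pattern, assuming a build where _PY_SHORT_FLOAT_REPR is 1 so _Py_dg_strtod() exists; the function itself is hypothetical:

static double
demo_parse_double(const char *s, char **end)
{
    _Py_SET_53BIT_PRECISION_HEADER;   // declares the saved-state locals
    _Py_SET_53BIT_PRECISION_START;    // force 53-bit, round-half-to-even
    double result = _Py_dg_strtod(s, end);
    _Py_SET_53BIT_PRECISION_END;      // restore the original FPU state
    return result;
}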
138
Dependencies/Python/include/internal/pycore_pymem.h
vendored
Normal file
@@ -0,0 +1,138 @@
#ifndef Py_INTERNAL_PYMEM_H
#define Py_INTERNAL_PYMEM_H

#include "pycore_llist.h"           // struct llist_node
#include "pycore_lock.h"            // PyMutex

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// Try to get the allocators name set by _PyMem_SetupAllocators().
// Return NULL if unknown.
// Export for '_testinternalcapi' shared extension.
PyAPI_FUNC(const char*) _PyMem_GetCurrentAllocatorName(void);

// strdup() using PyMem_RawMalloc()
extern char* _PyMem_RawStrdup(const char *str);

// strdup() using PyMem_Malloc().
// Export for '_pickle' shared extension.
PyAPI_FUNC(char*) _PyMem_Strdup(const char *str);

// wcsdup() using PyMem_RawMalloc()
extern wchar_t* _PyMem_RawWcsdup(const wchar_t *str);

typedef struct {
    /* We tag each block with an API ID in order to tag API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;

struct _pymem_allocators {
    PyMutex mutex;
    struct {
        PyMemAllocatorEx raw;
        PyMemAllocatorEx mem;
        PyMemAllocatorEx obj;
    } standard;
    struct {
        debug_alloc_api_t raw;
        debug_alloc_api_t mem;
        debug_alloc_api_t obj;
    } debug;
    int is_debug_enabled;
    PyObjectArenaAllocator obj_arena;
};

struct _Py_mem_interp_free_queue {
    int has_work;   // true if the queue is not empty
    PyMutex mutex;  // protects the queue
    struct llist_node head;  // queue of _mem_work_chunk items
};

/* Set the memory allocator of the specified domain to the default.
   Save the old allocator into *old_alloc if it's non-NULL.
   Return 0 on success, or return -1 if the domain is unknown. */
extern int _PyMem_SetDefaultAllocator(
    PyMemAllocatorDomain domain,
    PyMemAllocatorEx *old_alloc);

/* Special bytes broadcast into debug memory blocks at appropriate times.
   Strings of these are unlikely to be valid addresses, floats, ints or
   7-bit ASCII.

   - PYMEM_CLEANBYTE: clean (newly allocated) memory
   - PYMEM_DEADBYTE: dead (newly freed) memory
   - PYMEM_FORBIDDENBYTE: untouchable bytes at each end of a block

   Byte patterns 0xCB, 0xDB and 0xFB have been replaced with 0xCD, 0xDD and
   0xFD to use the same values as Windows CRT debug malloc() and free().
   If modified, _PyMem_IsPtrFreed() should be updated as well. */
#define PYMEM_CLEANBYTE      0xCD
#define PYMEM_DEADBYTE       0xDD
#define PYMEM_FORBIDDENBYTE  0xFD

/* Heuristic checking if a pointer value is newly allocated
   (uninitialized), newly freed or NULL (is equal to zero).

   The pointer is not dereferenced, only the pointer value is checked.

   The heuristic relies on the debug hooks on Python memory allocators which
   fill newly allocated memory with CLEANBYTE (0xCD) and newly freed memory
   with DEADBYTE (0xDD). It also detects "untouchable bytes" marked
   with FORBIDDENBYTE (0xFD). */
static inline int _PyMem_IsPtrFreed(const void *ptr)
{
    uintptr_t value = (uintptr_t)ptr;
#if SIZEOF_VOID_P == 8
    return (value == 0
            || value == (uintptr_t)0xCDCDCDCDCDCDCDCD
            || value == (uintptr_t)0xDDDDDDDDDDDDDDDD
            || value == (uintptr_t)0xFDFDFDFDFDFDFDFD);
#elif SIZEOF_VOID_P == 4
    return (value == 0
            || value == (uintptr_t)0xCDCDCDCD
            || value == (uintptr_t)0xDDDDDDDD
            || value == (uintptr_t)0xFDFDFDFD);
#else
# error "unknown pointer size"
#endif
}

extern int _PyMem_GetAllocatorName(
    const char *name,
    PyMemAllocatorName *allocator);

/* Configure the Python memory allocators.
   Pass PYMEM_ALLOCATOR_DEFAULT to use default allocators.
   PYMEM_ALLOCATOR_NOT_SET does nothing. */
extern int _PyMem_SetupAllocators(PyMemAllocatorName allocator);

/* Is the debug allocator enabled? */
extern int _PyMem_DebugEnabled(void);

// Enqueue a pointer to be freed possibly after some delay.
extern void _PyMem_FreeDelayed(void *ptr);

// Enqueue an object to be freed possibly after some delay
extern void _PyObject_FreeDelayed(void *ptr);

// Periodically process delayed free requests.
extern void _PyMem_ProcessDelayed(PyThreadState *tstate);

// Abandon all thread-local delayed free requests and push them to the
// interpreter's queue.
extern void _PyMem_AbandonDelayed(PyThreadState *tstate);

// On interpreter shutdown, frees all delayed free requests.
extern void _PyMem_FiniDelayed(PyInterpreterState *interp);

#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_PYMEM_H
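A hedged sketch of how the heuristic above is typically used: assert-style checks that catch use of memory the debug allocator has poisoned, without dereferencing the pointer (hypothetical function, not part of the vendored file):

static void
demo_check_not_freed(PyObject *op)
{
    // Flags pointers whose value is NULL or consists entirely of the
    // 0xCD/0xDD/0xFD debug byte patterns defined above.
    assert(!_PyMem_IsPtrFreed(op));
    assert(!_PyMem_IsPtrFreed(Py_TYPE(op)));
}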
103
Dependencies/Python/include/internal/pycore_pymem_init.h
vendored
Normal file
@@ -0,0 +1,103 @@
#ifndef Py_INTERNAL_PYMEM_INIT_H
#define Py_INTERNAL_PYMEM_INIT_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif


/********************************/
/* the allocators' initializers */

extern void * _PyMem_RawMalloc(void *, size_t);
extern void * _PyMem_RawCalloc(void *, size_t, size_t);
extern void * _PyMem_RawRealloc(void *, void *, size_t);
extern void _PyMem_RawFree(void *, void *);
#define PYRAW_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}

#ifdef Py_GIL_DISABLED
// Py_GIL_DISABLED requires mimalloc
extern void* _PyObject_MiMalloc(void *, size_t);
extern void* _PyObject_MiCalloc(void *, size_t, size_t);
extern void _PyObject_MiFree(void *, void *);
extern void* _PyObject_MiRealloc(void *, void *, size_t);
# define PYOBJ_ALLOC {NULL, _PyObject_MiMalloc, _PyObject_MiCalloc, _PyObject_MiRealloc, _PyObject_MiFree}
extern void* _PyMem_MiMalloc(void *, size_t);
extern void* _PyMem_MiCalloc(void *, size_t, size_t);
extern void _PyMem_MiFree(void *, void *);
extern void* _PyMem_MiRealloc(void *, void *, size_t);
# define PYMEM_ALLOC {NULL, _PyMem_MiMalloc, _PyMem_MiCalloc, _PyMem_MiRealloc, _PyMem_MiFree}
#elif defined(WITH_PYMALLOC)
extern void* _PyObject_Malloc(void *, size_t);
extern void* _PyObject_Calloc(void *, size_t, size_t);
extern void _PyObject_Free(void *, void *);
extern void* _PyObject_Realloc(void *, void *, size_t);
# define PYOBJ_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
# define PYMEM_ALLOC PYOBJ_ALLOC
#else
# define PYOBJ_ALLOC PYRAW_ALLOC
# define PYMEM_ALLOC PYOBJ_ALLOC
#endif  // WITH_PYMALLOC


extern void* _PyMem_DebugRawMalloc(void *, size_t);
extern void* _PyMem_DebugRawCalloc(void *, size_t, size_t);
extern void* _PyMem_DebugRawRealloc(void *, void *, size_t);
extern void _PyMem_DebugRawFree(void *, void *);

extern void* _PyMem_DebugMalloc(void *, size_t);
extern void* _PyMem_DebugCalloc(void *, size_t, size_t);
extern void* _PyMem_DebugRealloc(void *, void *, size_t);
extern void _PyMem_DebugFree(void *, void *);

#define PYDBGRAW_ALLOC(runtime) \
    {&(runtime).allocators.debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
#define PYDBGMEM_ALLOC(runtime) \
    {&(runtime).allocators.debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
#define PYDBGOBJ_ALLOC(runtime) \
    {&(runtime).allocators.debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}

extern void * _PyMem_ArenaAlloc(void *, size_t);
extern void _PyMem_ArenaFree(void *, void *, size_t);

#ifdef Py_DEBUG
# define _pymem_allocators_standard_INIT(runtime) \
    { \
        PYDBGRAW_ALLOC(runtime), \
        PYDBGMEM_ALLOC(runtime), \
        PYDBGOBJ_ALLOC(runtime), \
    }
# define _pymem_is_debug_enabled_INIT 1
#else
# define _pymem_allocators_standard_INIT(runtime) \
    { \
        PYRAW_ALLOC, \
        PYMEM_ALLOC, \
        PYOBJ_ALLOC, \
    }
# define _pymem_is_debug_enabled_INIT 0
#endif

#define _pymem_allocators_debug_INIT \
    { \
        {'r', PYRAW_ALLOC}, \
        {'m', PYMEM_ALLOC}, \
        {'o', PYOBJ_ALLOC}, \
    }

# define _pymem_allocators_obj_arena_INIT \
    { NULL, _PyMem_ArenaAlloc, _PyMem_ArenaFree }


#define _Py_mem_free_queue_INIT(queue) \
    { \
        .head = LLIST_INIT(queue.head), \
    }

#ifdef __cplusplus
}
#endif
#endif  // !Py_INTERNAL_PYMEM_INIT_H
299
Dependencies/Python/include/internal/pycore_pystate.h
vendored
Normal file
@@ -0,0 +1,299 @@
#ifndef Py_INTERNAL_PYSTATE_H
#define Py_INTERNAL_PYSTATE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_freelist.h"      // _PyFreeListState
#include "pycore_runtime.h"       // _PyRuntime
#include "pycore_tstate.h"        // _PyThreadStateImpl


// Values for PyThreadState.state. A thread must be in the "attached" state
// before calling most Python APIs. If the GIL is enabled, then "attached"
// implies that the thread holds the GIL and "detached" implies that the
// thread does not hold the GIL (or is in the process of releasing it). In
// `--disable-gil` builds, multiple threads may be "attached" to the same
// interpreter at the same time. Only the "bound" thread may perform the
// transitions between "attached" and "detached" on its own PyThreadState.
//
// The "suspended" state is used to implement stop-the-world pauses, such as
// for cyclic garbage collection. It is only used in `--disable-gil` builds.
// The "suspended" state is similar to the "detached" state in that in both
// states the thread is not allowed to call most Python APIs. However, unlike
// the "detached" state, a thread may not transition itself out from the
// "suspended" state. Only the thread performing a stop-the-world pause may
// transition a thread from the "suspended" state back to the "detached" state.
//
// State transition diagram:
//
//            (bound thread)        (stop-the-world thread)
// [attached]       <->       [detached]       <->       [suspended]
//   |                                                        ^
//   +---------------------------->---------------------------+
//                          (bound thread)
//
// The (bound thread) and (stop-the-world thread) labels indicate which thread
// is allowed to perform the transition.
#define _Py_THREAD_DETACHED 0
#define _Py_THREAD_ATTACHED 1
#define _Py_THREAD_SUSPENDED 2


/* Check if the current thread is the main thread.
   Use _Py_IsMainInterpreter() to check if it's the main interpreter. */
static inline int
_Py_IsMainThread(void)
{
    unsigned long thread = PyThread_get_thread_ident();
    return (thread == _PyRuntime.main_thread);
}


static inline PyInterpreterState *
_PyInterpreterState_Main(void)
{
    return _PyRuntime.interpreters.main;
}

static inline int
_Py_IsMainInterpreter(PyInterpreterState *interp)
{
    return (interp == _PyInterpreterState_Main());
}

static inline int
_Py_IsMainInterpreterFinalizing(PyInterpreterState *interp)
{
    /* bpo-39877: Access _PyRuntime directly rather than using
       tstate->interp->runtime to support calls from Python daemon threads.
       After Py_Finalize() has been called, tstate can be a dangling pointer:
       point to PyThreadState freed memory. */
    return (_PyRuntimeState_GetFinalizing(&_PyRuntime) != NULL &&
            interp == &_PyRuntime._main_interpreter);
}

// Export for _interpreters module.
PyAPI_FUNC(PyObject *) _PyInterpreterState_GetIDObject(PyInterpreterState *);

// Export for _interpreters module.
PyAPI_FUNC(int) _PyInterpreterState_SetRunningMain(PyInterpreterState *);
PyAPI_FUNC(void) _PyInterpreterState_SetNotRunningMain(PyInterpreterState *);
PyAPI_FUNC(int) _PyInterpreterState_IsRunningMain(PyInterpreterState *);
PyAPI_FUNC(int) _PyInterpreterState_FailIfRunningMain(PyInterpreterState *);

extern int _PyThreadState_IsRunningMain(PyThreadState *);
extern void _PyInterpreterState_ReinitRunningMain(PyThreadState *);


static inline const PyConfig *
_Py_GetMainConfig(void)
{
    PyInterpreterState *interp = _PyInterpreterState_Main();
    if (interp == NULL) {
        return NULL;
    }
    return _PyInterpreterState_GetConfig(interp);
}


/* Only handle signals on the main thread of the main interpreter. */
static inline int
_Py_ThreadCanHandleSignals(PyInterpreterState *interp)
{
    return (_Py_IsMainThread() && _Py_IsMainInterpreter(interp));
}


/* Variable and static inline functions for in-line access to current thread
   and interpreter state */

#if defined(HAVE_THREAD_LOCAL) && !defined(Py_BUILD_CORE_MODULE)
extern _Py_thread_local PyThreadState *_Py_tss_tstate;
#endif

#ifndef NDEBUG
extern int _PyThreadState_CheckConsistency(PyThreadState *tstate);
#endif

int _PyThreadState_MustExit(PyThreadState *tstate);

// Export for most shared extensions, used via _PyThreadState_GET() static
// inline function.
PyAPI_FUNC(PyThreadState *) _PyThreadState_GetCurrent(void);

/* Get the current Python thread state.

   This function is unsafe: it does not check for error and it can return NULL.

   The caller must hold the GIL.

   See also PyThreadState_Get() and PyThreadState_GetUnchecked(). */
static inline PyThreadState*
_PyThreadState_GET(void)
{
#if defined(HAVE_THREAD_LOCAL) && !defined(Py_BUILD_CORE_MODULE)
    return _Py_tss_tstate;
#else
    return _PyThreadState_GetCurrent();
#endif
}

// Attaches the current thread to the interpreter.
//
// This may block while acquiring the GIL (if the GIL is enabled) or while
// waiting for a stop-the-world pause (if the GIL is disabled).
//
// High-level code should generally call PyEval_RestoreThread() instead, which
// calls this function.
extern void _PyThreadState_Attach(PyThreadState *tstate);

// Detaches the current thread from the interpreter.
//
// High-level code should generally call PyEval_SaveThread() instead, which
// calls this function.
extern void _PyThreadState_Detach(PyThreadState *tstate);

// Detaches the current thread to the "suspended" state if a stop-the-world
// pause is in progress.
//
// If there is no stop-the-world pause in progress, then the thread switches
// to the "detached" state.
extern void _PyThreadState_Suspend(PyThreadState *tstate);

// Perform a stop-the-world pause for all threads in all interpreters.
//
// Threads in the "attached" state are paused and transitioned to the "GC"
// state. Threads in the "detached" state switch to the "GC" state, preventing
// them from reattaching until the stop-the-world pause is complete.
//
// NOTE: This is a no-op outside of Py_GIL_DISABLED builds.
extern void _PyEval_StopTheWorldAll(_PyRuntimeState *runtime);
extern void _PyEval_StartTheWorldAll(_PyRuntimeState *runtime);

// Perform a stop-the-world pause for threads in the specified interpreter.
//
// NOTE: This is a no-op outside of Py_GIL_DISABLED builds.
extern void _PyEval_StopTheWorld(PyInterpreterState *interp);
extern void _PyEval_StartTheWorld(PyInterpreterState *interp);


static inline void
_Py_EnsureFuncTstateNotNULL(const char *func, PyThreadState *tstate)
{
    if (tstate == NULL) {
        _Py_FatalErrorFunc(func,
            "the function must be called with the GIL held, "
            "after Python initialization and before Python finalization, "
            "but the GIL is released (the current Python thread state is NULL)");
    }
}

// Call Py_FatalError() if tstate is NULL
#define _Py_EnsureTstateNotNULL(tstate) \
    _Py_EnsureFuncTstateNotNULL(__func__, (tstate))


/* Get the current interpreter state.

   The function is unsafe: it does not check for error and it can return NULL.

   The caller must hold the GIL.

   See also PyInterpreterState_Get()
   and _PyGILState_GetInterpreterStateUnsafe(). */
static inline PyInterpreterState* _PyInterpreterState_GET(void) {
    PyThreadState *tstate = _PyThreadState_GET();
#ifdef Py_DEBUG
    _Py_EnsureTstateNotNULL(tstate);
#endif
    return tstate->interp;
}


// PyThreadState functions

// Export for _testinternalcapi
PyAPI_FUNC(PyThreadState *) _PyThreadState_New(
    PyInterpreterState *interp,
    int whence);
extern void _PyThreadState_Bind(PyThreadState *tstate);
PyAPI_FUNC(PyThreadState *) _PyThreadState_NewBound(
    PyInterpreterState *interp,
    int whence);
extern PyThreadState * _PyThreadState_RemoveExcept(PyThreadState *tstate);
extern void _PyThreadState_DeleteList(PyThreadState *list);
extern void _PyThreadState_ClearMimallocHeaps(PyThreadState *tstate);

// Export for '_testinternalcapi' shared extension
PyAPI_FUNC(PyObject*) _PyThreadState_GetDict(PyThreadState *tstate);

/* The implementation of sys._current_exceptions().  Returns a dict mapping
   thread id to that thread's current exception.
*/
extern PyObject* _PyThread_CurrentExceptions(void);


/* Other */

extern PyThreadState * _PyThreadState_Swap(
    _PyRuntimeState *runtime,
    PyThreadState *newts);

extern PyStatus _PyInterpreterState_Enable(_PyRuntimeState *runtime);

#ifdef HAVE_FORK
extern PyStatus _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime);
extern void _PySignal_AfterFork(void);
#endif

// Export for the stable ABI
PyAPI_FUNC(int) _PyState_AddModule(
    PyThreadState *tstate,
    PyObject* module,
    PyModuleDef* def);


extern int _PyOS_InterruptOccurred(PyThreadState *tstate);

#define HEAD_LOCK(runtime) \
    PyMutex_LockFlags(&(runtime)->interpreters.mutex, _Py_LOCK_DONT_DETACH)
#define HEAD_UNLOCK(runtime) \
    PyMutex_Unlock(&(runtime)->interpreters.mutex)

// Get the configuration of the current interpreter.
// The caller must hold the GIL.
// Export for test_peg_generator.
PyAPI_FUNC(const PyConfig*) _Py_GetConfig(void);

// Get the single PyInterpreterState used by this process' GILState
// implementation.
//
// This function doesn't check for error. Return NULL before _PyGILState_Init()
// is called and after _PyGILState_Fini() is called.
//
// See also PyInterpreterState_Get() and _PyInterpreterState_GET().
extern PyInterpreterState* _PyGILState_GetInterpreterStateUnsafe(void);

static inline struct _Py_object_freelists* _Py_object_freelists_GET(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
#ifdef Py_DEBUG
    _Py_EnsureTstateNotNULL(tstate);
#endif

#ifdef Py_GIL_DISABLED
    return &((_PyThreadStateImpl*)tstate)->freelists;
#else
    return &tstate->interp->object_state.freelists;
#endif
}

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYSTATE_H */
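A hedged sketch of the attach/detach discipline described above (hypothetical core-internal helper; extension code would use PyEval_SaveThread()/PyEval_RestoreThread(), which wrap these functions):

static void
demo_blocking_io(PyThreadState *tstate)
{
    _PyThreadState_Detach(tstate);   // "attached" -> "detached": release GIL
    // ... perform blocking work without touching Python objects ...
    _PyThreadState_Attach(tstate);   // may block on the GIL or on a
                                     // stop-the-world pause
}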
21
Dependencies/Python/include/internal/pycore_pystats.h
vendored
Normal file
@@ -0,0 +1,21 @@
#ifndef Py_INTERNAL_PYSTATS_H
#define Py_INTERNAL_PYSTATS_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#ifdef Py_STATS
extern void _Py_StatsOn(void);
extern void _Py_StatsOff(void);
extern void _Py_StatsClear(void);
extern int _Py_PrintSpecializationStats(int to_file);
#endif

#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_PYSTATS_H
39
Dependencies/Python/include/internal/pycore_pythonrun.h
vendored
Normal file
@@ -0,0 +1,39 @@
#ifndef Py_INTERNAL_PYTHONRUN_H
#define Py_INTERNAL_PYTHONRUN_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

extern int _PyRun_SimpleFileObject(
    FILE *fp,
    PyObject *filename,
    int closeit,
    PyCompilerFlags *flags);

extern int _PyRun_AnyFileObject(
    FILE *fp,
    PyObject *filename,
    int closeit,
    PyCompilerFlags *flags);

extern int _PyRun_InteractiveLoopObject(
    FILE *fp,
    PyObject *filename,
    PyCompilerFlags *flags);

extern const char* _Py_SourceAsString(
    PyObject *cmd,
    const char *funcname,
    const char *what,
    PyCompilerFlags *cf,
    PyObject **cmd_copy);
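
/* A minimal usage sketch (an illustration, not CPython code): run a script
 * with default compiler flags, assuming the _PyCompilerFlags_INIT
 * initializer from the public CPython headers. _PyRun_SimpleFileObject()
 * returns 0 on success and -1 if an exception was raised; a nonzero
 * `closeit` asks it to close fp when finished. */
static inline int
_run_script_sketch(FILE *fp, PyObject *filename)
{
    PyCompilerFlags flags = _PyCompilerFlags_INIT;
    return _PyRun_SimpleFileObject(fp, filename, /*closeit=*/1, &flags);
}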

#ifdef __cplusplus
}
#endif
#endif // !Py_INTERNAL_PYTHONRUN_H

159
Dependencies/Python/include/internal/pycore_pythread.h
vendored
Normal file
@@ -0,0 +1,159 @@
#ifndef Py_INTERNAL_PYTHREAD_H
#define Py_INTERNAL_PYTHREAD_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

#include "dynamic_annotations.h"   // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX
#include "pycore_llist.h"          // struct llist_node

// Get _POSIX_THREADS and _POSIX_SEMAPHORES macros if available
#if (defined(HAVE_UNISTD_H) && !defined(_POSIX_THREADS) \
     && !defined(_POSIX_SEMAPHORES))
# include <unistd.h>   // _POSIX_THREADS, _POSIX_SEMAPHORES
#endif
#if (defined(HAVE_PTHREAD_H) && !defined(_POSIX_THREADS) \
     && !defined(_POSIX_SEMAPHORES))
// This means pthreads are not implemented in the libc headers, hence the
// macro is not present in <unistd.h>. But they can still be provided by an
// external library (e.g. GNU pth in pthread emulation).
# include <pthread.h>   // _POSIX_THREADS, _POSIX_SEMAPHORES
#endif
#if !defined(_POSIX_THREADS) && defined(__hpux) && defined(_SC_THREADS)
// Check if we're running on HP-UX and _SC_THREADS is defined. If so, then
// enough of the POSIX threads package is implemented to support Python
// threads.
//
// This is valid for HP-UX 11.23 running on an ia64 system. If needed, add
// a check of __ia64 to verify that we're running on an ia64 system instead
// of a pa-risc system.
# define _POSIX_THREADS
#endif


#if defined(_POSIX_THREADS) || defined(HAVE_PTHREAD_STUBS)
# define _USE_PTHREADS
#endif

#if defined(_USE_PTHREADS) && defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
// monotonic is supported statically. It doesn't mean it works at runtime.
# define CONDATTR_MONOTONIC
#endif


#if defined(HAVE_PTHREAD_STUBS)
#include "cpython/pthread_stubs.h"   // PTHREAD_KEYS_MAX
#include <stdbool.h>                 // bool

// pthread_key
struct py_stub_tls_entry {
    bool in_use;
    void *value;
};
#endif

struct _pythread_runtime_state {
    int initialized;

#ifdef _USE_PTHREADS
    // This matches when thread_pthread.h is used.
    struct {
        /* NULL when pthread_condattr_setclock(CLOCK_MONOTONIC) is not supported. */
        pthread_condattr_t *ptr;
# ifdef CONDATTR_MONOTONIC
        /* The value to which condattr_monotonic is set. */
        pthread_condattr_t val;
# endif
    } _condattr_monotonic;

#endif  // _USE_PTHREADS

#if defined(HAVE_PTHREAD_STUBS)
    struct {
        struct py_stub_tls_entry tls_entries[PTHREAD_KEYS_MAX];
    } stubs;
#endif

    // Linked list of ThreadHandles
    struct llist_node handles;
};

#define _pythread_RUNTIME_INIT(pythread) \
    { \
        .handles = LLIST_INIT(pythread.handles), \
    }

#ifdef HAVE_FORK
/* Private function to reinitialize a lock at fork in the child process.
   Reset the lock to the unlocked state.
   Return 0 on success, return -1 on error. */
extern int _PyThread_at_fork_reinit(PyThread_type_lock *lock);
extern void _PyThread_AfterFork(struct _pythread_runtime_state *state);
#endif  /* HAVE_FORK */


// unset: -1 seconds, in nanoseconds
#define PyThread_UNSET_TIMEOUT ((PyTime_t)(-1 * 1000 * 1000 * 1000))

// Exported for the _interpchannels module.
PyAPI_FUNC(int) PyThread_ParseTimeoutArg(
    PyObject *arg,
    int blocking,
    PY_TIMEOUT_T *timeout);

/* Helper to acquire an interruptible lock with a timeout. If the lock acquire
 * is interrupted, signal handlers are run, and if they raise an exception,
 * PY_LOCK_INTR is returned. Otherwise, PY_LOCK_ACQUIRED or PY_LOCK_FAILURE
 * are returned, depending on whether the lock can be acquired within the
 * timeout.
 */
// Exported for the _interpchannels module.
PyAPI_FUNC(PyLockStatus) PyThread_acquire_lock_timed_with_retries(
    PyThread_type_lock,
    PY_TIMEOUT_T microseconds);
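
/* A hedged sketch combining the two helpers above (illustrative, not CPython
 * code; local names and the -1/0/1 return convention are assumptions): parse
 * a Python-level timeout argument, then acquire the lock, retrying across
 * signal interruptions. PY_LOCK_INTR means a signal handler raised an
 * exception, so the caller must propagate the error. */
static inline int
_acquire_with_timeout_sketch(PyThread_type_lock lock, PyObject *timeout_obj)
{
    PY_TIMEOUT_T timeout;
    if (PyThread_ParseTimeoutArg(timeout_obj, /*blocking=*/1, &timeout) < 0) {
        return -1;   // exception already set by the parser
    }
    PyLockStatus st = PyThread_acquire_lock_timed_with_retries(lock, timeout);
    if (st == PY_LOCK_INTR) {
        return -1;   // interrupted; exception set by a signal handler
    }
    return (st == PY_LOCK_ACQUIRED) ? 0 : 1;   // 1 means the acquire timed out
}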

typedef unsigned long long PyThread_ident_t;
typedef Py_uintptr_t PyThread_handle_t;

#define PY_FORMAT_THREAD_IDENT_T "llu"
#define Py_PARSE_THREAD_IDENT_T "K"

PyAPI_FUNC(PyThread_ident_t) PyThread_get_thread_ident_ex(void);

/* Thread joining APIs.
 *
 * These APIs have a strict contract:
 *  - Either PyThread_join_thread or PyThread_detach_thread must be called
 *    exactly once with the given handle.
 *  - Calling neither PyThread_join_thread nor PyThread_detach_thread results
 *    in a resource leak until the end of the process.
 *  - Any other usage, such as calling both PyThread_join_thread and
 *    PyThread_detach_thread, or calling them more than once (including
 *    simultaneously), results in undefined behavior.
 */
PyAPI_FUNC(int) PyThread_start_joinable_thread(void (*func)(void *),
                                               void *arg,
                                               PyThread_ident_t* ident,
                                               PyThread_handle_t* handle);
/*
 * Join a thread started with `PyThread_start_joinable_thread`.
 * This function cannot be interrupted. It returns 0 on success,
 * a non-zero value on failure.
 */
PyAPI_FUNC(int) PyThread_join_thread(PyThread_handle_t);
/*
 * Detach a thread started with `PyThread_start_joinable_thread`, such
 * that its resources are released as soon as it exits.
 * This function cannot be interrupted. It returns 0 on success,
 * a non-zero value on failure.
 */
PyAPI_FUNC(int) PyThread_detach_thread(PyThread_handle_t);
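
/* A minimal sketch honoring the contract above (illustrative, not CPython
 * code; treating a non-zero return from the start function as failure is an
 * assumption): the handle is passed to exactly one of PyThread_join_thread()
 * or PyThread_detach_thread(). */
static void _worker_sketch(void *arg) { (void)arg; /* ... thread body ... */ }

static inline int
_run_and_join_sketch(void)
{
    PyThread_ident_t ident;
    PyThread_handle_t handle;
    if (PyThread_start_joinable_thread(_worker_sketch, NULL,
                                       &ident, &handle) != 0) {
        return -1;   // the thread was never started; nothing to join or detach
    }
    // Exactly one of join/detach: we join, so we must not also detach.
    return PyThread_join_thread(handle);
}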

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYTHREAD_H */
154
Dependencies/Python/include/internal/pycore_qsbr.h
vendored
Normal file
@@ -0,0 +1,154 @@
// The QSBR APIs (quiescent state-based reclamation) provide a mechanism for
// the free-threaded build to safely reclaim memory when there may be
// concurrent accesses.
//
// Many operations in the free-threaded build are protected by locks. However,
// in some cases, we want to allow reads to happen concurrently with updates.
// In this case, we need to delay freeing ("reclaiming") any memory that may be
// concurrently accessed by a reader. The QSBR APIs provide a way to do this.
#ifndef Py_INTERNAL_QSBR_H
#define Py_INTERNAL_QSBR_H

#include <stdbool.h>
#include <stdint.h>
#include "pycore_lock.h"   // PyMutex

#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

// The shared write sequence is always odd and incremented by two. Detached
// threads are indicated by a read sequence of zero. This avoids collisions
// between the offline state and any valid sequence number even if the
// sequence numbers wrap around.
#define QSBR_OFFLINE 0
#define QSBR_INITIAL 1
#define QSBR_INCR    2

// Wrap-around safe comparison. This is a holdover from the FreeBSD
// implementation, which uses 32-bit sequence numbers. We currently use 64-bit
// sequence numbers, so wrap-around is unlikely.
#define QSBR_LT(a, b)  ((int64_t)((a)-(b)) < 0)
#define QSBR_LEQ(a, b) ((int64_t)((a)-(b)) <= 0)
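
/* Worked illustration of the wrap-around safety (the values are examples
 * only, not from the header): near the top of the range, UINT64_MAX - 1 is
 * "less than" UINT64_MAX + 2 (which wraps to 1), because the signed
 * difference is negative:
 *
 *   QSBR_LT(UINT64_MAX - 1, 1)
 *     == ((int64_t)((UINT64_MAX - 1) - 1) < 0)
 *     == ((int64_t)0xFFFFFFFFFFFFFFFD < 0)   // i.e. -3 < 0
 *     == true
 *
 * A plain unsigned `<` on the same operands would give the wrong answer. */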

struct _qsbr_shared;
struct _PyThreadStateImpl;   // forward declare to avoid circular dependency

// Per-thread state
struct _qsbr_thread_state {
    // Last observed write sequence (or 0 if detached)
    uint64_t seq;

    // Shared (per-interpreter) QSBR state
    struct _qsbr_shared *shared;

    // Thread state (or NULL)
    PyThreadState *tstate;

    // Used to defer advancing write sequence a fixed number of times
    int deferrals;

    // Is this thread state allocated?
    bool allocated;
    struct _qsbr_thread_state *freelist_next;
};

// Padding to avoid false sharing
struct _qsbr_pad {
    struct _qsbr_thread_state qsbr;
    char __padding[64 - sizeof(struct _qsbr_thread_state)];
};
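
/* Illustration (an assumption, not part of the header): the 64 above targets
 * a 64-byte cache line, so each entry of the per-interpreter array occupies
 * one full line and a writer updating one thread's `seq` does not invalidate
 * a neighboring thread's entry. A build-time check could look like:
 *
 *   _Static_assert(sizeof(struct _qsbr_pad) == 64,
 *                  "_qsbr_pad should fill one cache line");
 */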

// Per-interpreter state
struct _qsbr_shared {
    // Write sequence: always odd, incremented by two
    uint64_t wr_seq;

    // Minimum observed read sequence of all QSBR thread states
    uint64_t rd_seq;

    // Array of QSBR thread states.
    struct _qsbr_pad *array;
    Py_ssize_t size;

    // Freelist of unused _qsbr_thread_states (protected by mutex)
    PyMutex mutex;
    struct _qsbr_thread_state *freelist;
};

static inline uint64_t
_Py_qsbr_shared_current(struct _qsbr_shared *shared)
{
    return _Py_atomic_load_uint64_acquire(&shared->wr_seq);
}

// Reports a quiescent state: the caller no longer holds any pointer to shared
// data not protected by locks or reference counts.
static inline void
_Py_qsbr_quiescent_state(struct _qsbr_thread_state *qsbr)
{
    uint64_t seq = _Py_qsbr_shared_current(qsbr->shared);
    _Py_atomic_store_uint64_release(&qsbr->seq, seq);
}

// Have the read sequences advanced to the given goal? Like `_Py_qsbr_poll()`,
// but does not perform a scan of threads.
static inline bool
_Py_qbsr_goal_reached(struct _qsbr_thread_state *qsbr, uint64_t goal)
{
    uint64_t rd_seq = _Py_atomic_load_uint64(&qsbr->shared->rd_seq);
    return QSBR_LEQ(goal, rd_seq);
}

// Advance the write sequence and return the new goal. This should be called
// after data is removed. The returned goal is used with `_Py_qsbr_poll()` to
// determine when it is safe to reclaim (free) the memory.
extern uint64_t
_Py_qsbr_advance(struct _qsbr_shared *shared);

// Batches requests to advance the write sequence. This advances the write
// sequence every N calls, which reduces overhead but increases time to
// reclamation. Returns the new goal.
extern uint64_t
_Py_qsbr_deferred_advance(struct _qsbr_thread_state *qsbr);

// Have the read sequences advanced to the given goal? If this returns true,
// it is safe to reclaim any memory tagged with the goal (or an earlier goal).
extern bool
_Py_qsbr_poll(struct _qsbr_thread_state *qsbr, uint64_t goal);
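
/* A minimal sketch of the intended retire/poll pattern (illustrative, not
 * CPython code; everything other than the _Py_qsbr_* calls, including the
 * use of free() from <stdlib.h>, is an assumption): after unlinking an
 * object from a shared structure, tag it with the goal returned by
 * _Py_qsbr_advance(), and free it only once _Py_qsbr_poll() confirms every
 * thread has passed a quiescent state since the removal. */
struct _retired_sketch {
    void *ptr;
    uint64_t goal;
};

static inline void
_retire_sketch(struct _qsbr_thread_state *qsbr,
               struct _retired_sketch *r, void *ptr)
{
    r->ptr = ptr;
    r->goal = _Py_qsbr_advance(qsbr->shared);   // new goal after the removal
}

static inline void
_maybe_reclaim_sketch(struct _qsbr_thread_state *qsbr,
                      struct _retired_sketch *r)
{
    if (r->ptr != NULL && _Py_qsbr_poll(qsbr, r->goal)) {
        free(r->ptr);     // no reader can still hold a pointer to it
        r->ptr = NULL;
    }
}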

// Called when thread attaches to interpreter
extern void
_Py_qsbr_attach(struct _qsbr_thread_state *qsbr);

// Called when thread detaches from interpreter
extern void
_Py_qsbr_detach(struct _qsbr_thread_state *qsbr);

// Reserves (allocates) a QSBR state and returns its index.
extern Py_ssize_t
_Py_qsbr_reserve(PyInterpreterState *interp);

// Associates a PyThreadState with the QSBR state at the given index
extern void
_Py_qsbr_register(struct _PyThreadStateImpl *tstate,
                  PyInterpreterState *interp, Py_ssize_t index);

// Disassociates a PyThreadState from the QSBR state and frees the QSBR state.
extern void
_Py_qsbr_unregister(PyThreadState *tstate);

extern void
_Py_qsbr_fini(PyInterpreterState *interp);

extern void
_Py_qsbr_after_fork(struct _PyThreadStateImpl *tstate);

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_QSBR_H */
21
Dependencies/Python/include/internal/pycore_range.h
vendored
Normal file
@@ -0,0 +1,21 @@
#ifndef Py_INTERNAL_RANGE_H
#define Py_INTERNAL_RANGE_H
#ifdef __cplusplus
extern "C" {
#endif

#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif

typedef struct {
    PyObject_HEAD
    long start;
    long step;
    long len;
} _PyRangeIterObject;

#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_RANGE_H */
Some files were not shown because too many files have changed in this diff.