diff --git a/ext/json/ext/generator/generator.c b/ext/json/ext/generator/generator.c
index 6ece9f05..35d543a7 100644
--- a/ext/json/ext/generator/generator.c
+++ b/ext/json/ext/generator/generator.c
@@ -128,7 +128,7 @@ typedef struct _search_state {
 #endif /* HAVE_SIMD */
 } search_state;
 
-static ALWAYS_INLINE() void search_flush(search_state *search)
+ALWAYS_INLINE(static) void search_flush(search_state *search)
 {
     // Do not remove this conditional without profiling, specifically escape-heavy text.
     // escape_UTF8_char_basic will advance search->ptr and search->cursor (effectively a search_flush).
@@ -171,7 +171,7 @@ static inline unsigned char search_escape_basic(search_state *search)
     return 0;
 }
 
-static ALWAYS_INLINE() void escape_UTF8_char_basic(search_state *search)
+ALWAYS_INLINE(static) void escape_UTF8_char_basic(search_state *search)
 {
     const unsigned char ch = (unsigned char)*search->ptr;
     switch (ch) {
@@ -258,7 +258,7 @@ static inline void escape_UTF8_char(search_state *search, unsigned char ch_len)
 
 #ifdef HAVE_SIMD
 
-static ALWAYS_INLINE() char *copy_remaining_bytes(search_state *search, unsigned long vec_len, unsigned long len)
+ALWAYS_INLINE(static) char *copy_remaining_bytes(search_state *search, unsigned long vec_len, unsigned long len)
 {
     // Flush the buffer so everything up until the last 'len' characters are unflushed.
     search_flush(search);
@@ -281,7 +281,7 @@ static ALWAYS_INLINE() char *copy_remaining_bytes(search_state *search, unsigned
 
 #ifdef HAVE_SIMD_NEON
 
-static ALWAYS_INLINE() unsigned char neon_next_match(search_state *search)
+ALWAYS_INLINE(static) unsigned char neon_next_match(search_state *search)
 {
     uint64_t mask = search->matches_mask;
     uint32_t index = trailing_zeros64(mask) >> 2;
@@ -395,7 +395,7 @@ static inline unsigned char search_escape_basic_neon(search_state *search)
 
 #ifdef HAVE_SIMD_SSE2
 
-static ALWAYS_INLINE() unsigned char sse2_next_match(search_state *search)
+ALWAYS_INLINE(static) unsigned char sse2_next_match(search_state *search)
 {
     int mask = search->matches_mask;
     int index = trailing_zeros(mask);
@@ -419,7 +419,7 @@ static ALWAYS_INLINE() unsigned char sse2_next_match(search_state *search)
 #define TARGET_SSE2
 #endif
 
-static TARGET_SSE2 ALWAYS_INLINE() unsigned char search_escape_basic_sse2(search_state *search)
+ALWAYS_INLINE(static) TARGET_SSE2 unsigned char search_escape_basic_sse2(search_state *search)
 {
     if (RB_UNLIKELY(search->has_matches)) {
         // There are more matches if search->matches_mask > 0.
@@ -1123,8 +1123,7 @@ struct hash_foreach_arg {
     bool mixed_keys_encountered;
 };
 
-NOINLINE()
-static void
+NOINLINE(static) void
 json_inspect_hash_with_mixed_keys(struct hash_foreach_arg *arg)
 {
     if (arg->mixed_keys_encountered) {
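
Note on the pattern above: static ALWAYS_INLINE() becomes ALWAYS_INLINE(static), and the split NOINLINE() / static pair becomes NOINLINE(static), so the inlining macros now take the storage-class specifier as an argument. The hunk that redefines the macros themselves is not part of this diff; a definition along the following lines is a plausible reading, not the gem's actual header. Handing the storage class to the macro lets it emit the entire declaration prefix in whichever order a given compiler accepts, e.g. MSVC's __forceinline, which stands in for the inline keyword rather than decorating it.

    /* Hypothetical sketch: the hunk defining these macros is not shown in this diff. */
    #if defined(_MSC_VER) && !defined(__clang__)
    # define ALWAYS_INLINE(storage) storage __forceinline
    # define NOINLINE(storage)      storage __declspec(noinline)
    #elif defined(__GNUC__) || defined(__clang__)
    # define ALWAYS_INLINE(storage) storage inline __attribute__((always_inline))
    # define NOINLINE(storage)      storage __attribute__((noinline))
    #else
    # define ALWAYS_INLINE(storage) storage inline
    # define NOINLINE(storage)      storage
    #endif

Under such a definition, ALWAYS_INLINE(static) void search_flush(...) expands to static __forceinline void search_flush(...) on MSVC and to static inline __attribute__((always_inline)) void search_flush(...) elsewhere; it also collapses the previously two-line NOINLINE() / static void declaration into one, as in the json_inspect_hash_with_mixed_keys hunk above.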
diff --git a/ext/json/ext/parser/parser.c b/ext/json/ext/parser/parser.c
index 0216eef9..5b7cd835 100644
--- a/ext/json/ext/parser/parser.c
+++ b/ext/json/ext/parser/parser.c
@@ -88,7 +88,7 @@ static void rvalue_cache_insert_at(rvalue_cache *cache, int index, VALUE rstring
 #if JSON_CPU_LITTLE_ENDIAN_64BITS
 #if __has_builtin(__builtin_bswap64)
 #undef rstring_cache_memcmp
-static ALWAYS_INLINE() int rstring_cache_memcmp(const char *str, const char *rptr, const long length)
+ALWAYS_INLINE(static) int rstring_cache_memcmp(const char *str, const char *rptr, const long length)
 {
     // The libc memcmp has numerous complex optimizations, but in this particular case,
     // we know the string is small (JSON_RVALUE_CACHE_MAX_ENTRY_LENGTH), so being able to
@@ -117,7 +117,7 @@ static ALWAYS_INLINE() int rstring_cache_memcmp(const char *str, const char *rpt
 #endif
 #endif
 
-static ALWAYS_INLINE() int rstring_cache_cmp(const char *str, const long length, VALUE rstring)
+ALWAYS_INLINE(static) int rstring_cache_cmp(const char *str, const long length, VALUE rstring)
 {
     const char *rstring_ptr;
     long rstring_length;
@@ -131,7 +131,7 @@ static ALWAYS_INLINE() int rstring_cache_cmp(const char *str, const long length,
     }
 }
 
-static ALWAYS_INLINE() VALUE rstring_cache_fetch(rvalue_cache *cache, const char *str, const long length)
+ALWAYS_INLINE(static) VALUE rstring_cache_fetch(rvalue_cache *cache, const char *str, const long length)
 {
     int low = 0;
     int high = cache->length - 1;
@@ -540,7 +540,7 @@ json_eat_comments(JSON_ParserState *state)
     }
 }
 
-static ALWAYS_INLINE() void
+ALWAYS_INLINE(static) void
 json_eat_whitespace(JSON_ParserState *state)
 {
     while (true) {
@@ -661,7 +661,7 @@ static inline const char *json_next_backslash(const char *pe, const char *string
     return NULL;
 }
 
-static NOINLINE() VALUE json_string_unescape(JSON_ParserState *state, JSON_ParserConfig *config, const char *string, const char *stringEnd, bool is_name, JSON_UnescapePositions *positions)
+NOINLINE(static) VALUE json_string_unescape(JSON_ParserState *state, JSON_ParserConfig *config, const char *string, const char *stringEnd, bool is_name, JSON_UnescapePositions *positions)
 {
     bool intern = is_name || config->freeze;
     bool symbolize = is_name && config->symbolize_names;
@@ -946,7 +946,7 @@ static const bool string_scan_table[256] = {
 static SIMD_Implementation simd_impl = SIMD_NONE;
 #endif /* HAVE_SIMD */
 
-static ALWAYS_INLINE() bool string_scan(JSON_ParserState *state)
+ALWAYS_INLINE(static) bool string_scan(JSON_ParserState *state)
 {
 #ifdef HAVE_SIMD
 #if defined(HAVE_SIMD_NEON)
@@ -1015,7 +1015,7 @@ static VALUE json_parse_escaped_string(JSON_ParserState *state, JSON_ParserConfi
     return Qfalse;
 }
 
-static ALWAYS_INLINE() VALUE json_parse_string(JSON_ParserState *state, JSON_ParserConfig *config, bool is_name)
+ALWAYS_INLINE(static) VALUE json_parse_string(JSON_ParserState *state, JSON_ParserConfig *config, bool is_name)
 {
     state->cursor++;
     const char *start = state->cursor;
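
Note: the comment inside rstring_cache_memcmp is truncated by the hunk boundary, but the technique it describes is self-contained: cache keys are short (at most JSON_RVALUE_CACHE_MAX_ENTRY_LENGTH bytes), so the compare can proceed a 64-bit word at a time, and on a little-endian CPU __builtin_bswap64 moves the first differing byte into the most significant position, so a single unsigned integer comparison reproduces memcmp ordering. A minimal sketch of that idea, with my own names and a scalar tail; the gem's real implementation may handle the tail differently:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: memcmp for short buffers on a little-endian CPU, GCC/Clang builtins assumed. */
    static inline int small_memcmp_bswap64(const char *a, const char *b, long length)
    {
        while (length >= 8) {
            uint64_t wa, wb;
            memcpy(&wa, a, 8);   /* memcpy keeps the 8-byte loads alignment-safe */
            memcpy(&wb, b, 8);
            if (wa != wb) {
                /* Byte-swap to big-endian so the first differing byte becomes the most
                 * significant one; the unsigned compare then matches memcmp order. */
                return __builtin_bswap64(wa) < __builtin_bswap64(wb) ? -1 : 1;
            }
            a += 8;
            b += 8;
            length -= 8;
        }
        return length > 0 ? memcmp(a, b, (size_t)length) : 0;
    }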
diff --git a/ext/json/ext/simd/simd.h b/ext/json/ext/simd/simd.h
index c9e3b3ec..f8e5ee18 100644
--- a/ext/json/ext/simd/simd.h
+++ b/ext/json/ext/simd/simd.h
@@ -73,14 +73,14 @@ static inline SIMD_Implementation find_simd_implementation(void)
 #define HAVE_SIMD_NEON 1
 
 // See: https://community.arm.com/arm-community-blogs/b/servers-and-cloud-computing-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon
-static ALWAYS_INLINE() uint64_t neon_match_mask(uint8x16_t matches)
+ALWAYS_INLINE(static) uint64_t neon_match_mask(uint8x16_t matches)
 {
     const uint8x8_t res = vshrn_n_u16(vreinterpretq_u16_u8(matches), 4);
     const uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(res), 0);
     return mask & 0x8888888888888888ull;
 }
 
-static ALWAYS_INLINE() uint64_t compute_chunk_mask_neon(const char *ptr)
+ALWAYS_INLINE(static) uint64_t compute_chunk_mask_neon(const char *ptr)
 {
     uint8x16_t chunk = vld1q_u8((const unsigned char *)ptr);
 
@@ -93,7 +93,7 @@ static ALWAYS_INLINE() uint64_t compute_chunk_mask_neon(const char *ptr)
     return neon_match_mask(needs_escape);
 }
 
-static ALWAYS_INLINE() int string_scan_simd_neon(const char **ptr, const char *end, uint64_t *mask)
+ALWAYS_INLINE(static) int string_scan_simd_neon(const char **ptr, const char *end, uint64_t *mask)
 {
     while (*ptr + sizeof(uint8x16_t) <= end) {
         uint64_t chunk_mask = compute_chunk_mask_neon(*ptr);
@@ -140,7 +140,7 @@ static inline uint8x16x4_t load_uint8x16_4(const unsigned char *table)
 #define _mm_cmpgt_epu8(a, b) _mm_xor_si128(_mm_cmple_epu8(a, b), _mm_set1_epi8(-1))
 #define _mm_cmplt_epu8(a, b) _mm_cmpgt_epu8(b, a)
 
-static TARGET_SSE2 ALWAYS_INLINE() int compute_chunk_mask_sse2(const char *ptr)
+ALWAYS_INLINE(static) TARGET_SSE2 int compute_chunk_mask_sse2(const char *ptr)
 {
     __m128i chunk = _mm_loadu_si128((__m128i const*)ptr);
     // Trick: c < 32 || c == 34 can be factored as c ^ 2 < 33
@@ -151,7 +151,7 @@ static TARGET_SSE2 ALWAYS_INLINE() int compute_chunk_mask_sse2(const char *ptr)
     return _mm_movemask_epi8(needs_escape);
 }
 
-static TARGET_SSE2 ALWAYS_INLINE() int string_scan_simd_sse2(const char **ptr, const char *end, int *mask)
+ALWAYS_INLINE(static) TARGET_SSE2 int string_scan_simd_sse2(const char **ptr, const char *end, int *mask)
 {
     while (*ptr + sizeof(__m128i) <= end) {
         int chunk_mask = compute_chunk_mask_sse2(*ptr);
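
Note: the factoring in compute_chunk_mask_sse2, rewriting c < 32 || c == 34 as (c ^ 2) < 33, works because XOR with 2 only flips bit 1: every byte below 32 stays below 32, and 34 (0x22, the double quote) maps to exactly 32, the one extra value that "< 33" admits. This trades an unsigned less-than, an equality test and an OR for a single XOR plus one unsigned compare, which is worth having on SSE2, where unsigned byte comparisons must themselves be emulated (hence the _mm_cmpgt_epu8 / _mm_cmplt_epu8 macros above). A standalone scalar check of the equivalence, not part of the gem:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        for (int c = 0; c < 256; c++) {
            int direct   = (c < 32) || (c == 34);   /* control characters or '"' */
            int factored = ((c ^ 2) & 0xFF) < 33;   /* one compare after the XOR */
            assert(direct == factored);
        }
        puts("equivalence holds for all 256 byte values");
        return 0;
    }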