Viewing file: /usr/src/linux-oracle-5.13-headers-5.13.0-1018/arch/x86/include/asm/cmpxchg.h (7.75 KB)    -rw-r--r--

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
    __compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
    __compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
    __compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
    __compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B    1
#define __X86_CASE_W    2
#define __X86_CASE_L    4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q    8
#else
#define __X86_CASE_Q    -1        /* sizeof will never return -1 */
#endif
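
/*
 * Illustrative expansion (assuming !CONFIG_64BIT): since sizeof() can
 * never yield -1, the compiler proves the __X86_CASE_Q arm of each
 * switch dead and deletes it, so no reference to the undefined
 * *_wrong_size() symbols is emitted for the sizes a 32-bit kernel
 * actually supports:
 *
 *    switch (sizeof(u32)) {        // constant 4
 *    case __X86_CASE_L:            // matches; this arm is kept
 *        ...
 *        break;
 *    case __X86_CASE_Q:            // 4 != -1: arm eliminated
 *        ...
 *    }
 */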

/* 
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)                    \
    ({                                \
        __typeof__(*(ptr)) __ret = (arg);            \
        switch (sizeof(*(ptr))) {                \
        case __X86_CASE_B:                    \
            asm volatile (lock #op "b %b0, %1\n"        \
                      : "+q" (__ret), "+m" (*(ptr))    \
                      : : "memory", "cc");        \
            break;                        \
        case __X86_CASE_W:                    \
            asm volatile (lock #op "w %w0, %1\n"        \
                      : "+r" (__ret), "+m" (*(ptr))    \
                      : : "memory", "cc");        \
            break;                        \
        case __X86_CASE_L:                    \
            asm volatile (lock #op "l %0, %1\n"        \
                      : "+r" (__ret), "+m" (*(ptr))    \
                      : : "memory", "cc");        \
            break;                        \
        case __X86_CASE_Q:                    \
            asm volatile (lock #op "q %q0, %1\n"        \
                      : "+r" (__ret), "+m" (*(ptr))    \
                      : : "memory", "cc");        \
            break;                        \
        default:                        \
            __ ## op ## _wrong_size();            \
        }                            \
        __ret;                            \
    })

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory state, we use
 * "asm volatile" and "memory" clobbers to prevent gcc from reordering
 * memory accesses around the exchange.
 */
#define arch_xchg(ptr, v)    __xchg_op((ptr), (v), xchg, "")
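
/*
 * Usage sketch (illustrative): for a 4-byte operand this expands to the
 * __X86_CASE_L arm of __xchg_op(), a single implicitly-locked "xchgl":
 *
 *    unsigned int flag = 1, prev;
 *    prev = arch_xchg(&flag, 0);    // atomically clear, return old value
 */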

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)            \
({                                    \
    __typeof__(*(ptr)) __ret;                    \
    __typeof__(*(ptr)) __old = (old);                \
    __typeof__(*(ptr)) __new = (new);                \
    switch (size) {                            \
    case __X86_CASE_B:                        \
    {                                \
        volatile u8 *__ptr = (volatile u8 *)(ptr);        \
        asm volatile(lock "cmpxchgb %2,%1"            \
                 : "=a" (__ret), "+m" (*__ptr)        \
                 : "q" (__new), "0" (__old)            \
                 : "memory");                \
        break;                            \
    }                                \
    case __X86_CASE_W:                        \
    {                                \
        volatile u16 *__ptr = (volatile u16 *)(ptr);        \
        asm volatile(lock "cmpxchgw %2,%1"            \
                 : "=a" (__ret), "+m" (*__ptr)        \
                 : "r" (__new), "0" (__old)            \
                 : "memory");                \
        break;                            \
    }                                \
    case __X86_CASE_L:                        \
    {                                \
        volatile u32 *__ptr = (volatile u32 *)(ptr);        \
        asm volatile(lock "cmpxchgl %2,%1"            \
                 : "=a" (__ret), "+m" (*__ptr)        \
                 : "r" (__new), "0" (__old)            \
                 : "memory");                \
        break;                            \
    }                                \
    case __X86_CASE_Q:                        \
    {                                \
        volatile u64 *__ptr = (volatile u64 *)(ptr);        \
        asm volatile(lock "cmpxchgq %2,%1"            \
                 : "=a" (__ret), "+m" (*__ptr)        \
                 : "r" (__new), "0" (__old)            \
                 : "memory");                \
        break;                            \
    }                                \
    default:                            \
        __cmpxchg_wrong_size();                    \
    }                                \
    __ret;                                \
})

#define __cmpxchg(ptr, old, new, size)                    \
    __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)                \
    __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)                \
    __raw_cmpxchg((ptr), (old), (new), (size), "")
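
/*
 * The three variants differ only in their lock prefix: LOCK_PREFIX is
 * patched away by the alternatives machinery when only one CPU is
 * present, "lock; " is emitted unconditionally (for memory shared with
 * other bus agents such as hypervisors or devices), and "" makes the
 * operation atomic only with respect to the local CPU.
 */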

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define arch_cmpxchg(ptr, old, new)                    \
    __cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_sync_cmpxchg(ptr, old, new)                \
    __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_cmpxchg_local(ptr, old, new)                \
    __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
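
/*
 * Usage sketch (illustrative): a lock-free increment built on
 * arch_cmpxchg(). Success is detected by comparing the returned value
 * with the expected old value:
 *
 *    unsigned int cur = READ_ONCE(*p), old;
 *    for (;;) {
 *        old = arch_cmpxchg(p, cur, cur + 1);
 *        if (old == cur)
 *            break;        // our store went through
 *        cur = old;        // lost a race: retry with the new value
 *    }
 */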


#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)        \
({                                    \
    bool success;                            \
    __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);        \
    __typeof__(*(_ptr)) __old = *_old;                \
    __typeof__(*(_ptr)) __new = (_new);                \
    switch (size) {                            \
    case __X86_CASE_B:                        \
    {                                \
        volatile u8 *__ptr = (volatile u8 *)(_ptr);        \
        asm volatile(lock "cmpxchgb %[new], %[ptr]"        \
                 CC_SET(z)                    \
                 : CC_OUT(z) (success),            \
                   [ptr] "+m" (*__ptr),            \
                   [old] "+a" (__old)            \
                 : [new] "q" (__new)            \
                 : "memory");                \
        break;                            \
    }                                \
    case __X86_CASE_W:                        \
    {                                \
        volatile u16 *__ptr = (volatile u16 *)(_ptr);        \
        asm volatile(lock "cmpxchgw %[new], %[ptr]"        \
                 CC_SET(z)                    \
                 : CC_OUT(z) (success),            \
                   [ptr] "+m" (*__ptr),            \
                   [old] "+a" (__old)            \
                 : [new] "r" (__new)            \
                 : "memory");                \
        break;                            \
    }                                \
    case __X86_CASE_L:                        \
    {                                \
        volatile u32 *__ptr = (volatile u32 *)(_ptr);        \
        asm volatile(lock "cmpxchgl %[new], %[ptr]"        \
                 CC_SET(z)                    \
                 : CC_OUT(z) (success),            \
                   [ptr] "+m" (*__ptr),            \
                   [old] "+a" (__old)            \
                 : [new] "r" (__new)            \
                 : "memory");                \
        break;                            \
    }                                \
    case __X86_CASE_Q:                        \
    {                                \
        volatile u64 *__ptr = (volatile u64 *)(_ptr);        \
        asm volatile(lock "cmpxchgq %[new], %[ptr]"        \
                 CC_SET(z)                    \
                 : CC_OUT(z) (success),            \
                   [ptr] "+m" (*__ptr),            \
                   [old] "+a" (__old)            \
                 : [new] "r" (__new)            \
                 : "memory");                \
        break;                            \
    }                                \
    default:                            \
        __cmpxchg_wrong_size();                    \
    }                                \
    if (unlikely(!success))                        \
        *_old = __old;                        \
    likely(success);                        \
})

#define __try_cmpxchg(ptr, pold, new, size)                \
    __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

#define arch_try_cmpxchg(ptr, pold, new)                 \
    __try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
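
/*
 * Usage sketch (illustrative): arch_try_cmpxchg() returns a bool and,
 * on failure, writes the value actually found in memory back through
 * the "old" pointer, so a retry loop needs no separate reload:
 *
 *    unsigned int cur = READ_ONCE(*p);
 *    do {
 *        // 'cur' is refreshed automatically after a failed attempt
 *    } while (!arch_try_cmpxchg(p, &cur, cur + 1));
 */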

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 */
#define __xadd(ptr, inc, lock)    __xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)        __xadd((ptr), (inc), LOCK_PREFIX)
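
/*
 * Usage sketch (illustrative): a fetch-and-add that returns the value
 * the counter held before the addition:
 *
 *    unsigned int prev = xadd(&counter, 1);    // locked "xaddl"
 */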

#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)            \
({                                    \
    bool __ret;                            \
    __typeof__(*(p1)) __old1 = (o1), __new1 = (n1);            \
    __typeof__(*(p2)) __old2 = (o2), __new2 = (n2);            \
    BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));            \
    BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));            \
    VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));        \
    VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));    \
    asm volatile(pfx "cmpxchg%c5b %1"                \
             CC_SET(e)                        \
             : CC_OUT(e) (__ret),                \
               "+m" (*(p1)), "+m" (*(p2)),            \
               "+a" (__old1), "+d" (__old2)            \
             : "i" (2 * sizeof(long)),                \
               "b" (__new1), "c" (__new2));            \
    __ret;                                \
})

#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
    __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
    __cmpxchg_double(, p1, p2, o1, o2, n1, n2)
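
/*
 * Usage sketch (illustrative): both words are compared and replaced as
 * one unit; the BUILD_BUG_ON/VM_BUG_ON checks above require two
 * adjacent longs aligned to 2 * sizeof(long), matching the operand
 * rules of cmpxchg8b/cmpxchg16b:
 *
 *    struct { unsigned long lo, hi; } pair
 *        __aligned(2 * sizeof(long));
 *    bool ok = arch_cmpxchg_double(&pair.lo, &pair.hi,
 *                                  old_lo, old_hi, new_lo, new_hi);
 */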

#endif    /* ASM_X86_CMPXCHG_H */
