2
0
mirror of https://github.com/checkpoint-restore/criu synced 2025-08-31 14:25:49 +00:00

include: add common header files for loongarch64

Signed-off-by: znley <shanjiantao@loongson.cn>
This commit is contained in:
znley
2023-06-12 11:23:38 +08:00
committed by Andrei Vagin
parent 8a24d4872e
commit b304106e6b
5 changed files with 150 additions and 0 deletions

View File

@@ -0,0 +1,62 @@
#ifndef __CR_ATOMIC_H__
#define __CR_ATOMIC_H__
#include <linux/types.h>
#include "common/compiler.h"
/* Atomic counter: a plain int wrapped in a struct so it is never mixed
 * with ordinary integers by accident. */
typedef struct {
	int counter;
} atomic_t;
/*
 * Read the current value of @v.  The access goes through a volatile
 * pointer so the compiler performs a fresh load from memory on every
 * call instead of caching the value in a register.
 */
static inline int atomic_read(const atomic_t *v)
{
	const volatile int *p = &v->counter;

	return *p;
}
/*
 * Store @i into @v.  This is a plain assignment: it provides no memory
 * barrier and no read-modify-write atomicity — intended for
 * initialization or otherwise unraced stores.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
/*
 * Atomically add @i to @v and return the resulting (new) value.
 *
 * amadd_db.w deposits the *old* counter value into @result while adding
 * %2 to the word at %0; the "_db" suffix selects the barrier variant of
 * the AM* ops (per the LoongArch ISA manual — confirm against the manual
 * in use).  Hence the new value is result + i.
 *
 * NOTE(review): the "__atomic_" prefix sits in the namespace of the
 * GCC/Clang __atomic_* builtins — consider renaming if it ever clashes.
 */
static inline int __atomic_add(int i, atomic_t *v)
{
	int result;

	asm volatile("amadd_db.w %1, %2, %0" : "+ZB"(v->counter), "=&r"(result) : "r"(i) : "memory");

	return result + i;
}
/* Atomically add @i to @v, discarding the resulting value. */
static inline void atomic_add(int i, atomic_t *v)
{
	__atomic_add(i, v);
}
/* Atomically add @i to @v and return the new (post-add) value. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add(i, v);
}
/*
 * Derived helpers built on atomic_add()/atomic_add_return().
 *
 * The subtraction argument must be parenthesized before negation: the
 * previous -(int)i expansion broke for compound expressions, e.g.
 * atomic_sub(a + b, v) expanded to atomic_add(-(int)a + b, v).
 */
#define atomic_sub(i, v)	atomic_add(-(int)(i), v)
#define atomic_sub_return(i, v)	atomic_add_return(-(int)(i), v)
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_inc_return(v)	atomic_add_return(1, v)
#define atomic_dec(v)		atomic_sub(1, v)
#define atomic_dec_return(v)	atomic_sub_return(1, v)
/*
 * Atomic compare-and-exchange: if ptr->counter == old, store @new into it.
 * Returns the value actually loaded from ptr->counter — it equals @old
 * exactly when the exchange took place.
 *
 * Classic LL/SC loop:
 *   1: ll.w   load-linked of the counter into ret
 *      bne    bail out to 2 if the loaded value != @old
 *      or     move @new into $t0 ("or rd, rj, $zero" is a register move)
 *      sc.w   store-conditional of $t0; $t0 becomes 0 if the reservation
 *             was lost, in which case beqz retries from 1
 *   2: dbar 0 full memory barrier, executed on success and failure alike
 */
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int ret;

	asm volatile("1: \n"
		     " ll.w %0, %1 \n"
		     " bne %0, %2, 2f \n"
		     " or $t0, %3, $zero \n"
		     " sc.w $t0, %1 \n"
		     " beqz $t0, 1b \n"
		     "2: \n"
		     " dbar 0 \n"
		     : "=&r"(ret), "+ZB"(ptr->counter)
		     : "r"(old), "r"(new)
		     : "t0", "memory");

	return ret;
}
#endif /* __CR_ATOMIC_H__ */

View File

@@ -0,0 +1,24 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include "common/asm-generic/bitops.h"
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
/* Mask of bit @nr within its word, and index of the word holding bit @nr. */
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
/*
 * Fix: the word index is simply nr / BITS_PER_LONG.  The previous
 * definition, ((1UL << ((nr) / BITS_PER_LONG)) - 1), built a *mask*; it
 * only coincidentally matched the correct index for nr < 2*BITS_PER_LONG
 * and indexed the wrong word beyond that (e.g. nr = 128 addressed word 3
 * instead of word 2), corrupting memory.
 */
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)

static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long res, mask;

	mask = BIT_MASK(nr);
	/* amor_db.d: atomic fetch-OR with barrier; res receives the old word. */
	asm volatile("amor_db.d %0, %2, %1" : "=&r"(res), "+ZB"(addr[BIT_WORD(nr)]) : "r"(mask) : "memory");

	return (res & mask) != 0;
}
#endif

View File

@@ -0,0 +1,6 @@
#ifndef __CR_BITSPERLONG_H__
#define __CR_BITSPERLONG_H__

/*
 * _LOONGARCH_SZLONG is predefined by the LoongArch toolchain — presumably
 * 64 on loongarch64; NOTE(review): verify against the compiler in use.
 */
#define BITS_PER_LONG _LOONGARCH_SZLONG

#endif /* __CR_BITSPERLONG_H__ */

View File

@@ -0,0 +1,19 @@
#ifndef __CR_LINKAGE_H__
#define __CR_LINKAGE_H__

/*
 * Assembler linkage helpers for .S files: symbol alignment, visibility
 * and size annotations.
 */

/* Code-label alignment — presumably power-of-two GAS semantics, i.e.
 * 2^2 = 4 bytes; TODO confirm .align behavior for LoongArch GAS. */
#define __ALIGN .align 2
#define __ALIGN_STR ".align 2"

/* Export a global label (data or entry point without type/size info). */
#define GLOBAL(name) \
	.globl name; \
	name:

/* Open a global function: exported, aligned, typed for the symbol table. */
#define ENTRY(name) \
	.globl name; \
	__ALIGN; \
	.type name, @function; \
	name:

/* Close a symbol opened with ENTRY()/GLOBAL(): record its size. */
#define END(sym) .size sym, .- sym

#endif /* __CR_LINKAGE_H__ */

View File

@@ -0,0 +1,39 @@
#ifndef __CR_ASM_PAGE_H__
#define __CR_ASM_PAGE_H__
#define ARCH_HAS_LONG_PAGES
#ifndef CR_NOGLIBC
#include <string.h> /* ffsl() */
#include <unistd.h> /* _SC_PAGESIZE */
static unsigned __page_size;
static unsigned __page_shift;

/*
 * The page size on loongarch64 is not a compile-time constant (it is
 * kernel-configurable), so it is queried from the kernel once and cached.
 */
static inline unsigned page_size(void)
{
	if (!__page_size)
		__page_size = sysconf(_SC_PAGESIZE);
	return __page_size;
}

/*
 * log2(page_size()), cached.  Computed with a plain shift loop instead of
 * ffsl(): glibc only declares ffsl() under _DEFAULT_SOURCE/__USE_MISC, so
 * the loop is more portable and equivalent for the (always power-of-two)
 * page size.
 */
static inline unsigned page_shift(void)
{
	if (!__page_shift) {
		unsigned long sz = page_size();
		unsigned shift = 0;

		while ((1UL << shift) < sz)
			shift++;
		__page_shift = shift;
	}
	return __page_shift;
}
/* Run-time page accessors (see ARCH_HAS_LONG_PAGES: not compile-time constants). */
#define PAGE_SIZE page_size()
#define PAGE_SHIFT page_shift()
/* Mask selecting the page-aligned part of an address. */
#define PAGE_MASK (~(PAGE_SIZE - 1))
/* Page frame number containing byte offset @addr. */
#define PAGE_PFN(addr) ((addr) / PAGE_SIZE)
#else /* CR_NOGLIBC */
/* Without glibc the page_size() implementation is provided elsewhere —
 * presumably by the nolibc/restorer side of CRIU; confirm at link time. */
extern unsigned page_size(void);
#define PAGE_SIZE page_size()
#endif /* CR_NOGLIBC */
#endif /* __CR_ASM_PAGE_H__ */