/* Atomic operation notes (x86 inline-assembly implementations). */
/*
 * ice_atomic_set - set the counter to a given value
 * @v: pointer of type ice_atomic_t
 * @i: value to store
 *
 * Plain (non-locked) store of @i into the counter.
 */
inline void ice_atomic_set(ice_atomic_t* v, int i)
{
    (*v).counter = i;
}
/**
 * ice_atomic_inc - increment ice_atomic variable
 * @v: pointer of type ice_atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed useful
 * range of an ice_atomic_t is only 24 bits.
 *
 * Inlined because this operation is performance critical.
 */
inline void ice_atomic_inc(ice_atomic_t *v)
{
/*
 * The "lock" prefix makes the read-modify-write of the counter atomic
 * on SMP systems.  The counter appears both as an output ("=m") and an
 * input ("m") operand so the compiler knows the location is read as
 * well as written.
 *
 * NOTE(review): unlike ice_atomic_dec_and_test below, there is no
 * "memory" clobber here, so the compiler may reorder unrelated memory
 * accesses around the increment -- confirm this is intentional (it is
 * typically acceptable for a bare reference-count increment).
 */
__asm__ __volatile__(
"lock ; incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
/**
 * ice_atomic_dec_and_test - decrement and test
 * @v: pointer of type ice_atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases. Note that the guaranteed useful
 * range of an ice_atomic_t is only 24 bits.
 *
 * Inlined because this operation is performance critical.
 */
inline int ice_atomic_dec_and_test(ice_atomic_t *v)
{
/* c receives the zero-flag result of the decrement (1 iff result == 0). */
unsigned char c;
/*
 * "lock ; decl" atomically decrements the counter; "sete" then stores
 * the resulting zero flag into c, so c == 1 exactly when the counter
 * reached zero.  The "memory" clobber stops the compiler from caching
 * other memory values in registers across this operation.
 */
__asm__ __volatile__(
"lock ; decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
/**
 * ice_atomic_exchange_add - same as InterlockedExchangeAdd. This
 * didn't come from atomic.h (the code was derived from similar code
 * in /usr/include/asm/rwsem.h)
 * @i: value to add to the counter
 * @v: pointer of type ice_atomic_t
 *
 * Atomically adds @i to the counter and returns the NEW value of the
 * counter (the value after the addition).
 *
 * NOTE(review): Win32 InterlockedExchangeAdd returns the value held
 * *before* the addition, whereas this returns the value *after* it
 * (tmp holds the old value from xadd, and tmp + i is returned) --
 * the "same as" claim above is inexact; callers rely on the
 * new-value semantics, so do not "fix" this without auditing them.
 *
 * Inlined because this operation is performance critical.
 */
inline int ice_atomic_exchange_add(int i, ice_atomic_t* v)
{
/* xadd swaps: tmp <- old counter value, counter <- old value + i. */
int tmp = i;
__asm__ __volatile__(
"lock ; xadd %0,(%2)"
:"+r"(tmp), "=m"(v->counter)
:"r"(v), "m"(v->counter)
: "memory");
/* tmp now holds the pre-add value; tmp + i is the post-add value. */
return tmp + i;
}