1 /*********************************************\r
2 * File: netsync.h\r
3 * Purpose: NETAPI Synchronization primitives\r
4 **************************************************************\r
5 * FILE: netsync.h\r
6 * \r
7 * DESCRIPTION: netapi synch utilities header file for user space transport\r
8 * library\r
9 * \r
10 * REVISION HISTORY: rev 0.0.1 \r
11 *\r
12 * Copyright (c) Texas Instruments Incorporated 2010-2011\r
13 * \r
14 * Redistribution and use in source and binary forms, with or without \r
15 * modification, are permitted provided that the following conditions \r
16 * are met:\r
17 *\r
18 * Redistributions of source code must retain the above copyright \r
19 * notice, this list of conditions and the following disclaimer.\r
20 *\r
21 * Redistributions in binary form must reproduce the above copyright\r
22 * notice, this list of conditions and the following disclaimer in the \r
23 * documentation and/or other materials provided with the \r
24 * distribution.\r
25 *\r
26 * Neither the name of Texas Instruments Incorporated nor the names of\r
27 * its contributors may be used to endorse or promote products derived\r
28 * from this software without specific prior written permission.\r
29 *\r
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \r
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT \r
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r
33 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT \r
34 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, \r
35 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT \r
36 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\r
37 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\r
38 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \r
39 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \r
40 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
41 \r
42 **********************************************/\r
43 #ifndef NETAPI_SYNC_H\r
44 #define NETAPI_SYNCH_H\r
45 \r
46 /*--------------------------*/\r
47 /*----------spinlock--------*/\r
48 /*--------------------------*/\r
49 \r
50 typedef int NETAPI_SPINLOCK_T;\r
51 \r
52 #define NETAPI_SPINLOCK_LOCKVAL 1\r
53 #define NETAPI_SPINLOCK_UNLOCKVAL 0 //never change!!\r
54 #define NETAPI_SPINLOCK_UNLOCKED_INITIALIZER (NETAPI_SPINLOCK_UNLOCKVAL)\r
55 \r
56 /* init a lock */\r
57 static inline void netapi_spinlock_init (NETAPI_SPINLOCK_T * val) {*val=NETAPI_SPINLOCK_UNLOCKVAL;}\r
58 \r
59 /* lock a spinlock */\r
60 static inline void netapi_spinlock_lock(NETAPI_SPINLOCK_T * val)\r
61 {\r
62 while(__sync_lock_test_and_set(val, NETAPI_SPINLOCK_LOCKVAL))\r
63 {\r
64 asm volatile("nop" :: );\r
65 asm volatile("nop" :: );\r
66 asm volatile("nop" :: );\r
67 asm volatile("nop" :: );\r
68 }\r
69 }\r
70 \r
71 /* try to get lock 1 time. Return 1 if ok, 0 if un-successful */\r
72 static inline int netapi_spinlock_try_lock( NETAPI_SPINLOCK_T* val)\r
73 {\r
74 int i=0;\r
75 if (__sync_lock_test_and_set(val, NETAPI_SPINLOCK_LOCKVAL)) return 0;\r
76 return 1;\r
77 }\r
78 \r
79 \r
80 /* unlock a spinlock. */\r
81 static inline void netapi_spinlock_unlock(NETAPI_SPINLOCK_T * val)\r
82 {\r
83 __sync_lock_release(val);\r
84 }\r
85 \r
86 /* poll a lock, return 0 if unlocked, NETAPI_SPINLOCK_LOCKVAL if locked */\r
87 static inline int netapi_spinlock_is_locked(NETAPI_SPINLOCK_T * val)\r
88 {\r
89 return *val;\r
90 }\r
91 \r
92 /*--------------------------*/\r
93 /*----------rwlock--------*/\r
94 /*--------------------------*/\r
95 \r
96 /* a rw lock strucuture */\r
97 typedef struct RWLOCK_Tag\r
98 {\r
99 NETAPI_SPINLOCK_T lock_outer; //lock this structure. very short duration lock\r
100 NETAPI_SPINLOCK_T lock_w; //real write lock\r
101 unsigned long n_readers; /* # readers active */\r
102 } NETAPI_RWLOCK_T;\r
103 \r
104 //initialize a rw lock \r
105 static inline void netapi_rwlock_init(NETAPI_RWLOCK_T * p_lock)\r
106 {\r
107 netapi_spinlock_init(&p_lock->lock_outer);\r
108 netapi_spinlock_init(&p_lock->lock_w);\r
109 p_lock->n_readers=0;\r
110 }\r
111 \r
// Acquire the writer side of a rwlock: spins until there are no active
// readers and no other writer holds lock_w.
// Lock ordering: lock_outer is taken first so that n_readers and lock_w
// cannot change while they are examined; it is released before every
// retry and before returning, so it is only ever held briefly.
static inline void netapi_rwlock_write_lock(NETAPI_RWLOCK_T * p_lock)
{
    int ret;
    while(1)
    {
        netapi_spinlock_lock(&p_lock->lock_outer); //get outer lock - now nothing can change
        // a writer may only proceed once every active reader has drained
        if(p_lock->n_readers)
        {
            netapi_spinlock_unlock(&p_lock->lock_outer); //give up outer & spin
            // brief pause before re-checking, to ease contention on lock_outer
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            continue;
        }

        //ok, no readers. see if we can get writer lock
        ret=netapi_spinlock_try_lock(&p_lock->lock_w); //try get writer lock 1 time
        if(!ret)
        {
            netapi_spinlock_unlock(&p_lock->lock_outer); //another writer beat us: give up outer & spin
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            continue; /* try again */
        }
        netapi_spinlock_unlock(&p_lock->lock_outer); //got write lock=> no other writer, no readers! Keep the writelock but unlock the outer.
        return;
    }
}
145 \r
146 //unlock a writer part of rwlock */\r
147 static inline void netapi_rwlock_write_unlock(NETAPI_RWLOCK_T * p_lock)\r
148 {\r
149 netapi_spinlock_unlock(&p_lock->lock_w);\r
150 }\r
151 \r
//grab a read lock
//=> other readers may hold it concurrently, but never alongside a writer.
// Ordering: take lock_outer, check lock_w; if there is no writer, bump
// n_readers (which blocks future writers) and release lock_outer.
static inline void netapi_rwlock_read_lock(NETAPI_RWLOCK_T * p_lock)
{
    int ret;

    while(1)
    {
        /*1st grab outer lock. once we have it, nothing can change */
        netapi_spinlock_lock(&p_lock->lock_outer);

        /* see if there is a writer */
        ret= netapi_spinlock_is_locked(&p_lock->lock_w);

        //there is a writer: back off and retry
        if (ret)
        {
            netapi_spinlock_unlock(&p_lock->lock_outer); //give up outer and spin
            // brief pause before re-checking, to ease contention on lock_outer
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            asm volatile("nop" :: );
            continue;
        }

        /* there is no writer so we can read!*/
        p_lock->n_readers+=1;

        /* full memory barrier */
        __sync_synchronize(); //make sure every core sees that n_readers has changed

        /* now give back the outer lock */
        netapi_spinlock_unlock(&p_lock->lock_outer);
        return;
    }
}
188 \r
189 //rw_lock reader unlock\r
190 static inline void netapi_rwlock_read_unlock(NETAPI_RWLOCK_T * p_lock)\r
191 {\r
192 //grab outer\r
193 netapi_spinlock_lock(&p_lock->lock_outer);\r
194 \r
195 //decrement # of readers. Make sure all cores see update\r
196 p_lock->n_readers--;\r
197 __sync_synchronize(); \r
198 //TBD: need to check for <0? \r
199 \r
200 /* give up the outer */\r
201 netapi_spinlock_unlock(&p_lock->lock_outer);\r
202 }\r
203 \r
204 /*--------------------------*/\r
205 /*----------atomic32--------*/\r
206 /*--------------------------*/\r
207 typedef struct NETAPI_ATOMIC32_tag\r
208 {\r
209 long val;\r
210 } NETAPI_ATOMIC32_T;\r
211 \r
212 #define NETAPI_ATOMIC_INIT32(x) {x}\r
213 static inline int netapi_atomic_read32(NETAPI_ATOMIC32_T *p) {return p->val;}\r
214 \r
215 static inline void netapi_atomic_set32(NETAPI_ATOMIC32_T *p, int val) \r
216 {__sync_fetch_and_add(&p->val,0); } //todo crude, why not p->val=val?\r
217 \r
218 static inline void netapi_atomic_add32(NETAPI_ATOMIC32_T *p, int val)\r
219 {__sync_fetch_and_add(&p->val,val);}\r
220 \r
221 static inline void netapi_atomic_sub32(NETAPI_ATOMIC32_T *p, int val)\r
222 {__sync_fetch_and_sub(&p->val,val);}\r
223 \r
224 #define NETAPI_atomic_inc32(p) netapi_atomic_add32(p,1);\r
225 #define NETAPI_atomic_dec32(p) netapi_atomic_sub32(p,1);\r
226 \r
227 static inline int netapi_atomic_add_return32(NETAPI_ATOMIC32_T *p, int val)\r
228 {return __sync_add_and_fetch(&p->val,val);}\r
229 \r
230 static inline int netapi_atomic_sub_return32(NETAPI_ATOMIC32_T *p, int val)\r
231 {return __sync_sub_and_fetch(&p->val,val);}\r
232 \r
233 static inline int netapi_atomic_inc_and_test32(NETAPI_ATOMIC32_T *p)\r
234 {return __sync_add_and_fetch(&p->val,1);}\r
235 \r
236 static inline int netapi_atomic_dec_and_test32(NETAPI_ATOMIC32_T *p)\r
237 {return !__sync_sub_and_fetch(&p->val,1);}\r
238 \r
239 static inline int netapi_atomic_test_and_set32(NETAPI_ATOMIC32_T *p)\r
240 {return (! _sync_lock_test_and_set(p, 1));}\r
241 \r
242 #define netapi_atomic_clear32(p) netapi_atomic_set32(p,0);\r
243 \r
244 /*--------------------------*/\r
245 /*----------atomic64--------*/\r
246 /*--------------------------*/\r
247 typedef struct NETAPI_ATOMIC64_Tag\r
248 {\r
249 NETAPI_SPINLOCK_T lock;\r
250 long long val;\r
251 } NETAPI_ATOMIC64_T;\r
252 \r
253 #define NETAPI_ATOMIC_INIT64(x) {NETAPI_SPINLOCK_UNLOCKED_INITIALIZER,x}\r
254 \r
255 static inline long long netapi_atomic_read64(NETAPI_ATOMIC64_T *p)\r
256 {\r
257 long long latch_val;\r
258 netapi_spinlock_lock(&p->lock); //acquire lock\r
259 latch_val = p->val;\r
260 netapi_spinlock_unlock(&p->lock); //free lock\r
261 return latch_val;\r
262 }\r
263 \r
264 static inline void netapi_atomic_set64(NETAPI_ATOMIC64_T *p,long long val)\r
265 {\r
266 netapi_spinlock_lock(&p->lock); //acquire lock\r
267 p->val = val;\r
268 //__sync_synchronize(); //todo: may not need as unlock will do this also probably\r
269 netapi_spinlock_unlock(&p->lock); //free lock\r
270 }\r
271 \r
272 static inline void netapi_atomic_add64(NETAPI_ATOMIC64_T *p, long long val) \r
273 {\r
274 netapi_spinlock_lock(&p->lock); //acquire lock\r
275 p->val += val;\r
276 //__sync_synchronize(); //todo: may not need as unlock will do this also probably\r
277 netapi_spinlock_unlock(&p->lock); //free lock\r
278 }\r
279 \r
280 /*******************************************************\r
281 ****************memory barrier************************\r
282 ******************************************************/\r
283 static inline void netapi_mb(){__sync_synchronize();}\r
284 static inline void netapi_rmb(){__sync_synchronize();}\r
285 static inline void netapi_wmb(){__sync_synchronize();}\r
286 \r
287 \r
288 #endif\r