  * SPDX-License-Identifier: Apache-2.0
  */
 
-#include "posix_clock.h"
-
-#include <zephyr/kernel.h>
 #include <errno.h>
+#include <time.h>
+
 #include <zephyr/posix/time.h>
 #include <zephyr/posix/sys/time.h>
 #include <zephyr/posix/unistd.h>
-#include <zephyr/internal/syscall_handler.h>
-#include <zephyr/spinlock.h>
-
-/*
- * `k_uptime_get` returns a timestamp based on an always increasing
- * value from the system start. To support the `CLOCK_REALTIME`
- * clock, this `rt_clock_base` records the time that the system was
- * started. This can either be set via 'clock_settime', or could be
- * set from a real time clock, if such hardware is present.
- */
-static struct timespec rt_clock_base;
-static struct k_spinlock rt_clock_base_lock;
-
-/**
- * @brief Get clock time specified by clock_id.
- *
- * See IEEE 1003.1
- */
-int z_impl___posix_clock_get_base(clockid_t clock_id, struct timespec *base)
-{
-        switch (clock_id) {
-        case CLOCK_MONOTONIC:
-                base->tv_sec = 0;
-                base->tv_nsec = 0;
-                break;
-
-        case CLOCK_REALTIME:
-                K_SPINLOCK(&rt_clock_base_lock) {
-                        *base = rt_clock_base;
-                }
-                break;
-
-        default:
-                errno = EINVAL;
-                return -1;
-        }
-
-        return 0;
-}
-
-#ifdef CONFIG_USERSPACE
-int z_vrfy___posix_clock_get_base(clockid_t clock_id, struct timespec *ts)
-{
-        K_OOPS(K_SYSCALL_MEMORY_WRITE(ts, sizeof(*ts)));
-        return z_impl___posix_clock_get_base(clock_id, ts);
-}
-#include <zephyr/syscalls/__posix_clock_get_base_mrsh.c>
-#endif
-
-int z_clock_gettime(clockid_t clock_id, struct timespec *ts)
-{
-        struct timespec base;
 
-        switch (clock_id) {
-        case CLOCK_MONOTONIC:
-                base.tv_sec = 0;
-                base.tv_nsec = 0;
-                break;
+extern int z_clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp,
+                             struct timespec *rmtp);
+extern int z_clock_gettime(clockid_t clock_id, struct timespec *ts);
+extern int z_clock_settime(clockid_t clock_id, const struct timespec *tp);
 
-        case CLOCK_REALTIME:
-                (void)__posix_clock_get_base(clock_id, &base);
-                break;
-
-        default:
-                errno = EINVAL;
-                return -1;
-        }
-
-        uint64_t ticks = k_uptime_ticks();
-        uint64_t elapsed_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
-        uint64_t nremainder = ticks - elapsed_secs * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
-
-        ts->tv_sec = (time_t) elapsed_secs;
-        /* For ns 32 bit conversion can be used since its smaller than 1sec. */
-        ts->tv_nsec = (int32_t) k_ticks_to_ns_floor32(nremainder);
-
-        ts->tv_sec += base.tv_sec;
-        ts->tv_nsec += base.tv_nsec;
-        if (ts->tv_nsec >= NSEC_PER_SEC) {
-                ts->tv_sec++;
-                ts->tv_nsec -= NSEC_PER_SEC;
-        }
-
-        return 0;
-}
 int clock_gettime(clockid_t clock_id, struct timespec *ts)
 {
         return z_clock_gettime(clock_id, ts);
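Note: the hunk above removes the in-file z_clock_gettime() implementation (tick-to-timespec conversion plus the rt_clock_base offset) and keeps only the thin clock_gettime() wrapper, which now forwards to an extern z_clock_gettime(). The public POSIX surface is unchanged; as a rough illustration, a caller still reads both clocks the same way (hypothetical application code, not part of this change):

#include <stdio.h>
#include <time.h>

/* Read both clocks through the retained clock_gettime() wrapper. */
void print_clocks(void)
{
        struct timespec mono, real;

        if (clock_gettime(CLOCK_MONOTONIC, &mono) == 0 &&
            clock_gettime(CLOCK_REALTIME, &real) == 0) {
                printf("monotonic: %lld.%09ld\n", (long long)mono.tv_sec, mono.tv_nsec);
                printf("realtime:  %lld.%09ld\n", (long long)real.tv_sec, real.tv_nsec);
        }
}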
@@ -132,34 +52,6 @@ int clock_getres(clockid_t clock_id, struct timespec *res)
  * Note that only the `CLOCK_REALTIME` clock can be set using this
  * call.
  */
-int z_clock_settime(clockid_t clock_id, const struct timespec *tp)
-{
-        struct timespec base;
-        k_spinlock_key_t key;
-
-        if (clock_id != CLOCK_REALTIME) {
-                errno = EINVAL;
-                return -1;
-        }
-
-        if (tp->tv_nsec < 0 || tp->tv_nsec >= NSEC_PER_SEC) {
-                errno = EINVAL;
-                return -1;
-        }
-
-        uint64_t elapsed_nsecs = k_ticks_to_ns_floor64(k_uptime_ticks());
-        int64_t delta = (int64_t)NSEC_PER_SEC * tp->tv_sec + tp->tv_nsec
-                - elapsed_nsecs;
-
-        base.tv_sec = delta / NSEC_PER_SEC;
-        base.tv_nsec = delta % NSEC_PER_SEC;
-
-        key = k_spin_lock(&rt_clock_base_lock);
-        rt_clock_base = base;
-        k_spin_unlock(&rt_clock_base_lock, key);
-
-        return 0;
-}
 int clock_settime(clockid_t clock_id, const struct timespec *tp)
 {
         return z_clock_settime(clock_id, tp);
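Note: the deleted z_clock_settime() stored CLOCK_REALTIME as an offset (rt_clock_base) relative to kernel uptime rather than as an absolute value, so the realtime clock keeps advancing with the tick after it is set. A minimal standalone sketch of that arithmetic, with a hypothetical uptime_ns() helper and simplified names standing in for the kernel APIs (not the relocated implementation):

#include <stdint.h>
#include <time.h>

#define NS_PER_SEC 1000000000LL

/* Hypothetical stand-in for the kernel uptime source (uptime ticks converted to ns). */
extern uint64_t uptime_ns(void);

static struct timespec rt_base; /* illustrative stand-in for rt_clock_base */

/* settime: remember "requested wall time minus current uptime"... */
void set_realtime(const struct timespec *tp)
{
        int64_t delta = (int64_t)tp->tv_sec * NS_PER_SEC + tp->tv_nsec - (int64_t)uptime_ns();

        rt_base.tv_sec = delta / NS_PER_SEC;
        rt_base.tv_nsec = delta % NS_PER_SEC;
}

/* ...so gettime can return "stored base plus current uptime". */
void get_realtime(struct timespec *tp)
{
        int64_t now = (int64_t)rt_base.tv_sec * NS_PER_SEC + rt_base.tv_nsec + (int64_t)uptime_ns();

        tp->tv_sec = now / NS_PER_SEC;
        tp->tv_nsec = now % NS_PER_SEC;
}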
@@ -194,84 +86,11 @@ int usleep(useconds_t useconds)
         return 0;
 }
 
-/**
- * @brief Suspend execution for a nanosecond interval, or
- *        until some absolute time relative to the specified clock.
- *
- * See IEEE 1003.1
- */
-int z_clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp,
-                      struct timespec *rmtp)
-{
-        uint64_t ns;
-        uint64_t us;
-        uint64_t uptime_ns;
-        k_spinlock_key_t key;
-        const bool update_rmtp = rmtp != NULL;
-
-        if (!((clock_id == CLOCK_REALTIME) || (clock_id == CLOCK_MONOTONIC))) {
-                errno = EINVAL;
-                return -1;
-        }
-
-        if (rqtp == NULL) {
-                errno = EFAULT;
-                return -1;
-        }
-
-        if ((rqtp->tv_sec < 0) || (rqtp->tv_nsec < 0) || (rqtp->tv_nsec >= NSEC_PER_SEC)) {
-                errno = EINVAL;
-                return -1;
-        }
-
-        if ((flags & TIMER_ABSTIME) == 0 && unlikely(rqtp->tv_sec >= ULLONG_MAX / NSEC_PER_SEC)) {
-                ns = rqtp->tv_nsec + NSEC_PER_SEC +
-                     (uint64_t)k_sleep(K_SECONDS(rqtp->tv_sec - 1)) * NSEC_PER_MSEC;
-        } else {
-                ns = (uint64_t)rqtp->tv_sec * NSEC_PER_SEC + rqtp->tv_nsec;
-        }
-
-        uptime_ns = k_ticks_to_ns_ceil64(sys_clock_tick_get());
-
-        if (flags & TIMER_ABSTIME && clock_id == CLOCK_REALTIME) {
-                key = k_spin_lock(&rt_clock_base_lock);
-                ns -= rt_clock_base.tv_sec * NSEC_PER_SEC + rt_clock_base.tv_nsec;
-                k_spin_unlock(&rt_clock_base_lock, key);
-        }
-
-        if ((flags & TIMER_ABSTIME) == 0) {
-                ns += uptime_ns;
-        }
-
-        if (ns <= uptime_ns) {
-                goto do_rmtp_update;
-        }
-
-        us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
-        do {
-                us = k_sleep(K_TIMEOUT_ABS_US(us)) * 1000;
-        } while (us != 0);
-
-do_rmtp_update:
-        if (update_rmtp) {
-                rmtp->tv_sec = 0;
-                rmtp->tv_nsec = 0;
-        }
-
-        return 0;
-}
-
 int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
 {
         return z_clock_nanosleep(CLOCK_MONOTONIC, 0, rqtp, rmtp);
 }
 
-int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp,
-                    struct timespec *rmtp)
-{
-        return z_clock_nanosleep(clock_id, flags, rqtp, rmtp);
-}
-
 /**
  * @brief Get current real time.
  *
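Note: both the z_clock_nanosleep() implementation and the clock_nanosleep() wrapper move out of this file; only nanosleep() remains here, forwarding a relative CLOCK_MONOTONIC sleep. For context, the absolute-deadline mode the removed code handled corresponds to standard TIMER_ABSTIME usage, e.g. (hypothetical caller, not part of this change):

#include <time.h>

/* Sleep until 100 ms after the moment this function is called. Using an
 * absolute deadline means the wake-up time does not drift if the thread
 * is preempted between reading the clock and going to sleep.
 */
void sleep_100ms_abs(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_nsec += 100 * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }
        (void)clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
}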
@@ -304,23 +123,3 @@ int clock_getcpuclockid(pid_t pid, clockid_t *clock_id)
 
         return 0;
 }
-
-#ifdef CONFIG_ZTEST
-#include <zephyr/ztest.h>
-static void reset_clock_base(void)
-{
-        K_SPINLOCK(&rt_clock_base_lock) {
-                rt_clock_base = (struct timespec){0};
-        }
-}
-
-static void clock_base_reset_rule_after(const struct ztest_unit_test *test, void *data)
-{
-        ARG_UNUSED(test);
-        ARG_UNUSED(data);
-
-        reset_clock_base();
-}
-
-ZTEST_RULE(clock_base_reset_rule, NULL, clock_base_reset_rule_after);
-#endif /* CONFIG_ZTEST */