#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct
vdso_data *vd)
+static __always_inline const struct vdso_time_data
*__ppc_get_vdso_u_timens_data(void)
{
- return (void *)vd + (1U << CONFIG_PAGE_SHIFT);
+ struct vdso_time_data *time_data;
+
+ asm(
+ " bcl 20, 31, .+4\n"
+ "0: mflr %0\n"
+ " addis %0, %0, (vdso_u_timens_data - 0b)@ha\n"
+ " addi %0, %0, (vdso_u_timens_data - 0b)@l\n"
+ : "=r" (time_data) :: "lr");
+
+ return time_data;
Please don't do that: it defeats the optimisation effort that was made when implementing the VDSO time functions. Commit ce7d8056e38b ("powerpc/vdso: Prepare for
switching VDSO to generic C implementation.") explains why.
For time data, the bcl/mflr dance is already done by the get_datapage macro,
which is called by the cvdso_call macro in gettimeofday.S; the resulting
pointer is then passed to __cvdso_clock_gettime_data() by
__c_kernel_clock_gettime() in vgettimeofday.c. Use that information and don't
redo the bcl/mflr sequence.
See for instance the function __c_kernel_clock_getres():
before your series it is 30 instructions;
after your series it is 59 instructions.
It is even more obvious with __c_kernel_time():
before your series it has 12 instructions;
after your series it has 26 instructions.
Before
00001408 <__c_kernel_time>:
1408: 81 44 00 04 lwz r10,4(r4)
140c: 6d 49 80 00 xoris r9,r10,32768
1410: 2c 09 ff ff cmpwi r9,-1
1414: 40 82 00 08 bne 141c <__c_kernel_time+0x14>
1418: 38 84 40 00 addi r4,r4,16384
141c: 2c 03 00 00 cmpwi r3,0
1420: 81 44 00 20 lwz r10,32(r4)
1424: 81 64 00 24 lwz r11,36(r4)
1428: 41 82 00 08 beq 1430 <__c_kernel_time+0x28>
142c: 91 63 00 00 stw r11,0(r3)
1430: 7d 63 5b 78 mr r3,r11
1434: 4e 80 00 20 blr
Versus after
00001534 <__c_kernel_time>:
1534: 81 44 00 04 lwz r10,4(r4)
1538: 6d 49 80 00 xoris r9,r10,32768
153c: 2c 09 ff ff cmpwi r9,-1
1540: 41 82 00 20 beq 1560 <__c_kernel_time+0x2c>
1544: 2c 03 00 00 cmpwi r3,0
1548: 81 44 00 20 lwz r10,32(r4)
154c: 81 64 00 24 lwz r11,36(r4)
1550: 41 82 00 08 beq 1558 <__c_kernel_time+0x24>
1554: 91 63 00 00 stw r11,0(r3)
1558: 7d 63 5b 78 mr r3,r11
155c: 4e 80 00 20 blr
1560: 7c 08 02 a6 mflr r0
1564: 2c 03 00 00 cmpwi r3,0
1568: 90 01 00 04 stw r0,4(r1)
156c: 42 9f 00 05 bcl 20,4*cr7+so,1570 <__c_kernel_time+0x3c>
1570: 7c 88 02 a6 mflr r4
1574: 3c 84 ff ff addis r4,r4,-1
1578: 38 84 2a 90 addi r4,r4,10896
157c: 81 44 00 20 lwz r10,32(r4)
1580: 81 64 00 24 lwz r11,36(r4)
1584: 41 82 00 08 beq 158c <__c_kernel_time+0x58>
1588: 91 63 00 00 stw r11,0(r3)
158c: 80 01 00 04 lwz r0,4(r1)
1590: 7d 63 5b 78 mr r3,r11
1594: 7c 08 03 a6 mtlr r0
1598: 4e 80 00 20 blr
Christophe