Newer
Older
#ifndef ARCH_X86_PROCESSOR_H
#define ARCH_X86_PROCESSOR_H
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
namespace processor {
// Accessors for the x86 control registers CR0/CR2/CR3/CR4/CR8.
// Each is a single mov to/from the register; these are privileged
// instructions, so callers must be executing at CPL 0.
inline ulong read_cr0()
{
    ulong val;
    asm volatile ("mov %%cr0, %0" : "=r"(val));
    return val;
}

inline void write_cr0(ulong val)
{
    asm volatile ("mov %0, %%cr0" : : "r"(val));
}

inline ulong read_cr2()
{
    ulong val;
    asm volatile ("mov %%cr2, %0" : "=r"(val));
    return val;
}

inline void write_cr2(ulong val)
{
    asm volatile ("mov %0, %%cr2" : : "r"(val));
}

inline ulong read_cr3()
{
    ulong val;
    asm volatile ("mov %%cr3, %0" : "=r"(val));
    return val;
}

inline void write_cr3(ulong val)
{
    asm volatile ("mov %0, %%cr3" : : "r"(val));
}

inline ulong read_cr4()
{
    ulong val;
    asm volatile ("mov %%cr4, %0" : "=r"(val));
    return val;
}

inline void write_cr4(ulong val)
{
    asm volatile ("mov %0, %%cr4" : : "r"(val));
}

inline ulong read_cr8()
{
    ulong val;
    asm volatile ("mov %%cr8, %0" : "=r"(val));
    return val;
}

inline void write_cr8(ulong val)
{
    asm volatile ("mov %0, %%cr8" : : "r"(val));
}
// NOTE(review): this looks like the body of a "struct desc_ptr"
// (a limit/base pair in the format expected by lgdt/lidt below) whose
// opening "struct desc_ptr {" line is not visible in this chunk --
// confirm against the full file before relying on this reading.
desc_ptr(u16 limit, ulong addr) : limit(limit), addr(addr) {}
u16 limit;    // size of the descriptor table minus one
ulong addr;   // linear base address of the descriptor table
inline void lgdt(const desc_ptr& ptr) {
asm volatile ("lgdt %0" : : "m"(ptr));
}
inline void sgdt(desc_ptr& ptr) {
asm volatile ("sgdt %0" : "=m"(ptr));
}
inline void lidt(const desc_ptr& ptr) {
asm volatile ("lidt %0" : : "m"(ptr));
}
inline void sidt(desc_ptr& ptr) {
asm volatile ("sidt %0" : "=m"(ptr));
}
inline void ltr(u16 tr) {
asm volatile("ltr %0" : : "rm"(tr));
}
inline u16 str() {
u16 tr;
asm volatile("str %0" : "=rm"(tr));
return tr;
}
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
// Read/write the segment selector registers.  CS has no direct writer
// here: changing CS requires a far transfer, not a plain mov.
inline u16 read_cs()
{
    u16 sel;
    asm volatile ("mov %%cs, %0" : "=rm"(sel));
    return sel;
}

inline u16 read_ds()
{
    u16 sel;
    asm volatile ("mov %%ds, %0" : "=rm"(sel));
    return sel;
}

inline void write_ds(u16 sel)
{
    asm volatile ("mov %0, %%ds" : : "rm"(sel));
}

inline u16 read_es()
{
    u16 sel;
    asm volatile ("mov %%es, %0" : "=rm"(sel));
    return sel;
}

inline void write_es(u16 sel)
{
    asm volatile ("mov %0, %%es" : : "rm"(sel));
}

inline u16 read_fs()
{
    u16 sel;
    asm volatile ("mov %%fs, %0" : "=rm"(sel));
    return sel;
}

inline void write_fs(u16 sel)
{
    asm volatile ("mov %0, %%fs" : : "rm"(sel));
}

inline u16 read_gs()
{
    u16 sel;
    asm volatile ("mov %%gs, %0" : "=rm"(sel));
    return sel;
}

inline void write_gs(u16 sel)
{
    asm volatile ("mov %0, %%gs" : : "rm"(sel));
}

inline u16 read_ss()
{
    u16 sel;
    asm volatile ("mov %%ss, %0" : "=rm"(sel));
    return sel;
}

inline void write_ss(u16 sel)
{
    asm volatile ("mov %0, %%ss" : : "rm"(sel));
}
// Model-specific register access.  rdmsr/wrmsr transfer a 64-bit
// value through the EDX:EAX pair, with the MSR selected by ECX.
inline u64 rdmsr(u32 index)
{
    u32 lo, hi;
    asm volatile ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(index));
    return ((u64)hi << 32) | lo;
}

inline void wrmsr(u32 index, u64 data)
{
    u32 lo = data;        // low 32 bits -> EAX
    u32 hi = data >> 32;  // high 32 bits -> EDX
    asm volatile ("wrmsr" : : "c"(index), "a"(lo), "d"(hi));
}
// Halt this CPU.  halt_no_interrupts() disables maskable interrupts
// first, so the hlt cannot be woken by them.  sti_hlt() enables
// interrupts and halts in the same asm statement: sti inhibits
// interrupts for one more instruction, so an interrupt arriving
// between the two cannot be lost before the hlt takes effect.
inline void halt_no_interrupts()
{
    asm volatile ("cli; hlt");
}

inline void sti_hlt()
{
    asm volatile ("sti; hlt");
}
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
// Legacy port-mapped I/O in 8/16/32-bit widths, plus interrupt
// enable/disable.  The "dN" constraint lets the port number be encoded
// as an immediate when it is a compile-time constant below 256, and
// puts it in DX otherwise.
inline u8 inb(u16 port)
{
    u8 data;
    asm volatile ("inb %1, %0" : "=a"(data) : "dN"(port));
    return data;
}

inline u16 inw(u16 port)
{
    u16 data;
    asm volatile ("inw %1, %0" : "=a"(data) : "dN"(port));
    return data;
}

inline u32 inl(u16 port)
{
    u32 data;
    asm volatile ("inl %1, %0" : "=a"(data) : "dN"(port));
    return data;
}

inline void outb(u8 val, u16 port)
{
    asm volatile ("outb %0, %1" : : "a"(val), "dN"(port));
}

inline void outw(u16 val, u16 port)
{
    asm volatile ("outw %0, %1" : : "a"(val), "dN"(port));
}

inline void outl(u32 val, u16 port)
{
    asm volatile ("outl %0, %1" : : "a"(val), "dN"(port));
}

// Enable maskable interrupts on this CPU.
inline void sti()
{
    asm volatile ("sti");
}

// Disable maskable interrupts on this CPU.
inline void cli()
{
    asm volatile ("cli");
}
// Read the CPU's time-stamp counter.
//
// Returns the 64-bit TSC value assembled from EDX:EAX.
//
// "volatile" is required here: without it, GCC may treat two rdtsc()
// calls with no intervening writes as common subexpressions and fold
// them into a single read, so the second call would return a stale
// timestamp.  Note rdtsc itself is not a serializing instruction; if
// strict ordering against surrounding loads/stores is needed, the
// caller must add a fence.
inline u64 rdtsc()
{
    u32 lo, hi;
    asm volatile ("rdtsc" : "=a"(lo), "=d"(hi));
    return lo | (u64(hi) << 32);
}
// 64-bit Task State Segment image as loaded via the task register.
// The layout must match the hardware format exactly, hence "packed":
// the u64 fields start at offset 4, which is not naturally aligned.
struct task_state_segment {
    u32 reserved0;
    u64 rsp[3];           // stack pointers loaded on privilege-level change (RSP0..RSP2)
    u64 ist[8]; // ist[0] is reserved
    u32 reserved1;
    u32 reserved2;
    u16 reserved3;
    u16 io_bitmap_base;   // offset of the I/O permission bitmap from the TSS base
} __attribute__((packed));
// Wrapper that places a 4-byte pad in front of the TSS so that, with
// the struct itself 8-byte aligned, the u64 members inside the packed
// task_state_segment land on naturally aligned addresses.
struct aligned_task_state_segment {
    u32 pad; // force 64-bit structures to be aligned
    task_state_segment tss;
} __attribute__((packed, aligned(8)));