@@ -13,7 +13,7 @@ macro_rules! exclude_operand_sizes {
13
13
}
14
14
}
15
15
16
- #[ allow( clippy:: upper_case_acronyms) ]
16
+ #[ allow( dead_code , clippy:: upper_case_acronyms) ]
17
17
#[ derive( Copy , Clone , PartialEq , Eq ) ]
18
18
#[ repr( u8 ) ]
19
19
pub enum X86Register {
@@ -33,6 +33,14 @@ pub enum X86Register {
33
33
R13 = 13 ,
34
34
R14 = 14 ,
35
35
R15 = 15 ,
36
+ MM0 = 16 ,
37
+ MM1 = 17 ,
38
+ MM2 = 18 ,
39
+ MM3 = 19 ,
40
+ MM4 = 20 ,
41
+ MM5 = 21 ,
42
+ MM6 = 22 ,
43
+ MM7 = 23 ,
36
44
}
37
45
use X86Register :: * ;
38
46
@@ -266,6 +274,35 @@ impl X86Instruction {
266
274
}
267
275
}
268
276
277
    /// Move to / from / between MMX (float mantissa)
    ///
    /// Emits one of the 0x0F-escaped MOVD/MOVQ opcode forms depending on
    /// which of the two operands is an MMX register. MMX registers are the
    /// enum variants with discriminants 16..=23 (MM0..MM7), so bit 4
    /// (`& 16`) distinguishes MMX from the general-purpose registers
    /// (0..=15); the low nibble (`& 0xF`) is the hardware register index.
    #[allow(dead_code)]
    #[inline]
    pub const fn mov_mmx(size: OperandSize, source: X86Register, destination: X86Register) -> Self {
        // Only the 64-bit operand size is valid for these encodings.
        exclude_operand_sizes!(
            size,
            OperandSize::S0 | OperandSize::S8 | OperandSize::S16 | OperandSize::S32
        );
        if (destination as u8) & 16 != 0 {
            // Destination is an MMX register: load form.
            // 0x6F = MOVQ mm, mm (MMX -> MMX); 0x6E = MOVD/MOVQ mm, r (GPR -> MMX).
            // Both opcodes live on the two-byte 0x0F escape page
            // (opcode_escape_sequence: 1).
            Self {
                size,
                opcode_escape_sequence: 1,
                opcode: if (source as u8) & 16 != 0 { 0x6F } else { 0x6E },
                // NOTE(review): first_operand appears to be the ModRM reg
                // field and second_operand the ModRM r/m field — the MMX
                // register goes in reg for 0x6E/0x6F per the Intel SDM;
                // confirm against this struct's emitter.
                first_operand: (destination as u8) & 0xF,
                second_operand: (source as u8) & 0xF,
                ..Self::DEFAULT
            }
        } else {
            // Destination is a GPR: store form.
            // 0x7E = MOVD/MOVQ r, mm (MMX -> GPR). Here the *source* MMX
            // register occupies the reg field, the GPR the r/m field, so the
            // operand order is swapped relative to the branch above.
            Self {
                size,
                opcode_escape_sequence: 1,
                opcode: 0x7E,
                first_operand: (source as u8) & 0xF,
                second_operand: (destination as u8) & 0xF,
                ..Self::DEFAULT
            }
        }
    }
305
+
269
306
/// Conditionally move source to destination
270
307
#[ inline]
271
308
pub const fn cmov (
0 commit comments