/**
 * D header file for perf_event_open system call.
 *
 * Converted from linux userspace header, comments included.
 *
 * Authors: Max Haughton
 */
module core.sys.linux.perf_event;
version (linux):
extern (C):
@nogc:
nothrow:

import core.sys.posix.sys.ioctl;
import core.sys.posix.unistd;

version (HPPA) version = HPPA_Any;
version (HPPA64) version = HPPA_Any;
version (PPC) version = PPC_Any;
version (PPC64) version = PPC_Any;
version (RISCV32) version = RISCV_Any;
version (RISCV64) version = RISCV_Any;
version (S390) version = IBMZ_Any;
version (SPARC) version = SPARC_Any;
version (SPARC64) version = SPARC_Any;
version (SystemZ) version = IBMZ_Any;

version (X86_64)
{
    version (D_X32)
        enum __NR_perf_event_open = 0x40000000 + 298;
    else
        enum __NR_perf_event_open = 298;
}
else version (X86)
{
    enum __NR_perf_event_open = 336;
}
else version (ARM)
{
    enum __NR_perf_event_open = 364;
}
else version (AArch64)
{
    enum __NR_perf_event_open = 241;
}
else version (HPPA_Any)
{
    enum __NR_perf_event_open = 318;
}
else version (IBMZ_Any)
{
    enum __NR_perf_event_open = 331;
}
else version (MIPS32)
{
    enum __NR_perf_event_open = 4333;
}
else version (MIPS64)
{
    version (MIPS_N32)
        enum __NR_perf_event_open = 6296;
    else version (MIPS_N64)
        enum __NR_perf_event_open = 5292;
    else
        static assert(0, "Architecture not supported");
}
else version (PPC_Any)
{
    enum __NR_perf_event_open = 319;
}
else version (RISCV_Any)
{
    enum __NR_perf_event_open = 241;
}
else version (SPARC_Any)
{
    enum __NR_perf_event_open = 327;
}
else version (LoongArch64)
{
    enum __NR_perf_event_open = 241;
}
else
{
    static assert(0, "Architecture not supported");
}

extern (C) extern long syscall(long __sysno, ...);

static long perf_event_open(perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, ulong flags)
{
    return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
}

/*
 * User-space ABI bits:
 */

/**
 * attr.type
 */
enum perf_type_id
{
    PERF_TYPE_HARDWARE = 0,
    PERF_TYPE_SOFTWARE = 1,
    PERF_TYPE_TRACEPOINT = 2,
    PERF_TYPE_HW_CACHE = 3,
    PERF_TYPE_RAW = 4,
    PERF_TYPE_BREAKPOINT = 5,

    PERF_TYPE_MAX = 6 /* non-ABI */
}

/**
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id
{
    ///
    PERF_COUNT_HW_CPU_CYCLES = 0,
    ///
    PERF_COUNT_HW_INSTRUCTIONS = 1,
    ///
    PERF_COUNT_HW_CACHE_REFERENCES = 2,
    ///
    PERF_COUNT_HW_CACHE_MISSES = 3,
    ///
    PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
    ///
    PERF_COUNT_HW_BRANCH_MISSES = 5,
    ///
    PERF_COUNT_HW_BUS_CYCLES = 6,
    ///
    PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
    ///
    PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
    ///
    PERF_COUNT_HW_REF_CPU_CYCLES = 9,
    ///
    PERF_COUNT_HW_MAX = 10 /* non-ABI */
}
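/**
 * Example: counting retired instructions on the calling thread.
 *
 * A minimal usage sketch of the wrapper and ioctls declared in this module;
 * the helper name countInstructions is illustrative only. It assumes the
 * kernel exposes perf events to the caller (see
 * /proc/sys/kernel/perf_event_paranoid) and keeps error handling to bare
 * checks.
 * ---
 * import core.stdc.stdio : printf;
 * import core.sys.linux.perf_event;
 * import core.sys.posix.sys.ioctl : ioctl;
 * import core.sys.posix.unistd : close, read;
 *
 * void countInstructions()
 * {
 *     perf_event_attr attr;     // fields default to zero
 *     attr.type = perf_type_id.PERF_TYPE_HARDWARE;
 *     attr.size = perf_event_attr.sizeof;
 *     attr.config = perf_hw_id.PERF_COUNT_HW_INSTRUCTIONS;
 *     attr.disabled = 1;        // start disabled, enable explicitly below
 *     attr.exclude_kernel = 1;  // user-space instructions only
 *     attr.exclude_hv = 1;
 *
 *     // pid = 0, cpu = -1: this thread, on any CPU; no group, no flags.
 *     const fd = cast(int) perf_event_open(&attr, 0, -1, -1, 0);
 *     if (fd < 0)
 *         return;               // perf events unavailable or not permitted
 *
 *     ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *     ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *
 *     // ... workload to measure ...
 *
 *     ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *     ulong count;
 *     if (read(fd, &count, count.sizeof) == count.sizeof)
 *         printf("instructions: %llu\n", count);
 *     close(fd);
 * }
 * ---
 */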
/**
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id
{
    ///
    PERF_COUNT_HW_CACHE_L1D = 0,
    ///
    PERF_COUNT_HW_CACHE_L1I = 1,
    ///
    PERF_COUNT_HW_CACHE_LL = 2,
    ///
    PERF_COUNT_HW_CACHE_DTLB = 3,
    ///
    PERF_COUNT_HW_CACHE_ITLB = 4,
    ///
    PERF_COUNT_HW_CACHE_BPU = 5,
    ///
    PERF_COUNT_HW_CACHE_NODE = 6,
    ///
    PERF_COUNT_HW_CACHE_MAX = 7 /* non-ABI */
}
///
enum perf_hw_cache_op_id
{
    ///
    PERF_COUNT_HW_CACHE_OP_READ = 0,
    ///
    PERF_COUNT_HW_CACHE_OP_WRITE = 1,
    ///
    PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
    ///
    PERF_COUNT_HW_CACHE_OP_MAX = 3 /* non-ABI */
}
///
enum perf_hw_cache_op_result_id
{
    ///
    PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
    ///
    PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
    ///
    PERF_COUNT_HW_CACHE_RESULT_MAX = 2 /* non-ABI */
}

/**
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids
{
    ///
    PERF_COUNT_SW_CPU_CLOCK = 0,
    ///
    PERF_COUNT_SW_TASK_CLOCK = 1,
    ///
    PERF_COUNT_SW_PAGE_FAULTS = 2,
    ///
    PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
    ///
    PERF_COUNT_SW_CPU_MIGRATIONS = 4,
    ///
    PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
    ///
    PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
    ///
    PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
    ///
    PERF_COUNT_SW_EMULATION_FAULTS = 8,
    ///
    PERF_COUNT_SW_DUMMY = 9,
    ///
    PERF_COUNT_SW_BPF_OUTPUT = 10,
    ///
    PERF_COUNT_SW_MAX = 11 /* non-ABI */
}

/**
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format
{
    ///
    PERF_SAMPLE_IP = 1U << 0,
    ///
    PERF_SAMPLE_TID = 1U << 1,
    ///
    PERF_SAMPLE_TIME = 1U << 2,
    ///
    PERF_SAMPLE_ADDR = 1U << 3,
    ///
    PERF_SAMPLE_READ = 1U << 4,
    ///
    PERF_SAMPLE_CALLCHAIN = 1U << 5,
    ///
    PERF_SAMPLE_ID = 1U << 6,
    ///
    PERF_SAMPLE_CPU = 1U << 7,
    ///
    PERF_SAMPLE_PERIOD = 1U << 8,
    ///
    PERF_SAMPLE_STREAM_ID = 1U << 9,
    ///
    PERF_SAMPLE_RAW = 1U << 10,
    ///
    PERF_SAMPLE_BRANCH_STACK = 1U << 11,
    ///
    PERF_SAMPLE_REGS_USER = 1U << 12,
    ///
    PERF_SAMPLE_STACK_USER = 1U << 13,
    ///
    PERF_SAMPLE_WEIGHT = 1U << 14,
    ///
    PERF_SAMPLE_DATA_SRC = 1U << 15,
    ///
    PERF_SAMPLE_IDENTIFIER = 1U << 16,
    ///
    PERF_SAMPLE_TRANSACTION = 1U << 17,
    ///
    PERF_SAMPLE_REGS_INTR = 1U << 18,
    ///
    PERF_SAMPLE_PHYS_ADDR = 1U << 19,
    ///
    PERF_SAMPLE_MAX = 1U << 20 /* non-ABI */
}
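/**
 * Example: selecting sample contents with attr.sample_type.
 *
 * A short sketch of how the bits above are typically combined for a
 * sampling (rather than counting) event; the period chosen here is
 * arbitrary. The event still has to be mmap()ed to consume the generated
 * records (see perf_event_mmap_page further down).
 * ---
 * perf_event_attr attr;
 * attr.type = perf_type_id.PERF_TYPE_HARDWARE;
 * attr.size = perf_event_attr.sizeof;
 * attr.config = perf_hw_id.PERF_COUNT_HW_CPU_CYCLES;
 * attr.sample_period = 100_000;   // one sample every 100k cycles
 * attr.sample_type = perf_event_sample_format.PERF_SAMPLE_IP
 *     | perf_event_sample_format.PERF_SAMPLE_TID
 *     | perf_event_sample_format.PERF_SAMPLE_TIME;
 * attr.disabled = 1;
 * ---
 */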
/**
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift
{
    PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /** user branches */
    PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /** kernel branches */
    PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /** hypervisor branches */

    PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /** any branch types */
    PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /** any call branch */
    PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /** any return branch */
    PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /** indirect calls */
    PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /** transaction aborts */
    PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /** in transaction */
    PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /** not in transaction */
    PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /** conditional branches */

    PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /** call/ret stack */
    PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /** indirect jumps */
    PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /** direct call */

    PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /** no flags */
    PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /** no cycles */

    PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /** save branch type */

    PERF_SAMPLE_BRANCH_MAX_SHIFT = 17 /** non-ABI */
}
///
enum perf_branch_sample_type
{
    PERF_SAMPLE_BRANCH_USER = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_USER_SHIFT,
    PERF_SAMPLE_BRANCH_KERNEL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
    PERF_SAMPLE_BRANCH_HV = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_HV_SHIFT,
    PERF_SAMPLE_BRANCH_ANY = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_SHIFT,
    PERF_SAMPLE_BRANCH_ANY_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
    PERF_SAMPLE_BRANCH_IND_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_ABORT_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
    PERF_SAMPLE_BRANCH_IN_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
    PERF_SAMPLE_BRANCH_NO_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
    PERF_SAMPLE_BRANCH_COND = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_COND_SHIFT,
    PERF_SAMPLE_BRANCH_CALL_STACK = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
    PERF_SAMPLE_BRANCH_IND_JUMP = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
    PERF_SAMPLE_BRANCH_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
    PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
    PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
    PERF_SAMPLE_BRANCH_MAX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_MAX_SHIFT
}
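/**
 * Example: recording a branch stack with each sample.
 *
 * A sketch only; whether branch sampling works depends on the PMU (for
 * example LBR on x86), and the kernel may reject unsupported combinations
 * at perf_event_open() time. PERF_SAMPLE_BRANCH_PLM_ALL, defined below,
 * supplies the privilege-level bits.
 * ---
 * perf_event_attr attr;
 * attr.type = perf_type_id.PERF_TYPE_HARDWARE;
 * attr.size = perf_event_attr.sizeof;
 * attr.config = perf_hw_id.PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
 * attr.sample_period = 10_000;
 * attr.sample_type = perf_event_sample_format.PERF_SAMPLE_BRANCH_STACK;
 * attr.branch_sample_type = PERF_SAMPLE_BRANCH_PLM_ALL
 *     | perf_branch_sample_type.PERF_SAMPLE_BRANCH_ANY;
 * ---
 */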
/**
 * Common flow change classification
 */
enum
{
    PERF_BR_UNKNOWN = 0, /** unknown */
    PERF_BR_COND = 1, /** conditional */
    PERF_BR_UNCOND = 2, /** unconditional */
    PERF_BR_IND = 3, /** indirect */
    PERF_BR_CALL = 4, /** function call */
    PERF_BR_IND_CALL = 5, /** indirect function call */
    PERF_BR_RET = 6, /** function return */
    PERF_BR_SYSCALL = 7, /** syscall */
    PERF_BR_SYSRET = 8, /** syscall return */
    PERF_BR_COND_CALL = 9, /** conditional function call */
    PERF_BR_COND_RET = 10, /** conditional function return */
    PERF_BR_MAX = 11
}

///
enum PERF_SAMPLE_BRANCH_PLM_ALL = perf_branch_sample_type.PERF_SAMPLE_BRANCH_USER
    | perf_branch_sample_type.PERF_SAMPLE_BRANCH_KERNEL
    | perf_branch_sample_type.PERF_SAMPLE_BRANCH_HV;

/**
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi
{
    ///
    PERF_SAMPLE_REGS_ABI_NONE = 0,
    ///
    PERF_SAMPLE_REGS_ABI_32 = 1,
    ///
    PERF_SAMPLE_REGS_ABI_64 = 2
}

/**
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum
{
    PERF_TXN_ELISION = 1 << 0, /** From elision */
    PERF_TXN_TRANSACTION = 1 << 1, /** From transaction */
    PERF_TXN_SYNC = 1 << 2, /** Instruction is related */
    PERF_TXN_ASYNC = 1 << 3, /** Instruction not related */
    PERF_TXN_RETRY = 1 << 4, /** Retry possible */
    PERF_TXN_CONFLICT = 1 << 5, /** Conflict abort */
    PERF_TXN_CAPACITY_WRITE = 1 << 6, /** Capacity write abort */
    PERF_TXN_CAPACITY_READ = 1 << 7, /** Capacity read abort */

    PERF_TXN_MAX = 1 << 8, /** non-ABI */

    /** bits 32..63 are reserved for the abort code */

    ///PERF_TXN_ABORT_MASK = 0xffffffff << 32,
    PERF_TXN_ABORT_SHIFT = 32
}

/**
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 * ---
 * struct read_format {
 *     { u64 value;
 *       { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *       { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *       { u64 id;           } && PERF_FORMAT_ID
 *     } && !PERF_FORMAT_GROUP
 *
 *     { u64 nr;
 *       { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *       { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *       { u64 value;
 *         { u64 id;         } && PERF_FORMAT_ID
 *       } cntr[nr];
 *     } && PERF_FORMAT_GROUP
 * };
 * ---
 */
enum perf_event_read_format
{
    ///
    PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
    ///
    PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
    ///
    PERF_FORMAT_ID = 1U << 2,
    ///
    PERF_FORMAT_GROUP = 1U << 3,
    PERF_FORMAT_MAX = 1U << 4 /** non-ABI */
}

enum PERF_ATTR_SIZE_VER0 = 64; /** sizeof first published struct */
enum PERF_ATTR_SIZE_VER1 = 72; /** add: config2 */
enum PERF_ATTR_SIZE_VER2 = 80; /** add: branch_sample_type */
enum PERF_ATTR_SIZE_VER3 = 96; /** add: sample_regs_user */
                                /* add: sample_stack_user */
enum PERF_ATTR_SIZE_VER4 = 104; /** add: sample_regs_intr */
enum PERF_ATTR_SIZE_VER5 = 112; /** add: aux_watermark */

/**
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *     should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr
{
    /**
     * Major type: hardware/software/tracepoint/etc.
     */
    uint type;

    /**
     * Size of the attr structure, for fwd/bwd compat.
     */
    uint size;

    /**
     * Type specific configuration information.
455 */ 456 ulong config; 457 /// 458 union 459 { 460 /// 461 ulong sample_period; 462 /// 463 ulong sample_freq; 464 } 465 /// 466 ulong sample_type; 467 /// 468 ulong read_format; 469 470 // mixin(bitfields!( 471 // ulong, "disabled", 1, 472 // ulong, "inherit", 1, 473 // ulong, "pinned", 1, 474 // ulong, "exclusive", 1, 475 // ulong, "exclude_user", 1, 476 // ulong, "exclude_kernel", 1, 477 // ulong, "exclude_hv", 1, 478 // ulong, "exclude_idle", 1, 479 // ulong, "mmap", 1, 480 // ulong, "comm", 1, 481 // ulong, "freq", 1, 482 // ulong, "inherit_stat", 1, 483 // ulong, "enable_on_exec", 1, 484 // ulong, "task", 1, 485 // ulong, "watermark", 1, 486 // ulong, "precise_ip", 2, 487 // ulong, "mmap_data", 1, 488 // ulong, "sample_id_all", 1, 489 // ulong, "exclude_host", 1, 490 // ulong, "exclude_guest", 1, 491 // ulong, "exclude_callchain_kernel", 1, 492 // ulong, "exclude_callchain_user", 1, 493 // ulong, "mmap2", 1, 494 // ulong, "comm_exec", 1, 495 // ulong, "use_clockid", 1, 496 // ulong, "context_switch", 1, 497 // ulong, "write_backward", 1, 498 // ulong, "namespaces", 1, 499 // ulong, "__reserved_1", 35)); 500 private ulong perf_event_attr_bitmanip; 501 /// 502 @property ulong disabled() @safe pure nothrow @nogc const 503 { 504 auto result = (perf_event_attr_bitmanip & 1U) >> 0U; 505 return cast(ulong) result; 506 } 507 /// 508 @property void disabled(ulong v) @safe pure nothrow @nogc 509 { 510 assert(v >= disabled_min, 511 "Value is smaller than the minimum value of bitfield 'disabled'"); 512 assert(v <= disabled_max, 513 "Value is greater than the maximum value of bitfield 'disabled'"); 514 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 515 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1U)) | ( 516 (cast(typeof(perf_event_attr_bitmanip)) v << 0U) & 1U)); 517 } 518 519 enum ulong disabled_min = cast(ulong) 0U; 520 enum ulong disabled_max = cast(ulong) 1U; 521 /// 522 @property ulong inherit() @safe pure nothrow @nogc const 523 { 524 auto result = (perf_event_attr_bitmanip & 2U) >> 1U; 525 return cast(ulong) result; 526 } 527 /// 528 @property void inherit(ulong v) @safe pure nothrow @nogc 529 { 530 assert(v >= inherit_min, 531 "Value is smaller than the minimum value of bitfield 'inherit'"); 532 assert(v <= inherit_max, 533 "Value is greater than the maximum value of bitfield 'inherit'"); 534 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 535 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2U)) | ( 536 (cast(typeof(perf_event_attr_bitmanip)) v << 1U) & 2U)); 537 } 538 539 enum ulong inherit_min = cast(ulong) 0U; 540 enum ulong inherit_max = cast(ulong) 1U; 541 /// 542 @property ulong pinned() @safe pure nothrow @nogc const 543 { 544 auto result = (perf_event_attr_bitmanip & 4U) >> 2U; 545 return cast(ulong) result; 546 } 547 /// 548 @property void pinned(ulong v) @safe pure nothrow @nogc 549 { 550 assert(v >= pinned_min, 551 "Value is smaller than the minimum value of bitfield 'pinned'"); 552 assert(v <= pinned_max, 553 "Value is greater than the maximum value of bitfield 'pinned'"); 554 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 555 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4U)) | ( 556 (cast(typeof(perf_event_attr_bitmanip)) v << 2U) & 4U)); 557 } 558 559 enum ulong pinned_min = cast(ulong) 0U; 560 enum ulong pinned_max = cast(ulong) 1U; 561 /// 562 @property ulong exclusive() @safe pure nothrow @nogc const 563 { 564 auto result = 
(perf_event_attr_bitmanip & 8U) >> 3U; 565 return cast(ulong) result; 566 } 567 /// 568 @property void exclusive(ulong v) @safe pure nothrow @nogc 569 { 570 assert(v >= exclusive_min, 571 "Value is smaller than the minimum value of bitfield 'exclusive'"); 572 assert(v <= exclusive_max, 573 "Value is greater than the maximum value of bitfield 'exclusive'"); 574 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 575 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8U)) | ( 576 (cast(typeof(perf_event_attr_bitmanip)) v << 3U) & 8U)); 577 } 578 579 enum ulong exclusive_min = cast(ulong) 0U; 580 enum ulong exclusive_max = cast(ulong) 1U; 581 /// 582 @property ulong exclude_user() @safe pure nothrow @nogc const 583 { 584 auto result = (perf_event_attr_bitmanip & 16U) >> 4U; 585 return cast(ulong) result; 586 } 587 /// 588 @property void exclude_user(ulong v) @safe pure nothrow @nogc 589 { 590 assert(v >= exclude_user_min, 591 "Value is smaller than the minimum value of bitfield 'exclude_user'"); 592 assert(v <= exclude_user_max, 593 "Value is greater than the maximum value of bitfield 'exclude_user'"); 594 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 595 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16U)) | ( 596 (cast(typeof(perf_event_attr_bitmanip)) v << 4U) & 16U)); 597 } 598 599 enum ulong exclude_user_min = cast(ulong) 0U; 600 enum ulong exclude_user_max = cast(ulong) 1U; 601 /// 602 @property ulong exclude_kernel() @safe pure nothrow @nogc const 603 { 604 auto result = (perf_event_attr_bitmanip & 32U) >> 5U; 605 return cast(ulong) result; 606 } 607 /// 608 @property void exclude_kernel(ulong v) @safe pure nothrow @nogc 609 { 610 assert(v >= exclude_kernel_min, 611 "Value is smaller than the minimum value of bitfield 'exclude_kernel'"); 612 assert(v <= exclude_kernel_max, 613 "Value is greater than the maximum value of bitfield 'exclude_kernel'"); 614 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 615 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 32U)) | ( 616 (cast(typeof(perf_event_attr_bitmanip)) v << 5U) & 32U)); 617 } 618 619 enum ulong exclude_kernel_min = cast(ulong) 0U; 620 enum ulong exclude_kernel_max = cast(ulong) 1U; 621 /// 622 @property ulong exclude_hv() @safe pure nothrow @nogc const 623 { 624 auto result = (perf_event_attr_bitmanip & 64U) >> 6U; 625 return cast(ulong) result; 626 } 627 /// 628 @property void exclude_hv(ulong v) @safe pure nothrow @nogc 629 { 630 assert(v >= exclude_hv_min, 631 "Value is smaller than the minimum value of bitfield 'exclude_hv'"); 632 assert(v <= exclude_hv_max, 633 "Value is greater than the maximum value of bitfield 'exclude_hv'"); 634 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 635 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 64U)) | ( 636 (cast(typeof(perf_event_attr_bitmanip)) v << 6U) & 64U)); 637 } 638 639 enum ulong exclude_hv_min = cast(ulong) 0U; 640 enum ulong exclude_hv_max = cast(ulong) 1U; 641 /// 642 @property ulong exclude_idle() @safe pure nothrow @nogc const 643 { 644 auto result = (perf_event_attr_bitmanip & 128U) >> 7U; 645 return cast(ulong) result; 646 } 647 /// 648 @property void exclude_idle(ulong v) @safe pure nothrow @nogc 649 { 650 assert(v >= exclude_idle_min, 651 "Value is smaller than the minimum value of bitfield 'exclude_idle'"); 652 assert(v <= exclude_idle_max, 653 "Value is greater than the maximum value of bitfield 
'exclude_idle'"); 654 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 655 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 128U)) | ( 656 (cast(typeof(perf_event_attr_bitmanip)) v << 7U) & 128U)); 657 } 658 659 enum ulong exclude_idle_min = cast(ulong) 0U; 660 enum ulong exclude_idle_max = cast(ulong) 1U; 661 /// 662 @property ulong mmap() @safe pure nothrow @nogc const 663 { 664 auto result = (perf_event_attr_bitmanip & 256U) >> 8U; 665 return cast(ulong) result; 666 } 667 /// 668 @property void mmap(ulong v) @safe pure nothrow @nogc 669 { 670 assert(v >= mmap_min, "Value is smaller than the minimum value of bitfield 'mmap'"); 671 assert(v <= mmap_max, "Value is greater than the maximum value of bitfield 'mmap'"); 672 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 673 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 256U)) | ( 674 (cast(typeof(perf_event_attr_bitmanip)) v << 8U) & 256U)); 675 } 676 677 enum ulong mmap_min = cast(ulong) 0U; 678 enum ulong mmap_max = cast(ulong) 1U; 679 /// 680 @property ulong comm() @safe pure nothrow @nogc const 681 { 682 auto result = (perf_event_attr_bitmanip & 512U) >> 9U; 683 return cast(ulong) result; 684 } 685 /// 686 @property void comm(ulong v) @safe pure nothrow @nogc 687 { 688 assert(v >= comm_min, "Value is smaller than the minimum value of bitfield 'comm'"); 689 assert(v <= comm_max, "Value is greater than the maximum value of bitfield 'comm'"); 690 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 691 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 512U)) | ( 692 (cast(typeof(perf_event_attr_bitmanip)) v << 9U) & 512U)); 693 } 694 695 enum ulong comm_min = cast(ulong) 0U; 696 enum ulong comm_max = cast(ulong) 1U; 697 /// 698 @property ulong freq() @safe pure nothrow @nogc const 699 { 700 auto result = (perf_event_attr_bitmanip & 1024U) >> 10U; 701 return cast(ulong) result; 702 } 703 /// 704 @property void freq(ulong v) @safe pure nothrow @nogc 705 { 706 assert(v >= freq_min, "Value is smaller than the minimum value of bitfield 'freq'"); 707 assert(v <= freq_max, "Value is greater than the maximum value of bitfield 'freq'"); 708 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 709 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1024U)) | ( 710 (cast(typeof(perf_event_attr_bitmanip)) v << 10U) & 1024U)); 711 } 712 713 enum ulong freq_min = cast(ulong) 0U; 714 enum ulong freq_max = cast(ulong) 1U; 715 /// 716 @property ulong inherit_stat() @safe pure nothrow @nogc const 717 { 718 auto result = (perf_event_attr_bitmanip & 2048U) >> 11U; 719 return cast(ulong) result; 720 } 721 /// 722 @property void inherit_stat(ulong v) @safe pure nothrow @nogc 723 { 724 assert(v >= inherit_stat_min, 725 "Value is smaller than the minimum value of bitfield 'inherit_stat'"); 726 assert(v <= inherit_stat_max, 727 "Value is greater than the maximum value of bitfield 'inherit_stat'"); 728 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 729 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2048U)) | ( 730 (cast(typeof(perf_event_attr_bitmanip)) v << 11U) & 2048U)); 731 } 732 733 enum ulong inherit_stat_min = cast(ulong) 0U; 734 enum ulong inherit_stat_max = cast(ulong) 1U; 735 /// 736 @property ulong enable_on_exec() @safe pure nothrow @nogc const 737 { 738 auto result = (perf_event_attr_bitmanip & 4096U) >> 12U; 739 return cast(ulong) result; 740 } 741 
/// 742 @property void enable_on_exec(ulong v) @safe pure nothrow @nogc 743 { 744 assert(v >= enable_on_exec_min, 745 "Value is smaller than the minimum value of bitfield 'enable_on_exec'"); 746 assert(v <= enable_on_exec_max, 747 "Value is greater than the maximum value of bitfield 'enable_on_exec'"); 748 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 749 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4096U)) | ( 750 (cast(typeof(perf_event_attr_bitmanip)) v << 12U) & 4096U)); 751 } 752 753 enum ulong enable_on_exec_min = cast(ulong) 0U; 754 enum ulong enable_on_exec_max = cast(ulong) 1U; 755 /// 756 @property ulong task() @safe pure nothrow @nogc const 757 { 758 auto result = (perf_event_attr_bitmanip & 8192U) >> 13U; 759 return cast(ulong) result; 760 } 761 /// 762 @property void task(ulong v) @safe pure nothrow @nogc 763 { 764 assert(v >= task_min, "Value is smaller than the minimum value of bitfield 'task'"); 765 assert(v <= task_max, "Value is greater than the maximum value of bitfield 'task'"); 766 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 767 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8192U)) | ( 768 (cast(typeof(perf_event_attr_bitmanip)) v << 13U) & 8192U)); 769 } 770 771 enum ulong task_min = cast(ulong) 0U; 772 enum ulong task_max = cast(ulong) 1U; 773 /// 774 @property ulong watermark() @safe pure nothrow @nogc const 775 { 776 auto result = (perf_event_attr_bitmanip & 16384U) >> 14U; 777 return cast(ulong) result; 778 } 779 /// 780 @property void watermark(ulong v) @safe pure nothrow @nogc 781 { 782 assert(v >= watermark_min, 783 "Value is smaller than the minimum value of bitfield 'watermark'"); 784 assert(v <= watermark_max, 785 "Value is greater than the maximum value of bitfield 'watermark'"); 786 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 787 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16384U)) | ( 788 (cast(typeof(perf_event_attr_bitmanip)) v << 14U) & 16384U)); 789 } 790 791 enum ulong watermark_min = cast(ulong) 0U; 792 enum ulong watermark_max = cast(ulong) 1U; 793 /// 794 @property ulong precise_ip() @safe pure nothrow @nogc const 795 { 796 auto result = (perf_event_attr_bitmanip & 98304U) >> 15U; 797 return cast(ulong) result; 798 } 799 /// 800 @property void precise_ip(ulong v) @safe pure nothrow @nogc 801 { 802 assert(v >= precise_ip_min, 803 "Value is smaller than the minimum value of bitfield 'precise_ip'"); 804 assert(v <= precise_ip_max, 805 "Value is greater than the maximum value of bitfield 'precise_ip'"); 806 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 807 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 98304U)) | ( 808 (cast(typeof(perf_event_attr_bitmanip)) v << 15U) & 98304U)); 809 } 810 811 enum ulong precise_ip_min = cast(ulong) 0U; 812 enum ulong precise_ip_max = cast(ulong) 3U; 813 /// 814 @property ulong mmap_data() @safe pure nothrow @nogc const 815 { 816 auto result = (perf_event_attr_bitmanip & 131072U) >> 17U; 817 return cast(ulong) result; 818 } 819 /// 820 @property void mmap_data(ulong v) @safe pure nothrow @nogc 821 { 822 assert(v >= mmap_data_min, 823 "Value is smaller than the minimum value of bitfield 'mmap_data'"); 824 assert(v <= mmap_data_max, 825 "Value is greater than the maximum value of bitfield 'mmap_data'"); 826 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 827 (perf_event_attr_bitmanip & (-1 - 
cast(typeof(perf_event_attr_bitmanip)) 131072U)) | ( 828 (cast(typeof(perf_event_attr_bitmanip)) v << 17U) & 131072U)); 829 } 830 831 enum ulong mmap_data_min = cast(ulong) 0U; 832 enum ulong mmap_data_max = cast(ulong) 1U; 833 /// 834 @property ulong sample_id_all() @safe pure nothrow @nogc const 835 { 836 auto result = (perf_event_attr_bitmanip & 262144U) >> 18U; 837 return cast(ulong) result; 838 } 839 /// 840 @property void sample_id_all(ulong v) @safe pure nothrow @nogc 841 { 842 assert(v >= sample_id_all_min, 843 "Value is smaller than the minimum value of bitfield 'sample_id_all'"); 844 assert(v <= sample_id_all_max, 845 "Value is greater than the maximum value of bitfield 'sample_id_all'"); 846 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 847 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 262144U)) | ( 848 (cast(typeof(perf_event_attr_bitmanip)) v << 18U) & 262144U)); 849 } 850 851 enum ulong sample_id_all_min = cast(ulong) 0U; 852 enum ulong sample_id_all_max = cast(ulong) 1U; 853 /// 854 @property ulong exclude_host() @safe pure nothrow @nogc const 855 { 856 auto result = (perf_event_attr_bitmanip & 524288U) >> 19U; 857 return cast(ulong) result; 858 } 859 /// 860 @property void exclude_host(ulong v) @safe pure nothrow @nogc 861 { 862 assert(v >= exclude_host_min, 863 "Value is smaller than the minimum value of bitfield 'exclude_host'"); 864 assert(v <= exclude_host_max, 865 "Value is greater than the maximum value of bitfield 'exclude_host'"); 866 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 867 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 524288U)) | ( 868 (cast(typeof(perf_event_attr_bitmanip)) v << 19U) & 524288U)); 869 } 870 871 enum ulong exclude_host_min = cast(ulong) 0U; 872 enum ulong exclude_host_max = cast(ulong) 1U; 873 /// 874 @property ulong exclude_guest() @safe pure nothrow @nogc const 875 { 876 auto result = (perf_event_attr_bitmanip & 1048576U) >> 20U; 877 return cast(ulong) result; 878 } 879 /// 880 @property void exclude_guest(ulong v) @safe pure nothrow @nogc 881 { 882 assert(v >= exclude_guest_min, 883 "Value is smaller than the minimum value of bitfield 'exclude_guest'"); 884 assert(v <= exclude_guest_max, 885 "Value is greater than the maximum value of bitfield 'exclude_guest'"); 886 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 887 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1048576U)) | ( 888 (cast(typeof(perf_event_attr_bitmanip)) v << 20U) & 1048576U)); 889 } 890 891 enum ulong exclude_guest_min = cast(ulong) 0U; 892 enum ulong exclude_guest_max = cast(ulong) 1U; 893 /// 894 @property ulong exclude_callchain_kernel() @safe pure nothrow @nogc const 895 { 896 auto result = (perf_event_attr_bitmanip & 2097152U) >> 21U; 897 return cast(ulong) result; 898 } 899 /// 900 @property void exclude_callchain_kernel(ulong v) @safe pure nothrow @nogc 901 { 902 assert(v >= exclude_callchain_kernel_min, 903 "Value is smaller than the minimum value of bitfield 'exclude_callchain_kernel'"); 904 assert(v <= exclude_callchain_kernel_max, 905 "Value is greater than the maximum value of bitfield 'exclude_callchain_kernel'"); 906 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 907 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2097152U)) | ( 908 (cast(typeof(perf_event_attr_bitmanip)) v << 21U) & 2097152U)); 909 } 910 911 enum ulong exclude_callchain_kernel_min = cast(ulong) 0U; 912 
enum ulong exclude_callchain_kernel_max = cast(ulong) 1U; 913 /// 914 @property ulong exclude_callchain_user() @safe pure nothrow @nogc const 915 { 916 auto result = (perf_event_attr_bitmanip & 4194304U) >> 22U; 917 return cast(ulong) result; 918 } 919 /// 920 @property void exclude_callchain_user(ulong v) @safe pure nothrow @nogc 921 { 922 assert(v >= exclude_callchain_user_min, 923 "Value is smaller than the minimum value of bitfield 'exclude_callchain_user'"); 924 assert(v <= exclude_callchain_user_max, 925 "Value is greater than the maximum value of bitfield 'exclude_callchain_user'"); 926 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 927 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4194304U)) | ( 928 (cast(typeof(perf_event_attr_bitmanip)) v << 22U) & 4194304U)); 929 } 930 931 enum ulong exclude_callchain_user_min = cast(ulong) 0U; 932 enum ulong exclude_callchain_user_max = cast(ulong) 1U; 933 /// 934 @property ulong mmap2() @safe pure nothrow @nogc const 935 { 936 auto result = (perf_event_attr_bitmanip & 8388608U) >> 23U; 937 return cast(ulong) result; 938 } 939 /// 940 @property void mmap2(ulong v) @safe pure nothrow @nogc 941 { 942 assert(v >= mmap2_min, 943 "Value is smaller than the minimum value of bitfield 'mmap2'"); 944 assert(v <= mmap2_max, 945 "Value is greater than the maximum value of bitfield 'mmap2'"); 946 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 947 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8388608U)) | ( 948 (cast(typeof(perf_event_attr_bitmanip)) v << 23U) & 8388608U)); 949 } 950 951 enum ulong mmap2_min = cast(ulong) 0U; 952 enum ulong mmap2_max = cast(ulong) 1U; 953 /// 954 @property ulong comm_exec() @safe pure nothrow @nogc const 955 { 956 auto result = (perf_event_attr_bitmanip & 16777216U) >> 24U; 957 return cast(ulong) result; 958 } 959 /// 960 @property void comm_exec(ulong v) @safe pure nothrow @nogc 961 { 962 assert(v >= comm_exec_min, 963 "Value is smaller than the minimum value of bitfield 'comm_exec'"); 964 assert(v <= comm_exec_max, 965 "Value is greater than the maximum value of bitfield 'comm_exec'"); 966 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 967 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16777216U)) | ( 968 (cast(typeof(perf_event_attr_bitmanip)) v << 24U) & 16777216U)); 969 } 970 971 enum ulong comm_exec_min = cast(ulong) 0U; 972 enum ulong comm_exec_max = cast(ulong) 1U; 973 /// 974 @property ulong use_clockid() @safe pure nothrow @nogc const 975 { 976 auto result = (perf_event_attr_bitmanip & 33554432U) >> 25U; 977 return cast(ulong) result; 978 } 979 /// 980 @property void use_clockid(ulong v) @safe pure nothrow @nogc 981 { 982 assert(v >= use_clockid_min, 983 "Value is smaller than the minimum value of bitfield 'use_clockid'"); 984 assert(v <= use_clockid_max, 985 "Value is greater than the maximum value of bitfield 'use_clockid'"); 986 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 987 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 33554432U)) | ( 988 (cast(typeof(perf_event_attr_bitmanip)) v << 25U) & 33554432U)); 989 } 990 991 enum ulong use_clockid_min = cast(ulong) 0U; 992 enum ulong use_clockid_max = cast(ulong) 1U; 993 /// 994 @property ulong context_switch() @safe pure nothrow @nogc const 995 { 996 auto result = (perf_event_attr_bitmanip & 67108864U) >> 26U; 997 return cast(ulong) result; 998 } 999 /// 1000 @property void 
context_switch(ulong v) @safe pure nothrow @nogc 1001 { 1002 assert(v >= context_switch_min, 1003 "Value is smaller than the minimum value of bitfield 'context_switch'"); 1004 assert(v <= context_switch_max, 1005 "Value is greater than the maximum value of bitfield 'context_switch'"); 1006 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 1007 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 67108864U)) | ( 1008 (cast(typeof(perf_event_attr_bitmanip)) v << 26U) & 67108864U)); 1009 } 1010 1011 enum ulong context_switch_min = cast(ulong) 0U; 1012 enum ulong context_switch_max = cast(ulong) 1U; 1013 /// 1014 @property ulong write_backward() @safe pure nothrow @nogc const 1015 { 1016 auto result = (perf_event_attr_bitmanip & 134217728U) >> 27U; 1017 return cast(ulong) result; 1018 } 1019 /// 1020 @property void write_backward(ulong v) @safe pure nothrow @nogc 1021 { 1022 assert(v >= write_backward_min, 1023 "Value is smaller than the minimum value of bitfield 'write_backward'"); 1024 assert(v <= write_backward_max, 1025 "Value is greater than the maximum value of bitfield 'write_backward'"); 1026 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 1027 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 134217728U)) | ( 1028 (cast(typeof(perf_event_attr_bitmanip)) v << 27U) & 134217728U)); 1029 } 1030 1031 enum ulong write_backward_min = cast(ulong) 0U; 1032 enum ulong write_backward_max = cast(ulong) 1U; 1033 /// 1034 @property ulong namespaces() @safe pure nothrow @nogc const 1035 { 1036 auto result = (perf_event_attr_bitmanip & 268435456U) >> 28U; 1037 return cast(ulong) result; 1038 } 1039 /// 1040 @property void namespaces(ulong v) @safe pure nothrow @nogc 1041 { 1042 assert(v >= namespaces_min, 1043 "Value is smaller than the minimum value of bitfield 'namespaces'"); 1044 assert(v <= namespaces_max, 1045 "Value is greater than the maximum value of bitfield 'namespaces'"); 1046 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 1047 (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 268435456U)) | ( 1048 (cast(typeof(perf_event_attr_bitmanip)) v << 28U) & 268435456U)); 1049 } 1050 1051 enum ulong namespaces_min = cast(ulong) 0U; 1052 enum ulong namespaces_max = cast(ulong) 1U; 1053 /// 1054 @property ulong __reserved_1() @safe pure nothrow @nogc const 1055 { 1056 auto result = (perf_event_attr_bitmanip & 18446744073172680704UL) >> 29U; 1057 return cast(ulong) result; 1058 } 1059 /// 1060 @property void __reserved_1(ulong v) @safe pure nothrow @nogc 1061 { 1062 assert(v >= __reserved_1_min, 1063 "Value is smaller than the minimum value of bitfield '__reserved_1'"); 1064 assert(v <= __reserved_1_max, 1065 "Value is greater than the maximum value of bitfield '__reserved_1'"); 1066 perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))( 1067 (perf_event_attr_bitmanip & (-1 - cast( 1068 typeof(perf_event_attr_bitmanip)) 18446744073172680704UL)) | ( 1069 (cast(typeof(perf_event_attr_bitmanip)) v << 29U) & 18446744073172680704UL)); 1070 } 1071 1072 enum ulong __reserved_1_min = cast(ulong) 0U; 1073 enum ulong __reserved_1_max = cast(ulong) 34359738367UL; 1074 /// 1075 union 1076 { 1077 uint wakeup_events; /** wakeup every n events */ 1078 uint wakeup_watermark; /** bytes before wakeup */ 1079 } 1080 /// 1081 uint bp_type; 1082 1083 union 1084 { 1085 /// 1086 ulong bp_addr; 1087 ulong config1; /** extension of config */ 1088 } 1089 1090 union 1091 { 1092 /// 1093 ulong 
bp_len;
        ulong config2; /** extension of config1 */
    }

    ulong branch_sample_type; /** enum perf_branch_sample_type */

    /**
     * Defines set of user regs to dump on samples.
     * See asm/perf_regs.h for details.
     */
    ulong sample_regs_user;

    /**
     * Defines size of the user stack to dump on samples.
     */
    uint sample_stack_user;
    ///
    int clockid;

    /**
     * Defines set of regs to dump for each sample
     * state captured on:
     *  - precise = 0: PMU interrupt
     *  - precise > 0: sampled instruction
     *
     * See asm/perf_regs.h for details.
     */
    ulong sample_regs_intr;

    /**
     * Wakeup watermark for AUX area
     */
    uint aux_watermark;
    ///
    ushort sample_max_stack;
    /** align to __u64 */
    ushort __reserved_2;
}
///
extern (D) auto perf_flags(T)(auto ref T attr)
{
    return *(&attr.read_format + 1);
}

/**
 * Ioctls that can be done on a perf event fd:
 */
enum PERF_EVENT_IOC_ENABLE = _IO('$', 0);
///
enum PERF_EVENT_IOC_DISABLE = _IO('$', 1);
///
enum PERF_EVENT_IOC_REFRESH = _IO('$', 2);
///
enum PERF_EVENT_IOC_RESET = _IO('$', 3);
///
enum PERF_EVENT_IOC_PERIOD = _IOW!ulong('$', 4);
///
enum PERF_EVENT_IOC_SET_OUTPUT = _IO('$', 5);
///
enum PERF_EVENT_IOC_SET_FILTER = _IOW!(char*)('$', 6);
///
enum PERF_EVENT_IOC_ID = _IOR!(ulong*)('$', 7);
///
enum PERF_EVENT_IOC_SET_BPF = _IOW!uint('$', 8);
///
enum PERF_EVENT_IOC_PAUSE_OUTPUT = _IOW!uint('$', 9);

///
enum perf_event_ioc_flags
{
    PERF_IOC_FLAG_GROUP = 1U << 0
}

/**
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page
{
    uint version_; /** version number of this structure */
    uint compat_version; /** lowest version this is compat with */

    /**
     * Bits needed to read the hw events in user-space.
     * ---
     * u32 seq, time_mult, time_shift, index, width;
     * u64 count, enabled, running;
     * u64 cyc, time_offset;
     * s64 pmc = 0;
     *
     * do {
     *     seq = pc->lock;
     *     barrier()
     *
     *     enabled = pc->time_enabled;
     *     running = pc->time_running;
     *
     *     if (pc->cap_usr_time && enabled != running) {
     *         cyc = rdtsc();
     *         time_offset = pc->time_offset;
     *         time_mult = pc->time_mult;
     *         time_shift = pc->time_shift;
     *     }
     *
     *     index = pc->index;
     *     count = pc->offset;
     *     if (pc->cap_user_rdpmc && index) {
     *         width = pc->pmc_width;
     *         pmc = rdpmc(index - 1);
     *     }
     *
     *     barrier();
     * } while (pc->lock != seq);
     * ---
     * NOTE: for obvious reason this only works on self-monitoring
     * processes.
1208 */ 1209 uint lock; /** seqlock for synchronization */ 1210 uint index; /** hardware event identifier */ 1211 long offset; /** add to hardware event value */ 1212 ulong time_enabled; /** time event active */ 1213 ulong time_running; /** time event on cpu */ 1214 /// 1215 union 1216 { 1217 /// 1218 ulong capabilities; 1219 1220 struct 1221 { 1222 /* mixin(bitfields!(ulong, "cap_bit0", 1, ulong, "cap_bit0_is_deprecated", 1, ulong, 1223 "cap_user_rdpmc", 1, ulong, "cap_user_time", 1, ulong, 1224 "cap_user_time_zero", 1, ulong, "cap_____res", 59)); */ 1225 1226 private ulong mmap_page_bitmanip; 1227 /// 1228 @property ulong cap_bit0() @safe pure nothrow @nogc const 1229 { 1230 auto result = (mmap_page_bitmanip & 1U) >> 0U; 1231 return cast(ulong) result; 1232 } 1233 /// 1234 @property void cap_bit0(ulong v) @safe pure nothrow @nogc 1235 { 1236 assert(v >= cap_bit0_min, 1237 "Value is smaller than the minimum value of bitfield 'cap_bit0'"); 1238 assert(v <= cap_bit0_max, 1239 "Value is greater than the maximum value of bitfield 'cap_bit0'"); 1240 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))( 1241 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 1U)) | ( 1242 (cast(typeof(mmap_page_bitmanip)) v << 0U) & 1U)); 1243 } 1244 1245 enum ulong cap_bit0_min = cast(ulong) 0U; 1246 enum ulong cap_bit0_max = cast(ulong) 1U; 1247 /// 1248 @property ulong cap_bit0_is_deprecated() @safe pure nothrow @nogc const 1249 { 1250 auto result = (mmap_page_bitmanip & 2U) >> 1U; 1251 return cast(ulong) result; 1252 } 1253 /// 1254 @property void cap_bit0_is_deprecated(ulong v) @safe pure nothrow @nogc 1255 { 1256 assert(v >= cap_bit0_is_deprecated_min, 1257 "Value is smaller than the minimum value of bitfield 'cap_bit0_is_deprecated'"); 1258 assert(v <= cap_bit0_is_deprecated_max, 1259 "Value is greater than the maximum value of bitfield 'cap_bit0_is_deprecated'"); 1260 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))( 1261 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 2U)) | ( 1262 (cast(typeof(mmap_page_bitmanip)) v << 1U) & 2U)); 1263 } 1264 1265 enum ulong cap_bit0_is_deprecated_min = cast(ulong) 0U; 1266 enum ulong cap_bit0_is_deprecated_max = cast(ulong) 1U; 1267 /// 1268 @property ulong cap_user_rdpmc() @safe pure nothrow @nogc const 1269 { 1270 auto result = (mmap_page_bitmanip & 4U) >> 2U; 1271 return cast(ulong) result; 1272 } 1273 /// 1274 @property void cap_user_rdpmc(ulong v) @safe pure nothrow @nogc 1275 { 1276 assert(v >= cap_user_rdpmc_min, 1277 "Value is smaller than the minimum value of bitfield 'cap_user_rdpmc'"); 1278 assert(v <= cap_user_rdpmc_max, 1279 "Value is greater than the maximum value of bitfield 'cap_user_rdpmc'"); 1280 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))( 1281 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 4U)) | ( 1282 (cast(typeof(mmap_page_bitmanip)) v << 2U) & 4U)); 1283 } 1284 1285 enum ulong cap_user_rdpmc_min = cast(ulong) 0U; 1286 enum ulong cap_user_rdpmc_max = cast(ulong) 1U; 1287 /// 1288 @property ulong cap_user_time() @safe pure nothrow @nogc const 1289 { 1290 auto result = (mmap_page_bitmanip & 8U) >> 3U; 1291 return cast(ulong) result; 1292 } 1293 /// 1294 @property void cap_user_time(ulong v) @safe pure nothrow @nogc 1295 { 1296 assert(v >= cap_user_time_min, 1297 "Value is smaller than the minimum value of bitfield 'cap_user_time'"); 1298 assert(v <= cap_user_time_max, 1299 "Value is greater than the maximum value of bitfield 'cap_user_time'"); 1300 mmap_page_bitmanip = 
cast(typeof(mmap_page_bitmanip))( 1301 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 8U)) | ( 1302 (cast(typeof(mmap_page_bitmanip)) v << 3U) & 8U)); 1303 } 1304 1305 enum ulong cap_user_time_min = cast(ulong) 0U; 1306 enum ulong cap_user_time_max = cast(ulong) 1U; 1307 /// 1308 @property ulong cap_user_time_zero() @safe pure nothrow @nogc const 1309 { 1310 auto result = (mmap_page_bitmanip & 16U) >> 4U; 1311 return cast(ulong) result; 1312 } 1313 /// 1314 @property void cap_user_time_zero(ulong v) @safe pure nothrow @nogc 1315 { 1316 assert(v >= cap_user_time_zero_min, 1317 "Value is smaller than the minimum value of bitfield 'cap_user_time_zero'"); 1318 assert(v <= cap_user_time_zero_max, 1319 "Value is greater than the maximum value of bitfield 'cap_user_time_zero'"); 1320 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))( 1321 (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 16U)) | ( 1322 (cast(typeof(mmap_page_bitmanip)) v << 4U) & 16U)); 1323 } 1324 1325 enum ulong cap_user_time_zero_min = cast(ulong) 0U; 1326 enum ulong cap_user_time_zero_max = cast(ulong) 1U; 1327 /// 1328 @property ulong cap_____res() @safe pure nothrow @nogc const 1329 { 1330 auto result = (mmap_page_bitmanip & 18446744073709551584UL) >> 5U; 1331 return cast(ulong) result; 1332 } 1333 /// 1334 @property void cap_____res(ulong v) @safe pure nothrow @nogc 1335 { 1336 assert(v >= cap_____res_min, 1337 "Value is smaller than the minimum value of bitfield 'cap_____res'"); 1338 assert(v <= cap_____res_max, 1339 "Value is greater than the maximum value of bitfield 'cap_____res'"); 1340 mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))((mmap_page_bitmanip & ( 1341 -1 - cast(typeof(mmap_page_bitmanip)) 18446744073709551584UL)) | ( 1342 (cast(typeof(mmap_page_bitmanip)) v << 5U) & 18446744073709551584UL)); 1343 } 1344 1345 enum ulong cap_____res_min = cast(ulong) 0U; 1346 enum ulong cap_____res_max = cast(ulong) 576460752303423487UL; 1347 } 1348 } 1349 1350 /** 1351 * If cap_user_rdpmc this field provides the bit-width of the value 1352 * read using the rdpmc() or equivalent instruction. This can be used 1353 * to sign extend the result like: 1354 * 1355 * pmc <<= 64 - width; 1356 * pmc >>= 64 - width; // signed shift right 1357 * count += pmc; 1358 */ 1359 ushort pmc_width; 1360 1361 /** 1362 * If cap_usr_time the below fields can be used to compute the time 1363 * delta since time_enabled (in ns) using rdtsc or similar. 1364 * 1365 * u64 quot, rem; 1366 * u64 delta; 1367 * 1368 * quot = (cyc >> time_shift); 1369 * rem = cyc & (((u64)1 << time_shift) - 1); 1370 * delta = time_offset + quot * time_mult + 1371 * ((rem * time_mult) >> time_shift); 1372 * 1373 * Where time_offset,time_mult,time_shift and cyc are read in the 1374 * seqcount loop described above. This delta can then be added to 1375 * enabled and possible running (if index), improving the scaling: 1376 * 1377 * enabled += delta; 1378 * if (index) 1379 * running += delta; 1380 * 1381 * quot = count / running; 1382 * rem = count % running; 1383 * count = quot * enabled + (rem * enabled) / running; 1384 */ 1385 ushort time_shift; 1386 /// 1387 uint time_mult; 1388 /// 1389 ulong time_offset; 1390 /** 1391 * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated 1392 * from sample timestamps. 
     *
     *     time = timestamp - time_zero;
     *     quot = time / time_mult;
     *     rem  = time % time_mult;
     *     cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
     *
     * And vice versa:
     *
     *     quot = cyc >> time_shift;
     *     rem  = cyc & (((u64)1 << time_shift) - 1);
     *     timestamp = time_zero + quot * time_mult +
     *                 ((rem * time_mult) >> time_shift);
     */
    ulong time_zero;
    uint size; /** Header size up to __reserved[] fields. */

    /**
     * Hole for extension of the self monitor capabilities
     */

    ubyte[948] __reserved; /** align to 1k. */

    /**
     * Control data for the mmap() data buffer.
     *
     * User-space reading the @data_head value should issue an smp_rmb(),
     * after reading this value.
     *
     * When the mapping is PROT_WRITE the @data_tail value should be
     * written by userspace to reflect the last read data, after issuing
     * an smp_mb() to separate the data read from the ->data_tail store.
     * In this case the kernel will not over-write unread data.
     *
     * See perf_output_put_handle() for the data ordering.
     *
     * data_{offset,size} indicate the location and size of the perf record
     * buffer within the mmapped area.
     */
    ulong data_head; /** head in the data section */
    ulong data_tail; /** user-space written tail */
    ulong data_offset; /** where the buffer starts */
    ulong data_size; /** data buffer size */

    /**
     * AUX area is defined by aux_{offset,size} fields that should be set
     * by the userspace, so that
     * ---
     * aux_offset >= data_offset + data_size
     * ---
     * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
     *
     * Ring buffer pointers aux_{head,tail} have the same semantics as
     * data_{head,tail} and same ordering rules apply.
     */
    ulong aux_head;
    ///
    ulong aux_tail;
    ///
    ulong aux_offset;
    ///
    ulong aux_size;
}
///
enum PERF_RECORD_MISC_CPUMODE_MASK = 7 << 0;
///
enum PERF_RECORD_MISC_CPUMODE_UNKNOWN = 0 << 0;
///
enum PERF_RECORD_MISC_KERNEL = 1 << 0;
///
enum PERF_RECORD_MISC_USER = 2 << 0;
///
enum PERF_RECORD_MISC_HYPERVISOR = 3 << 0;
///
enum PERF_RECORD_MISC_GUEST_KERNEL = 4 << 0;
///
enum PERF_RECORD_MISC_GUEST_USER = 5 << 0;

/**
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
enum PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 1 << 12;
/**
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
enum PERF_RECORD_MISC_MMAP_DATA = 1 << 13;
///
enum PERF_RECORD_MISC_COMM_EXEC = 1 << 13;
///
enum PERF_RECORD_MISC_SWITCH_OUT = 1 << 13;
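/**
 * Example: draining records from the mmap()ed data buffer.
 *
 * A simplified sketch (the helper name drain is illustrative): it assumes
 * the event fd was mmap()ed with 1 + 2^n pages so that the first page is
 * the perf_event_mmap_page above, it ignores records that wrap around the
 * end of the buffer, and it elides the memory barriers discussed in the
 * data_head/data_tail comments.
 * ---
 * void drain(perf_event_mmap_page* meta)
 * {
 *     auto base = cast(ubyte*) meta;
 *     const head = meta.data_head;        // written by the kernel
 *     ulong tail = meta.data_tail;        // written by user-space
 *
 *     while (tail < head)
 *     {
 *         auto rec = cast(perf_event_header*)(base + meta.data_offset
 *             + (tail % meta.data_size));
 *         const cpumode = rec.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 *         if (rec.type == perf_event_type.PERF_RECORD_SAMPLE
 *             && cpumode == PERF_RECORD_MISC_USER)
 *         {
 *             // parse the PERF_RECORD_SAMPLE layout documented below
 *         }
 *         tail += rec.size;
 *     }
 *     meta.data_tail = tail;              // mark records as consumed
 * }
 * ---
 */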
/**
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
enum PERF_RECORD_MISC_EXACT_IP = 1 << 14;
/**
 * Reserve the last bit to indicate some extended misc field
 */
enum PERF_RECORD_MISC_EXT_RESERVED = 1 << 15;
///
struct perf_event_header
{
    ///
    uint type;
    ///
    ushort misc;
    ///
    ushort size;
}
///
struct perf_ns_link_info
{
    ///
    ulong dev;
    ///
    ulong ino;
}

enum
{
    ///
    NET_NS_INDEX = 0,
    ///
    UTS_NS_INDEX = 1,
    ///
    IPC_NS_INDEX = 2,
    ///
    PID_NS_INDEX = 3,
    ///
    USER_NS_INDEX = 4,
    ///
    MNT_NS_INDEX = 5,
    ///
    CGROUP_NS_INDEX = 6,
    NR_NAMESPACES = 7 /** number of available namespaces */
}
///
enum perf_event_type
{
    /**
     * If perf_event_attr.sample_id_all is set then all event types will
     * have the sample_type selected fields related to where/when
     * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
     * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
     * just after the perf_event_header and the fields already present for
     * the existing fields, i.e. at the end of the payload. That way a newer
     * perf.data file will be supported by older perf tools, with these new
     * optional fields being ignored.
     * ---
     * struct sample_id {
     *     { u32 pid, tid;  } && PERF_SAMPLE_TID
     *     { u64 time;      } && PERF_SAMPLE_TIME
     *     { u64 id;        } && PERF_SAMPLE_ID
     *     { u64 stream_id; } && PERF_SAMPLE_STREAM_ID
     *     { u32 cpu, res;  } && PERF_SAMPLE_CPU
     *     { u64 id;        } && PERF_SAMPLE_IDENTIFIER
     * } && perf_event_attr::sample_id_all
     * ---
     * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
     * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
     * relative to header.size.
     */

    /*
     * The MMAP events record the PROT_EXEC mappings so that we can
     * correlate userspace IPs to code.
They have the following structure: 1561 * --- 1562 * struct { 1563 * struct perf_event_header header; 1564 * 1565 * u32 pid, tid; 1566 * u64 addr; 1567 * u64 len; 1568 * u64 pgoff; 1569 * char filename[]; 1570 * struct sample_id sample_id; 1571 * }; 1572 * --- 1573 */ 1574 PERF_RECORD_MMAP = 1, 1575 1576 /** 1577 * --- 1578 * struct { 1579 * struct perf_event_header header; 1580 * u64 id; 1581 * u64 lost; 1582 * struct sample_id sample_id; 1583 * }; 1584 * --- 1585 */ 1586 PERF_RECORD_LOST = 2, 1587 1588 /** 1589 * --- 1590 * struct { 1591 * struct perf_event_header header; 1592 * 1593 * u32 pid, tid; 1594 * char comm[]; 1595 * struct sample_id sample_id; 1596 * }; 1597 * --- 1598 */ 1599 PERF_RECORD_COMM = 3, 1600 1601 /** 1602 * --- 1603 * struct { 1604 * struct perf_event_header header; 1605 * u32 pid, ppid; 1606 * u32 tid, ptid; 1607 * u64 time; 1608 * struct sample_id sample_id; 1609 * }; 1610 * --- 1611 */ 1612 PERF_RECORD_EXIT = 4, 1613 1614 /** 1615 * --- 1616 * struct { 1617 * struct perf_event_header header; 1618 * u64 time; 1619 * u64 id; 1620 * u64 stream_id; 1621 * struct sample_id sample_id; 1622 * }; 1623 * --- 1624 */ 1625 PERF_RECORD_THROTTLE = 5, 1626 PERF_RECORD_UNTHROTTLE = 6, 1627 /** 1628 * --- 1629 * struct { 1630 * struct perf_event_header header; 1631 * u32 pid, ppid; 1632 * u32 tid, ptid; 1633 * u64 time; 1634 * struct sample_id sample_id; 1635 * }; 1636 * --- 1637 */ 1638 PERF_RECORD_FORK = 7, 1639 /** 1640 * --- 1641 * struct { 1642 * struct perf_event_header header; 1643 * u32 pid, tid; 1644 * 1645 * struct read_format values; 1646 * struct sample_id sample_id; 1647 * }; 1648 * --- 1649 */ 1650 PERF_RECORD_READ = 8, 1651 /** 1652 * --- 1653 * struct { 1654 * struct perf_event_header header; 1655 * 1656 * # 1657 * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. 1658 * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position 1659 * # is fixed relative to header. 1660 * # 1661 * 1662 * { u64 id; } && PERF_SAMPLE_IDENTIFIER 1663 * { u64 ip; } && PERF_SAMPLE_IP 1664 * { u32 pid, tid; } && PERF_SAMPLE_TID 1665 * { u64 time; } && PERF_SAMPLE_TIME 1666 * { u64 addr; } && PERF_SAMPLE_ADDR 1667 * { u64 id; } && PERF_SAMPLE_ID 1668 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID 1669 * { u32 cpu, res; } && PERF_SAMPLE_CPU 1670 * { u64 period; } && PERF_SAMPLE_PERIOD 1671 * 1672 * { struct read_format values; } && PERF_SAMPLE_READ 1673 * 1674 * { u64 nr, 1675 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN 1676 * 1677 * # 1678 * # The RAW record below is opaque data wrt the ABI 1679 * # 1680 * # That is, the ABI doesn't make any promises wrt to 1681 * # the stability of its content, it may vary depending 1682 * # on event, hardware, kernel version and phase of 1683 * # the moon. 1684 * # 1685 * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
1686 * # 1687 * 1688 * { u32 size; 1689 * char data[size];}&& PERF_SAMPLE_RAW 1690 * 1691 * { u64 nr; 1692 * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK 1693 * 1694 * { u64 abi; # enum perf_sample_regs_abi 1695 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER 1696 * 1697 * { u64 size; 1698 * char data[size]; 1699 * u64 dyn_size; } && PERF_SAMPLE_STACK_USER 1700 * 1701 * { u64 weight; } && PERF_SAMPLE_WEIGHT 1702 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC 1703 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION 1704 * { u64 abi; # enum perf_sample_regs_abi 1705 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR 1706 * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR 1707 * }; 1708 * --- 1709 */ 1710 PERF_RECORD_SAMPLE = 9, 1711 1712 /** 1713 * --- 1714 * The MMAP2 records are an augmented version of MMAP, they add 1715 * maj, min, ino numbers to be used to uniquely identify each mapping 1716 * 1717 * struct { 1718 * struct perf_event_header header; 1719 * 1720 * u32 pid, tid; 1721 * u64 addr; 1722 * u64 len; 1723 * u64 pgoff; 1724 * u32 maj; 1725 * u32 min; 1726 * u64 ino; 1727 * u64 ino_generation; 1728 * u32 prot, flags; 1729 * char filename[]; 1730 * struct sample_id sample_id; 1731 * }; 1732 * --- 1733 */ 1734 PERF_RECORD_MMAP2 = 10, 1735 1736 /** 1737 * Records that new data landed in the AUX buffer part. 1738 * --- 1739 * struct { 1740 * struct perf_event_header header; 1741 * 1742 * u64 aux_offset; 1743 * u64 aux_size; 1744 * u64 flags; 1745 * struct sample_id sample_id; 1746 * }; 1747 * --- 1748 */ 1749 PERF_RECORD_AUX = 11, 1750 1751 /** 1752 * --- 1753 * Indicates that instruction trace has started 1754 * 1755 * struct { 1756 * struct perf_event_header header; 1757 * u32 pid; 1758 * u32 tid; 1759 * }; 1760 * --- 1761 */ 1762 PERF_RECORD_ITRACE_START = 12, 1763 1764 /** 1765 * Records the dropped/lost sample number. 1766 * --- 1767 * struct { 1768 * struct perf_event_header header; 1769 * 1770 * u64 lost; 1771 * struct sample_id sample_id; 1772 * }; 1773 * --- 1774 */ 1775 PERF_RECORD_LOST_SAMPLES = 13, 1776 1777 /** 1778 * 1779 * Records a context switch in or out (flagged by 1780 * PERF_RECORD_MISC_SWITCH_OUT). See also 1781 * PERF_RECORD_SWITCH_CPU_WIDE. 1782 * --- 1783 * struct { 1784 * struct perf_event_header header; 1785 * struct sample_id sample_id; 1786 * }; 1787 * --- 1788 */ 1789 PERF_RECORD_SWITCH = 14, 1790 1791 /** 1792 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and 1793 * next_prev_tid that are the next (switching out) or previous 1794 * (switching in) pid/tid. 
     * ---
     * struct {
     *     struct perf_event_header header;
     *     u32 next_prev_pid;
     *     u32 next_prev_tid;
     *     struct sample_id sample_id;
     * };
     * ---
     */
    PERF_RECORD_SWITCH_CPU_WIDE = 15,

    /**
     * ---
     * struct {
     *     struct perf_event_header header;
     *     u32 pid;
     *     u32 tid;
     *     u64 nr_namespaces;
     *     { u64 dev, inode; } [nr_namespaces];
     *     struct sample_id sample_id;
     * };
     * ---
     */
    PERF_RECORD_NAMESPACES = 16,

    PERF_RECORD_MAX = 17 /* non-ABI */
}
///
enum PERF_MAX_STACK_DEPTH = 127;
///
enum PERF_MAX_CONTEXTS_PER_STACK = 8;
///
enum perf_callchain_context
{
    ///
    PERF_CONTEXT_HV = cast(ulong)-32,
    ///
    PERF_CONTEXT_KERNEL = cast(ulong)-128,
    ///
    PERF_CONTEXT_USER = cast(ulong)-512,
    ///
    PERF_CONTEXT_GUEST = cast(ulong)-2048,
    ///
    PERF_CONTEXT_GUEST_KERNEL = cast(ulong)-2176,
    ///
    PERF_CONTEXT_GUEST_USER = cast(ulong)-2560,
    ///
    PERF_CONTEXT_MAX = cast(ulong)-4095
}

/**
 * PERF_RECORD_AUX::flags bits
 */
enum PERF_AUX_FLAG_TRUNCATED = 0x01; /** record was truncated to fit */
enum PERF_AUX_FLAG_OVERWRITE = 0x02; /** snapshot from overwrite mode */
enum PERF_AUX_FLAG_PARTIAL = 0x04; /** record contains gaps */
enum PERF_AUX_FLAG_COLLISION = 0x08; /** sample collided with another */
///
enum PERF_FLAG_FD_NO_GROUP = 1UL << 0;
///
enum PERF_FLAG_FD_OUTPUT = 1UL << 1;
enum PERF_FLAG_PID_CGROUP = 1UL << 2; /** pid=cgroup id, per-cpu mode only */
enum PERF_FLAG_FD_CLOEXEC = 1UL << 3; /** O_CLOEXEC */
/// perf_mem_data_src is endian specific.
version (LittleEndian)
{
    ///
    union perf_mem_data_src
    {
        ///
        ulong val;

        struct
        {
            /* mixin(bitfields!(ulong, "mem_op", 5, ulong, "mem_lvl", 14, ulong,
                "mem_snoop", 5, ulong, "mem_lock", 2, ulong, "mem_dtlb", 7, ulong,
                "mem_lvl_num", 4, ulong, "mem_remote", 1, ulong,
                "mem_snoopx", 2, ulong, "mem_rsvd", 24)); */

            private ulong perf_mem_data_src_bitmanip;
            ///
            @property ulong mem_op() @safe pure nothrow @nogc const
            {
                auto result = (perf_mem_data_src_bitmanip & 31U) >> 0U;
                return cast(ulong) result;
            }
            ///
            @property void mem_op(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= mem_op_min,
                    "Value is smaller than the minimum value of bitfield 'mem_op'");
                assert(v <= mem_op_max,
                    "Value is greater than the maximum value of bitfield 'mem_op'");
                perf_mem_data_src_bitmanip = cast(
                    typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
                    -1 - cast(typeof(perf_mem_data_src_bitmanip)) 31U)) | (
                    (cast(typeof(perf_mem_data_src_bitmanip)) v << 0U) & 31U));
            }

            enum ulong mem_op_min = cast(ulong) 0U;
            enum ulong mem_op_max = cast(ulong) 31U;
            ///
            @property ulong mem_lvl() @safe pure nothrow @nogc const
            {
                auto result = (perf_mem_data_src_bitmanip & 524256U) >> 5U;
                return cast(ulong) result;
            }
            ///
            @property void mem_lvl(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= mem_lvl_min,
                    "Value is smaller than the minimum value of bitfield 'mem_lvl'");
                assert(v <= mem_lvl_max,
                    "Value is greater than the maximum value of bitfield 'mem_lvl'");
                perf_mem_data_src_bitmanip = cast(
                    typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & (
                    -1 -
cast(typeof(perf_mem_data_src_bitmanip)) 524256U)) | ( 1912 (cast(typeof(perf_mem_data_src_bitmanip)) v << 5U) & 524256U)); 1913 } 1914 1915 enum ulong mem_lvl_min = cast(ulong) 0U; 1916 enum ulong mem_lvl_max = cast(ulong) 16383U; 1917 /// 1918 @property ulong mem_snoop() @safe pure nothrow @nogc const 1919 { 1920 auto result = (perf_mem_data_src_bitmanip & 16252928U) >> 19U; 1921 return cast(ulong) result; 1922 } 1923 /// 1924 @property void mem_snoop(ulong v) @safe pure nothrow @nogc 1925 { 1926 assert(v >= mem_snoop_min, 1927 "Value is smaller than the minimum value of bitfield 'mem_snoop'"); 1928 assert(v <= mem_snoop_max, 1929 "Value is greater than the maximum value of bitfield 'mem_snoop'"); 1930 perf_mem_data_src_bitmanip = cast( 1931 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1932 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 16252928U)) | ( 1933 (cast(typeof(perf_mem_data_src_bitmanip)) v << 19U) & 16252928U)); 1934 } 1935 1936 enum ulong mem_snoop_min = cast(ulong) 0U; 1937 enum ulong mem_snoop_max = cast(ulong) 31U; 1938 /// 1939 @property ulong mem_lock() @safe pure nothrow @nogc const 1940 { 1941 auto result = (perf_mem_data_src_bitmanip & 50331648U) >> 24U; 1942 return cast(ulong) result; 1943 } 1944 /// 1945 @property void mem_lock(ulong v) @safe pure nothrow @nogc 1946 { 1947 assert(v >= mem_lock_min, 1948 "Value is smaller than the minimum value of bitfield 'mem_lock'"); 1949 assert(v <= mem_lock_max, 1950 "Value is greater than the maximum value of bitfield 'mem_lock'"); 1951 perf_mem_data_src_bitmanip = cast( 1952 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1953 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 50331648U)) | ( 1954 (cast(typeof(perf_mem_data_src_bitmanip)) v << 24U) & 50331648U)); 1955 } 1956 1957 enum ulong mem_lock_min = cast(ulong) 0U; 1958 enum ulong mem_lock_max = cast(ulong) 3U; 1959 /// 1960 @property ulong mem_dtlb() @safe pure nothrow @nogc const 1961 { 1962 auto result = (perf_mem_data_src_bitmanip & 8522825728UL) >> 26U; 1963 return cast(ulong) result; 1964 } 1965 /// 1966 @property void mem_dtlb(ulong v) @safe pure nothrow @nogc 1967 { 1968 assert(v >= mem_dtlb_min, 1969 "Value is smaller than the minimum value of bitfield 'mem_dtlb'"); 1970 assert(v <= mem_dtlb_max, 1971 "Value is greater than the maximum value of bitfield 'mem_dtlb'"); 1972 perf_mem_data_src_bitmanip = cast( 1973 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1974 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 8522825728UL)) | ( 1975 (cast(typeof(perf_mem_data_src_bitmanip)) v << 26U) & 8522825728UL)); 1976 } 1977 1978 enum ulong mem_dtlb_min = cast(ulong) 0U; 1979 enum ulong mem_dtlb_max = cast(ulong) 127U; 1980 /// 1981 @property ulong mem_lvl_num() @safe pure nothrow @nogc const 1982 { 1983 auto result = (perf_mem_data_src_bitmanip & 128849018880UL) >> 33U; 1984 return cast(ulong) result; 1985 } 1986 /// 1987 @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc 1988 { 1989 assert(v >= mem_lvl_num_min, 1990 "Value is smaller than the minimum value of bitfield 'mem_lvl_num'"); 1991 assert(v <= mem_lvl_num_max, 1992 "Value is greater than the maximum value of bitfield 'mem_lvl_num'"); 1993 perf_mem_data_src_bitmanip = cast( 1994 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1995 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 128849018880UL)) | ( 1996 (cast(typeof(perf_mem_data_src_bitmanip)) v << 33U) & 128849018880UL)); 1997 } 1998 1999 enum ulong mem_lvl_num_min = cast(ulong) 
0U; 2000 enum ulong mem_lvl_num_max = cast(ulong) 15U; 2001 /// 2002 @property ulong mem_remote() @safe pure nothrow @nogc const 2003 { 2004 auto result = (perf_mem_data_src_bitmanip & 137438953472UL) >> 37U; 2005 return cast(ulong) result; 2006 } 2007 /// 2008 @property void mem_remote(ulong v) @safe pure nothrow @nogc 2009 { 2010 assert(v >= mem_remote_min, 2011 "Value is smaller than the minimum value of bitfield 'mem_remote'"); 2012 assert(v <= mem_remote_max, 2013 "Value is greater than the maximum value of bitfield 'mem_remote'"); 2014 perf_mem_data_src_bitmanip = cast( 2015 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 2016 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 137438953472UL)) | ( 2017 (cast(typeof(perf_mem_data_src_bitmanip)) v << 37U) & 137438953472UL)); 2018 } 2019 2020 enum ulong mem_remote_min = cast(ulong) 0U; 2021 enum ulong mem_remote_max = cast(ulong) 1U; 2022 /// 2023 @property ulong mem_snoopx() @safe pure nothrow @nogc const 2024 { 2025 auto result = (perf_mem_data_src_bitmanip & 824633720832UL) >> 38U; 2026 return cast(ulong) result; 2027 } 2028 /// 2029 @property void mem_snoopx(ulong v) @safe pure nothrow @nogc 2030 { 2031 assert(v >= mem_snoopx_min, 2032 "Value is smaller than the minimum value of bitfield 'mem_snoopx'"); 2033 assert(v <= mem_snoopx_max, 2034 "Value is greater than the maximum value of bitfield 'mem_snoopx'"); 2035 perf_mem_data_src_bitmanip = cast( 2036 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 2037 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 824633720832UL)) | ( 2038 (cast(typeof(perf_mem_data_src_bitmanip)) v << 38U) & 824633720832UL)); 2039 } 2040 2041 enum ulong mem_snoopx_min = cast(ulong) 0U; 2042 enum ulong mem_snoopx_max = cast(ulong) 3U; 2043 /// 2044 @property ulong mem_rsvd() @safe pure nothrow @nogc const 2045 { 2046 auto result = (perf_mem_data_src_bitmanip & 18446742974197923840UL) >> 40U; 2047 return cast(ulong) result; 2048 } 2049 /// 2050 @property void mem_rsvd(ulong v) @safe pure nothrow @nogc 2051 { 2052 assert(v >= mem_rsvd_min, 2053 "Value is smaller than the minimum value of bitfield 'mem_rsvd'"); 2054 assert(v <= mem_rsvd_max, 2055 "Value is greater than the maximum value of bitfield 'mem_rsvd'"); 2056 perf_mem_data_src_bitmanip = cast( 2057 typeof(perf_mem_data_src_bitmanip))( 2058 (perf_mem_data_src_bitmanip & (-1 - cast( 2059 typeof(perf_mem_data_src_bitmanip)) 18446742974197923840UL)) | ( 2060 (cast(typeof(perf_mem_data_src_bitmanip)) v << 40U) & 18446742974197923840UL)); 2061 } 2062 2063 enum ulong mem_rsvd_min = cast(ulong) 0U; 2064 enum ulong mem_rsvd_max = cast(ulong) 16777215U; 2065 2066 } 2067 } 2068 } 2069 else 2070 { 2071 /// 2072 union perf_mem_data_src 2073 { 2074 /// 2075 ulong val; 2076 2077 struct 2078 { 2079 /* mixin(bitfields!(ulong, "mem_rsvd", 24, ulong, "mem_snoopx", 2, ulong, 2080 "mem_remote", 1, ulong, "mem_lvl_num", 4, ulong, "mem_dtlb", 7, ulong, 2081 "mem_lock", 2, ulong, "mem_snoop", 5, ulong, "mem_lvl", 2082 14, ulong, "mem_op", 5)); */ 2083 private ulong perf_mem_data_src; 2084 /// 2085 @property ulong mem_rsvd() @safe pure nothrow @nogc const 2086 { 2087 auto result = (perf_mem_data_src & 16777215U) >> 0U; 2088 return cast(ulong) result; 2089 } 2090 /// 2091 @property void mem_rsvd(ulong v) @safe pure nothrow @nogc 2092 { 2093 assert(v >= mem_rsvd_min, 2094 "Value is smaller than the minimum value of bitfield 'mem_rsvd'"); 2095 assert(v <= mem_rsvd_max, 2096 "Value is greater than the maximum value of bitfield 'mem_rsvd'"); 2097 
perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2098 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 16777215U)) | ( 2099 (cast(typeof(perf_mem_data_src)) v << 0U) & 16777215U)); 2100 } 2101 2102 enum ulong mem_rsvd_min = cast(ulong) 0U; 2103 enum ulong mem_rsvd_max = cast(ulong) 16777215U; 2104 /// 2105 @property ulong mem_snoopx() @safe pure nothrow @nogc const 2106 { 2107 auto result = (perf_mem_data_src & 50331648U) >> 24U; 2108 return cast(ulong) result; 2109 } 2110 /// 2111 @property void mem_snoopx(ulong v) @safe pure nothrow @nogc 2112 { 2113 assert(v >= mem_snoopx_min, 2114 "Value is smaller than the minimum value of bitfield 'mem_snoopx'"); 2115 assert(v <= mem_snoopx_max, 2116 "Value is greater than the maximum value of bitfield 'mem_snoopx'"); 2117 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2118 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 50331648U)) | ( 2119 (cast(typeof(perf_mem_data_src)) v << 24U) & 50331648U)); 2120 } 2121 2122 enum ulong mem_snoopx_min = cast(ulong) 0U; 2123 enum ulong mem_snoopx_max = cast(ulong) 3U; 2124 /// 2125 @property ulong mem_remote() @safe pure nothrow @nogc const 2126 { 2127 auto result = (perf_mem_data_src & 67108864U) >> 26U; 2128 return cast(ulong) result; 2129 } 2130 /// 2131 @property void mem_remote(ulong v) @safe pure nothrow @nogc 2132 { 2133 assert(v >= mem_remote_min, 2134 "Value is smaller than the minimum value of bitfield 'mem_remote'"); 2135 assert(v <= mem_remote_max, 2136 "Value is greater than the maximum value of bitfield 'mem_remote'"); 2137 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2138 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 67108864U)) | ( 2139 (cast(typeof(perf_mem_data_src)) v << 26U) & 67108864U)); 2140 } 2141 2142 enum ulong mem_remote_min = cast(ulong) 0U; 2143 enum ulong mem_remote_max = cast(ulong) 1U; 2144 /// 2145 @property ulong mem_lvl_num() @safe pure nothrow @nogc const 2146 { 2147 auto result = (perf_mem_data_src & 2013265920U) >> 27U; 2148 return cast(ulong) result; 2149 } 2150 /// 2151 @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc 2152 { 2153 assert(v >= mem_lvl_num_min, 2154 "Value is smaller than the minimum value of bitfield 'mem_lvl_num'"); 2155 assert(v <= mem_lvl_num_max, 2156 "Value is greater than the maximum value of bitfield 'mem_lvl_num'"); 2157 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2158 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 2013265920U)) | ( 2159 (cast(typeof(perf_mem_data_src)) v << 27U) & 2013265920U)); 2160 } 2161 2162 enum ulong mem_lvl_num_min = cast(ulong) 0U; 2163 enum ulong mem_lvl_num_max = cast(ulong) 15U; 2164 /// 2165 @property ulong mem_dtlb() @safe pure nothrow @nogc const 2166 { 2167 auto result = (perf_mem_data_src & 272730423296UL) >> 31U; 2168 return cast(ulong) result; 2169 } 2170 /// 2171 @property void mem_dtlb(ulong v) @safe pure nothrow @nogc 2172 { 2173 assert(v >= mem_dtlb_min, 2174 "Value is smaller than the minimum value of bitfield 'mem_dtlb'"); 2175 assert(v <= mem_dtlb_max, 2176 "Value is greater than the maximum value of bitfield 'mem_dtlb'"); 2177 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2178 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 272730423296UL)) | ( 2179 (cast(typeof(perf_mem_data_src)) v << 31U) & 272730423296UL)); 2180 } 2181 2182 enum ulong mem_dtlb_min = cast(ulong) 0U; 2183 enum ulong mem_dtlb_max = cast(ulong) 127U; 2184 /// 2185 @property ulong mem_lock() @safe pure nothrow @nogc const 2186 { 2187 auto 
result = (perf_mem_data_src & 824633720832UL) >> 38U; 2188 return cast(ulong) result; 2189 } 2190 /// 2191 @property void mem_lock(ulong v) @safe pure nothrow @nogc 2192 { 2193 assert(v >= mem_lock_min, 2194 "Value is smaller than the minimum value of bitfield 'mem_lock'"); 2195 assert(v <= mem_lock_max, 2196 "Value is greater than the maximum value of bitfield 'mem_lock'"); 2197 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2198 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 824633720832UL)) | ( 2199 (cast(typeof(perf_mem_data_src)) v << 38U) & 824633720832UL)); 2200 } 2201 2202 enum ulong mem_lock_min = cast(ulong) 0U; 2203 enum ulong mem_lock_max = cast(ulong) 3U; 2204 /// 2205 @property ulong mem_snoop() @safe pure nothrow @nogc const 2206 { 2207 auto result = (perf_mem_data_src & 34084860461056UL) >> 40U; 2208 return cast(ulong) result; 2209 } 2210 /// 2211 @property void mem_snoop(ulong v) @safe pure nothrow @nogc 2212 { 2213 assert(v >= mem_snoop_min, 2214 "Value is smaller than the minimum value of bitfield 'mem_snoop'"); 2215 assert(v <= mem_snoop_max, 2216 "Value is greater than the maximum value of bitfield 'mem_snoop'"); 2217 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2218 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 34084860461056UL)) | ( 2219 (cast(typeof(perf_mem_data_src)) v << 40U) & 34084860461056UL)); 2220 } 2221 2222 enum ulong mem_snoop_min = cast(ulong) 0U; 2223 enum ulong mem_snoop_max = cast(ulong) 31U; 2224 /// 2225 @property ulong mem_lvl() @safe pure nothrow @nogc const 2226 { 2227 auto result = (perf_mem_data_src & 576425567931334656UL) >> 45U; 2228 return cast(ulong) result; 2229 } 2230 /// 2231 @property void mem_lvl(ulong v) @safe pure nothrow @nogc 2232 { 2233 assert(v >= mem_lvl_min, 2234 "Value is smaller than the minimum value of bitfield 'mem_lvl'"); 2235 assert(v <= mem_lvl_max, 2236 "Value is greater than the maximum value of bitfield 'mem_lvl'"); 2237 perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & ( 2238 -1 - cast(typeof(perf_mem_data_src)) 576425567931334656UL)) | ( 2239 (cast(typeof(perf_mem_data_src)) v << 45U) & 576425567931334656UL)); 2240 } 2241 2242 enum ulong mem_lvl_min = cast(ulong) 0U; 2243 enum ulong mem_lvl_max = cast(ulong) 16383U; 2244 /// 2245 @property ulong mem_op() @safe pure nothrow @nogc const 2246 { 2247 auto result = (perf_mem_data_src & 17870283321406128128UL) >> 59U; 2248 return cast(ulong) result; 2249 } 2250 /// 2251 @property void mem_op(ulong v) @safe pure nothrow @nogc 2252 { 2253 assert(v >= mem_op_min, 2254 "Value is smaller than the minimum value of bitfield 'mem_op'"); 2255 assert(v <= mem_op_max, 2256 "Value is greater than the maximum value of bitfield 'mem_op'"); 2257 perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & ( 2258 -1 - cast(typeof(perf_mem_data_src)) 17870283321406128128UL)) | ( 2259 (cast(typeof(perf_mem_data_src)) v << 59U) & 17870283321406128128UL)); 2260 } 2261 2262 enum ulong mem_op_min = cast(ulong) 0U; 2263 enum ulong mem_op_max = cast(ulong) 31U; 2264 } 2265 } 2266 } 2267 2268 /* snoop mode, ext */ 2269 /* remote */ 2270 /* memory hierarchy level number */ 2271 /* tlb access */ 2272 /* lock instr */ 2273 /* snoop mode */ 2274 /* memory hierarchy level */ 2275 /* type of opcode */ 2276 2277 /** type of opcode (load/store/prefetch,code) */ 2278 enum PERF_MEM_OP_NA = 0x01; /** not available */ 2279 enum PERF_MEM_OP_LOAD = 0x02; /** load instruction */ 2280 enum PERF_MEM_OP_STORE = 0x04; /** store instruction 
*/ 2281 enum PERF_MEM_OP_PFETCH = 0x08; /** prefetch */ 2282 enum PERF_MEM_OP_EXEC = 0x10; /** code (execution) */ 2283 enum PERF_MEM_OP_SHIFT = 0; 2284 2285 /* memory hierarchy (memory level, hit or miss) */ 2286 enum PERF_MEM_LVL_NA = 0x01; /** not available */ 2287 enum PERF_MEM_LVL_HIT = 0x02; /** hit level */ 2288 enum PERF_MEM_LVL_MISS = 0x04; /** miss level */ 2289 enum PERF_MEM_LVL_L1 = 0x08; /** L1 */ 2290 enum PERF_MEM_LVL_LFB = 0x10; /** Line Fill Buffer */ 2291 enum PERF_MEM_LVL_L2 = 0x20; /** L2 */ 2292 enum PERF_MEM_LVL_L3 = 0x40; /** L3 */ 2293 enum PERF_MEM_LVL_LOC_RAM = 0x80; /** Local DRAM */ 2294 enum PERF_MEM_LVL_REM_RAM1 = 0x100; /** Remote DRAM (1 hop) */ 2295 enum PERF_MEM_LVL_REM_RAM2 = 0x200; /** Remote DRAM (2 hops) */ 2296 enum PERF_MEM_LVL_REM_CCE1 = 0x400; /** Remote Cache (1 hop) */ 2297 enum PERF_MEM_LVL_REM_CCE2 = 0x800; /** Remote Cache (2 hops) */ 2298 enum PERF_MEM_LVL_IO = 0x1000; /** I/O memory */ 2299 enum PERF_MEM_LVL_UNC = 0x2000; /** Uncached memory */ 2300 /// 2301 enum PERF_MEM_LVL_SHIFT = 5; 2302 2303 enum PERF_MEM_REMOTE_REMOTE = 0x01; /** Remote */ 2304 /// 2305 enum PERF_MEM_REMOTE_SHIFT = 37; 2306 2307 enum PERF_MEM_LVLNUM_L1 = 0x01; /** L1 */ 2308 enum PERF_MEM_LVLNUM_L2 = 0x02; /** L2 */ 2309 enum PERF_MEM_LVLNUM_L3 = 0x03; /** L3 */ 2310 enum PERF_MEM_LVLNUM_L4 = 0x04; /** L4 */ 2311 /* 5-0xa available */ 2312 enum PERF_MEM_LVLNUM_ANY_CACHE = 0x0b; /** Any cache */ 2313 enum PERF_MEM_LVLNUM_LFB = 0x0c; /** LFB */ 2314 enum PERF_MEM_LVLNUM_RAM = 0x0d; /** RAM */ 2315 enum PERF_MEM_LVLNUM_PMEM = 0x0e; /** PMEM */ 2316 enum PERF_MEM_LVLNUM_NA = 0x0f; /** N/A */ 2317 /// 2318 enum PERF_MEM_LVLNUM_SHIFT = 33; 2319 2320 /* snoop mode */ 2321 enum PERF_MEM_SNOOP_NA = 0x01; /** not available */ 2322 enum PERF_MEM_SNOOP_NONE = 0x02; /** no snoop */ 2323 enum PERF_MEM_SNOOP_HIT = 0x04; /** snoop hit */ 2324 enum PERF_MEM_SNOOP_MISS = 0x08; /** snoop miss */ 2325 enum PERF_MEM_SNOOP_HITM = 0x10; /** snoop hit modified */ 2326 /// 2327 enum PERF_MEM_SNOOP_SHIFT = 19; 2328 2329 enum PERF_MEM_SNOOPX_FWD = 0x01; /** forward */ 2330 /** 1 free */ 2331 enum PERF_MEM_SNOOPX_SHIFT = 37; 2332 2333 /** locked instruction */ 2334 enum PERF_MEM_LOCK_NA = 0x01; /** not available */ 2335 enum PERF_MEM_LOCK_LOCKED = 0x02; /** locked transaction */ 2336 /// 2337 enum PERF_MEM_LOCK_SHIFT = 24; 2338 2339 /* TLB access */ 2340 enum PERF_MEM_TLB_NA = 0x01; /** not available */ 2341 enum PERF_MEM_TLB_HIT = 0x02; /** hit level */ 2342 enum PERF_MEM_TLB_MISS = 0x04; /** miss level */ 2343 enum PERF_MEM_TLB_L1 = 0x08; /** L1 */ 2344 enum PERF_MEM_TLB_L2 = 0x10; /** L2 */ 2345 enum PERF_MEM_TLB_WK = 0x20; /** Hardware Walker*/ 2346 enum PERF_MEM_TLB_OS = 0x40; /** OS fault handler */ 2347 /// 2348 enum PERF_MEM_TLB_SHIFT = 26; 2349 2350 /** 2351 * single taken branch record layout: 2352 * 2353 * from: source instruction (may not always be a branch insn) 2354 * to: branch target 2355 * mispred: branch target was mispredicted 2356 * predicted: branch target was predicted 2357 * 2358 * support for mispred, predicted is optional. In case it 2359 * is not supported mispred = predicted = 0. 
2360 * 2361 * in_tx: running in a hardware transaction 2362 * abort: aborting a hardware transaction 2363 * cycles: cycles from last branch (or 0 if not supported) 2364 * type: branch type 2365 */ 2366 struct perf_branch_entry 2367 { 2368 /// 2369 ulong from; 2370 /// 2371 ulong to; 2372 2373 /* mixin(bitfields!(ulong, "mispred", 1, ulong, "predicted", 1, ulong, 2374 "in_tx", 1, ulong, "abort", 1, ulong, "cycles", 16, ulong, "type", 2375 4, ulong, "reserved", 40)); */ 2376 private ulong perf_branch_entry_bitmanip; 2377 /// 2378 @property ulong mispred() @safe pure nothrow @nogc const 2379 { 2380 auto result = (perf_branch_entry_bitmanip & 1U) >> 0U; 2381 return cast(ulong) result; 2382 } 2383 /// 2384 @property void mispred(ulong v) @safe pure nothrow @nogc 2385 { 2386 assert(v >= mispred_min, 2387 "Value is smaller than the minimum value of bitfield 'mispred'"); 2388 assert(v <= mispred_max, 2389 "Value is greater than the maximum value of bitfield 'mispred'"); 2390 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2391 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1U)) | ( 2392 (cast(typeof(perf_branch_entry_bitmanip)) v << 0U) & 1U)); 2393 } 2394 2395 enum ulong mispred_min = cast(ulong) 0U; 2396 enum ulong mispred_max = cast(ulong) 1U; 2397 /// 2398 @property ulong predicted() @safe pure nothrow @nogc const 2399 { 2400 auto result = (perf_branch_entry_bitmanip & 2U) >> 1U; 2401 return cast(ulong) result; 2402 } 2403 /// 2404 @property void predicted(ulong v) @safe pure nothrow @nogc 2405 { 2406 assert(v >= predicted_min, 2407 "Value is smaller than the minimum value of bitfield 'predicted'"); 2408 assert(v <= predicted_max, 2409 "Value is greater than the maximum value of bitfield 'predicted'"); 2410 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2411 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 2U)) | ( 2412 (cast(typeof(perf_branch_entry_bitmanip)) v << 1U) & 2U)); 2413 } 2414 2415 enum ulong predicted_min = cast(ulong) 0U; 2416 enum ulong predicted_max = cast(ulong) 1U; 2417 /// 2418 @property ulong in_tx() @safe pure nothrow @nogc const 2419 { 2420 auto result = (perf_branch_entry_bitmanip & 4U) >> 2U; 2421 return cast(ulong) result; 2422 } 2423 /// 2424 @property void in_tx(ulong v) @safe pure nothrow @nogc 2425 { 2426 assert(v >= in_tx_min, 2427 "Value is smaller than the minimum value of bitfield 'in_tx'"); 2428 assert(v <= in_tx_max, 2429 "Value is greater than the maximum value of bitfield 'in_tx'"); 2430 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2431 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 4U)) | ( 2432 (cast(typeof(perf_branch_entry_bitmanip)) v << 2U) & 4U)); 2433 } 2434 2435 enum ulong in_tx_min = cast(ulong) 0U; 2436 enum ulong in_tx_max = cast(ulong) 1U; 2437 /// 2438 @property ulong abort() @safe pure nothrow @nogc const 2439 { 2440 auto result = (perf_branch_entry_bitmanip & 8U) >> 3U; 2441 return cast(ulong) result; 2442 } 2443 /// 2444 @property void abort(ulong v) @safe pure nothrow @nogc 2445 { 2446 assert(v >= abort_min, 2447 "Value is smaller than the minimum value of bitfield 'abort'"); 2448 assert(v <= abort_max, 2449 "Value is greater than the maximum value of bitfield 'abort'"); 2450 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2451 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 8U)) | ( 2452 (cast(typeof(perf_branch_entry_bitmanip)) 
v << 3U) & 8U)); 2453 } 2454 2455 enum ulong abort_min = cast(ulong) 0U; 2456 enum ulong abort_max = cast(ulong) 1U; 2457 /// 2458 @property ulong cycles() @safe pure nothrow @nogc const 2459 { 2460 auto result = (perf_branch_entry_bitmanip & 1048560U) >> 4U; 2461 return cast(ulong) result; 2462 } 2463 /// 2464 @property void cycles(ulong v) @safe pure nothrow @nogc 2465 { 2466 assert(v >= cycles_min, 2467 "Value is smaller than the minimum value of bitfield 'cycles'"); 2468 assert(v <= cycles_max, 2469 "Value is greater than the maximum value of bitfield 'cycles'"); 2470 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2471 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1048560U)) | ( 2472 (cast(typeof(perf_branch_entry_bitmanip)) v << 4U) & 1048560U)); 2473 } 2474 2475 enum ulong cycles_min = cast(ulong) 0U; 2476 enum ulong cycles_max = cast(ulong) 65535U; 2477 /// 2478 @property ulong type() @safe pure nothrow @nogc const 2479 { 2480 auto result = (perf_branch_entry_bitmanip & 15728640U) >> 20U; 2481 return cast(ulong) result; 2482 } 2483 /// 2484 @property void type(ulong v) @safe pure nothrow @nogc 2485 { 2486 assert(v >= type_min, "Value is smaller than the minimum value of bitfield 'type'"); 2487 assert(v <= type_max, "Value is greater than the maximum value of bitfield 'type'"); 2488 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2489 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 15728640U)) | ( 2490 (cast(typeof(perf_branch_entry_bitmanip)) v << 20U) & 15728640U)); 2491 } 2492 2493 enum ulong type_min = cast(ulong) 0U; 2494 enum ulong type_max = cast(ulong) 15U; 2495 /// 2496 @property ulong reserved() @safe pure nothrow @nogc const 2497 { 2498 auto result = (perf_branch_entry_bitmanip & 18446744073692774400UL) >> 24U; 2499 return cast(ulong) result; 2500 } 2501 /// 2502 @property void reserved(ulong v) @safe pure nothrow @nogc 2503 { 2504 assert(v >= reserved_min, 2505 "Value is smaller than the minimum value of bitfield 'reserved'"); 2506 assert(v <= reserved_max, 2507 "Value is greater than the maximum value of bitfield 'reserved'"); 2508 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2509 (perf_branch_entry_bitmanip & (-1 - cast( 2510 typeof(perf_branch_entry_bitmanip)) 18446744073692774400UL)) | ( 2511 (cast(typeof(perf_branch_entry_bitmanip)) v << 24U) & 18446744073692774400UL)); 2512 } 2513 2514 enum ulong reserved_min = cast(ulong) 0U; 2515 enum ulong reserved_max = cast(ulong) 1099511627775UL; 2516 }
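
/**
 * Illustrative sketch only, assuming a sample collected with
 * PERF_SAMPLE_DATA_SRC and PERF_SAMPLE_BRANCH_STACK: shows how the
 * endian-specific perf_mem_data_src accessors, the PERF_MEM_* masks and the
 * perf_branch_entry fields defined above fit together. The function and
 * parameter names below are hypothetical and not part of this header.
 * ---
 * bool isRemoteLoadMiss(ulong sampledDataSrc) @safe pure nothrow @nogc
 * {
 *     perf_mem_data_src src;
 *     src.val = sampledDataSrc; // u64 taken from a PERF_RECORD_SAMPLE
 *     // mem_op and mem_lvl are bit masks of PERF_MEM_OP_* / PERF_MEM_LVL_*
 *     // values; mem_remote carries the single PERF_MEM_REMOTE_REMOTE bit.
 *     return (src.mem_op & PERF_MEM_OP_LOAD) != 0 &&
 *            (src.mem_lvl & PERF_MEM_LVL_MISS) != 0 &&
 *            (src.mem_remote & PERF_MEM_REMOTE_REMOTE) != 0;
 * }
 *
 * ulong branchCycles(const ref perf_branch_entry entry) @safe pure nothrow @nogc
 * {
 *     // cycles since the last branch, or 0 if the hardware does not report it
 *     // (see the perf_branch_entry layout documented above).
 *     return entry.cycles;
 * }
 * ---
 */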